ONE - On-device Neural Engine
Einsum.h File Reference
#include "cker/Types.h"
#include "cker/Shape.h"
#include "cker/Utils.h"
#include "cker/operation/Helper/Tensor.h"
#include "cker/operation/Helper/MatmulBCast.h"
#include "Transpose.h"
#include "BatchMatMul.h"
#include <string>
#include <vector>
#include <map>
#include <numeric>
#include <algorithm>


Data Structures

struct  nnfw::cker::functor::StrideFunctor< Device, T, N >
 
struct  nnfw::cker::functor::InflateFunctor< Device, T, N >
 
struct  nnfw::cker::functor::ReduceFunctor< Device, Reducer >
 
struct  nnfw::cker::functor::SetZeroFunctor< Device, T >
 
class  nnfw::cker::Einsum
 

Namespaces

namespace  nnfw
 
namespace  nnfw::cker
 
namespace  nnfw::cker::functor
 

Macros

#define NDIMS_CASE(N)
 

Typedefs

using nnfw::cker::ShapeVec = std::vector< int32_t >
 
using nnfw::cker::Labels = std::vector< int32_t >
 
using nnfw::cker::OperandLabels = std::vector< Labels >
 
using nnfw::cker::LabelCounts = std::vector< int32_t >
 
using nnfw::cker::OperandLabelCounts = std::vector< LabelCounts >
 
using nnfw::cker::LabelToDimSizes = std::vector< int32_t >
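The sketch below shows, as a rough orientation, how these aliases could be populated while parsing an equation such as "ij,jk->ik": every distinct subscript character receives an integer label, and each operand records the labels of its dimensions. The helper function and the label numbering are illustrative assumptions, not the parsing code used by Einsum itself.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Local stand-ins for the aliases declared in this header (assumed to be the plain
// std::vector instantiations listed above).
using Labels = std::vector<int32_t>;
using OperandLabels = std::vector<Labels>;

// Hypothetical helper: assign an integer id to each subscript character on the input
// side of the equation ("ij,jk") and collect the ids per operand.
OperandLabels map_input_labels(const std::string &lhs)
{
  std::map<char, int32_t> label_ids;
  OperandLabels operand_labels(1);
  for (char c : lhs)
  {
    if (c == ',')
    {
      operand_labels.emplace_back(); // start the label list of the next operand
      continue;
    }
    auto it = label_ids.emplace(c, static_cast<int32_t>(label_ids.size())).first;
    operand_labels.back().push_back(it->second);
  }
  return operand_labels;
}
// For "ij,jk" this yields {{0, 1}, {1, 2}}; label 1 ('j') is shared by both operands.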
 

Enumerations

enum  nnfw::cker::DimensionType {
  nnfw::cker::kBroadcasting = 0, nnfw::cker::kBatch = 1, nnfw::cker::kFree = 2,
  nnfw::cker::kContract = 3, nnfw::cker::kReduce = 4
}
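
A common reading of these categories, and the one used by the TensorFlow einsum code that the functor names in this file mirror, is: kBroadcasting covers dimensions matched by an ellipsis, kBatch labels appear in every input and in the output, kFree labels appear in one input and in the output, kContract labels appear in several inputs but not in the output, and kReduce labels appear in a single input only and are summed away. The self-contained sketch below classifies the labels of "bij,bjk->bik" under that assumption; the classify helper is hypothetical and not part of this header.

#include <iostream>

// Local mirror of the enumerators above so that the sketch compiles on its own.
enum DimensionType
{
  kBroadcasting = 0, // dimension matched by an ellipsis ("...") and broadcast
  kBatch = 1,        // label present in every input and in the output
  kFree = 2,         // label present in one input and in the output
  kContract = 3,     // label present in several inputs but not in the output
  kReduce = 4        // label present in a single input only and summed away
};

// Hypothetical classifier for a label of a two-operand einsum; the real Einsum class
// derives the category from per-label counts, this only illustrates the categories.
DimensionType classify(bool in_lhs, bool in_rhs, bool in_output)
{
  if (in_lhs && in_rhs)
    return in_output ? kBatch : kContract;
  return in_output ? kFree : kReduce;
}

int main()
{
  // "bij,bjk->bik": 'b' is a batch label, 'i' and 'k' are free, 'j' is contracted.
  std::cout << classify(true, true, true) << ' '    // 'b' -> kBatch (1)
            << classify(true, false, true) << ' '   // 'i' -> kFree (2)
            << classify(true, true, false) << '\n'; // 'j' -> kContract (3)
  return 0;
}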
 

Macro Definition Documentation

◆ NDIMS_CASE

#define NDIMS_CASE(N)
Value:
case N:                                                                                    \
{                                                                                          \
  if (should_inflate)                                                                      \
  {                                                                                        \
    auto output_map = output->shaped<T, N>(reshape);                                       \
    auto input_map = input.shaped<T, N>(strided_shape_dims);                               \
    functor::InflateFunctor<Eigen::ThreadPoolDevice, T, N>()(device, input_map, strides,   \
                                                             output_map);                  \
  }                                                                                        \
  else                                                                                     \
  {                                                                                        \
    auto input_map = input.shaped<T, N>(reshape);                                          \
    auto output_map = output->shaped<T, N>(strided_shape_dims);                            \
    functor::StrideFunctor<Eigen::ThreadPoolDevice, T, N>()(device, input_map, strides,    \
                                                            output_map);                   \
  }                                                                                        \
}                                                                                          \
break;
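
NDIMS_CASE generates one switch case per supported rank so that the runtime rank of a tensor can be dispatched to the compile-time rank N required by the Eigen-backed StrideFunctor / InflateFunctor instantiations. The standalone sketch below reproduces only that dispatch pattern; the RANK_CASE macro, the rank range, and the print-only body are illustrative and do not replicate the functors invoked by the real macro.

#include <iostream>

// Simplified stand-in for NDIMS_CASE: each case turns a runtime rank into the
// compile-time constant N that a templated body needs. The real macro instantiates
// functor::StrideFunctor / functor::InflateFunctor for rank N instead of printing.
#define RANK_CASE(N)                                 \
  case N:                                            \
  {                                                  \
    std::cout << "dispatched to rank " << N << '\n'; \
    break;                                           \
  }

void dispatch_on_rank(int rank)
{
  switch (rank)
  {
    RANK_CASE(1)
    RANK_CASE(2)
    RANK_CASE(3)
    RANK_CASE(4)
    RANK_CASE(5)
    RANK_CASE(6)
    default:
      std::cout << "unsupported rank " << rank << '\n';
      break;
  }
}

int main()
{
  dispatch_on_rank(3); // prints "dispatched to rank 3"
  return 0;
}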