ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::SplitVLayer Class Reference

#include <SplitVLayer.h>

Collaboration diagram for onert::backend::cpu::ops::SplitVLayer:

Public Member Functions

 SplitVLayer ()
 
template<typename T >
void splitV (void)
 
void configure (const IPortableTensor *input, const IPortableTensor *size_splits, const IPortableTensor *size_dim, uint16_t num_splits, std::vector< IPortableTensor * > &outputs)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 27 of file SplitVLayer.h.

Constructor & Destructor Documentation

◆ SplitVLayer()

onert::backend::cpu::ops::SplitVLayer::SplitVLayer ( )

Definition at line 59 of file SplitVLayer.cc.

60 : _input(nullptr), _size_splits(nullptr), _split_dim(nullptr), _num_splits(0), _outputs()
61{
62 // DO NOTHING
63}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::SplitVLayer::configure ( const IPortableTensor *  input,
const IPortableTensor *  size_splits,
const IPortableTensor *  size_dim,
uint16_t  num_splits,
std::vector< IPortableTensor * > &  outputs 
)

Definition at line 86 of file SplitVLayer.cc.

89{
90 assert(input != nullptr);
91
92 _num_splits = num_splits;
93 _size_splits = size_splits;
94 _input = input;
95 _split_dim = split_dim;
96 _outputs = outputs;
97}

◆ run()

void onert::backend::cpu::ops::SplitVLayer::run ( )
override virtual

Implements onert::exec::IFunction.

Definition at line 99 of file SplitVLayer.cc.

100{
101 if (_input->data_type() == OperandType::FLOAT32)
102 {
103 splitV<float>();
104 }
105 else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
106 {
107 splitV<uint8_t>();
108 }
109 else if (_input->data_type() == OperandType::INT32)
110 {
111 splitV<int32_t>();
112 }
113 else if (_input->data_type() == OperandType::INT64)
114 {
115 splitV<int64_t>();
116 }
117 else
118 {
119 throw std::runtime_error{"SplitV: unsupported input type"};
120 }
121}
ir::DataType data_type() const override final

References onert::backend::IPortableTensor::data_type().

◆ splitV()

template<typename T >
void onert::backend::cpu::ops::SplitVLayer::splitV ( void  )

Definition at line 65 of file SplitVLayer.cc.

66{
67 nnfw::cker::SplitVParams op_params;
68 op_params.axis = *getBuffer<int32_t>(_split_dim);
69 op_params.num_split = _num_splits;
70
71 std::vector<T *> outputPtrs;
72 std::vector<nnfw::cker::Shape> outshape;
73
74 for (const auto output : _outputs)
75 {
76 assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims()));
77 outputPtrs.emplace_back(getBuffer<T>(output));
78 outshape.emplace_back(getShape(output));
79 }
80
81 assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims()));
82 nnfw::cker::SplitV<T>(op_params, getShape(_input), getBuffer<T>(_input), outshape,
83 outputPtrs.data());
84}
size_t total_size() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
uint32_t sizeOfData(OperandType type, const std::vector< int32_t > &dimensions)

References nnfw::cker::SplitVParams::axis, onert::backend::IPortableTensor::data_type(), onert::backend::IPortableTensor::getShape(), onert::backend::cpu::ops::getShape(), nnfw::cker::SplitVParams::num_split, onert::backend::cpu::ops::sizeOfData(), and onert::backend::IPortableTensor::total_size().


The documentation for this class was generated from the following files: