ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
onert::backend::cpu::ops::ConcatLayer Class Reference

#include <ConcatLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ConcatLayer:

Public Member Functions

 ConcatLayer ()
 
template<typename T >
void concatenationGeneral ()
 
void concatenationQuant8 ()
 
void configure (const std::vector< const IPortableTensor * > &inputs, int32_t axis, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 27 of file ConcatLayer.h.

Constructor & Destructor Documentation

◆ ConcatLayer()

onert::backend::cpu::ops::ConcatLayer::ConcatLayer ( )

Definition at line 26 of file ConcatLayer.cc.

26 ConcatLayer::ConcatLayer() : _inputs(), _output(nullptr), _axis(0)
27{
28 // DO NOTHING
29}

Member Function Documentation

◆ concatenationGeneral()

template<typename T >
void onert::backend::cpu::ops::ConcatLayer::concatenationGeneral ( )

Definition at line 31 of file ConcatLayer.cc.

32{
33 uint32_t num_inputs = _inputs.size();
34
35 nnfw::cker::ConcatenationParams op_params;
36 op_params.axis = _axis;
37 op_params.inputs_count = num_inputs;
38
39 std::vector<nnfw::cker::Shape *> inputDimsPtr;
40 std::vector<nnfw::cker::Shape> inputDims;
41 inputDimsPtr.reserve(num_inputs);
42 inputDims.reserve(num_inputs);
43
44 for (uint32_t i = 0; i < num_inputs; i++)
45 {
46 inputDims.push_back(getShape(_inputs[i]));
47 inputDimsPtr.push_back(&inputDims[i]);
48 }
49
50 std::vector<const T *> inputDataPtrs;
51
52 for (const auto input : _inputs)
53 {
54 inputDataPtrs.emplace_back(getBuffer<T>(input));
55 }
56
57 nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
58 getShape(_output), getBuffer<T>(_output));
59}
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References nnfw::cker::ConcatenationParams::axis, onert::backend::cpu::ops::getShape(), and nnfw::cker::ConcatenationParams::inputs_count.

◆ concatenationQuant8()

void onert::backend::cpu::ops::ConcatLayer::concatenationQuant8 ( )

Definition at line 60 of file ConcatLayer.cc.

61{
62 uint32_t num_inputs = _inputs.size();
63
64 std::vector<int32_t> input_zeropoints(num_inputs);
65 std::vector<float> input_scales(num_inputs);
66 for (uint32_t i = 0; i < num_inputs; i++)
67 {
68 input_zeropoints[i] = _inputs[i]->data_zero_point();
69 input_scales[i] = _inputs[i]->data_scale();
70 }
71
72 nnfw::cker::ConcatenationParams op_params;
73 op_params.axis = _axis;
74 op_params.inputs_count = num_inputs;
75 op_params.input_zeropoint = input_zeropoints.data();
76 op_params.input_scale = input_scales.data();
77 op_params.output_zeropoint = _output->data_zero_point();
78 op_params.output_scale = _output->data_scale();
79
80 std::vector<nnfw::cker::Shape *> inputDimsPtr;
81 std::vector<nnfw::cker::Shape> inputDims;
82 inputDimsPtr.reserve(num_inputs);
83 inputDims.reserve(num_inputs);
84 for (uint32_t i = 0; i < num_inputs; i++)
85 {
86 inputDims.push_back(getShape(_inputs[i]));
87 inputDimsPtr.push_back(&inputDims[i]);
88 }
89
90 std::vector<const uint8_t *> inputDataPtrs;
91 for (const auto input : _inputs)
92 {
93 inputDataPtrs.emplace_back(getBuffer<uint8_t>(input));
94 }
95
96 nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
97 getShape(_output), getBuffer<uint8_t>(_output));
98}
float data_scale() const override final
int32_t data_zero_point() const override final
void ConcatenationWithScaling(const ConcatenationParams &params, const Shape *const *input_shapes, const uint8_t *const *input_data, const Shape &output_shape, uint8_t *output_data)
const int32_t * input_zeropoint
Definition Types.h:224

References nnfw::cker::ConcatenationParams::axis, nnfw::cker::ConcatenationWithScaling(), onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::ConcatenationParams::input_scale, nnfw::cker::ConcatenationParams::input_zeropoint, nnfw::cker::ConcatenationParams::inputs_count, nnfw::cker::ConcatenationParams::output_scale, and nnfw::cker::ConcatenationParams::output_zeropoint.

Referenced by run().

◆ configure()

void onert::backend::cpu::ops::ConcatLayer::configure ( const std::vector< const IPortableTensor * > &  inputs,
int32_t  axis,
IPortableTensor *  output 
)

Definition at line 100 of file ConcatLayer.cc.

102{
103 assert(inputs.size() > 0);
104 assert(output != nullptr);
105
106 _inputs = inputs;
107 _axis = axis;
108 _output = output;
109}

◆ run()

void onert::backend::cpu::ops::ConcatLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 111 of file ConcatLayer.cc.

112{
113 switch (_output->data_type())
114 {
115 case OperandType::FLOAT32:
116 concatenationGeneral<float>();
117 break;
118 case OperandType::QUANT_UINT8_ASYMM:
119 concatenationQuant8();
120 break;
121 case OperandType::QUANT_INT8_ASYMM:
122 concatenationGeneral<int8_t>();
123 break;
124 case OperandType::INT32:
125 concatenationGeneral<int32_t>();
126 break;
127 case OperandType::INT64:
128 concatenationGeneral<int64_t>();
129 break;
130 default:
131 throw std::runtime_error("Concat: unsupported data type");
132 }
133}
ir::DataType data_type() const override final

References concatenationQuant8(), and onert::backend::IPortableTensor::data_type().


The documentation for this class was generated from the following files: