ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::ConcatLayer Class Reference

#include <ConcatLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ConcatLayer:

Public Member Functions

 ConcatLayer ()
 
template<typename T >
void concatenationGeneral ()
 
void concatenationQuant8 ()
 
void configure (const std::vector< const IPortableTensor * > &inputs, int32_t axis, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 27 of file ConcatLayer.h.

Constructor & Destructor Documentation

◆ ConcatLayer()

onert::backend::cpu::ops::ConcatLayer::ConcatLayer ( )

Definition at line 55 of file ConcatLayer.cc.

55 : _inputs(), _output(nullptr), _axis(0)
56{
57 // DO NOTHING
58}

Member Function Documentation

◆ concatenationGeneral()

template<typename T >
void onert::backend::cpu::ops::ConcatLayer::concatenationGeneral ( )

Definition at line 60 of file ConcatLayer.cc.

61{
62 uint32_t num_inputs = _inputs.size();
63
64 nnfw::cker::ConcatenationParams op_params;
65 op_params.axis = _axis;
66 op_params.inputs_count = num_inputs;
67
68 std::vector<nnfw::cker::Shape *> inputDimsPtr;
69 std::vector<nnfw::cker::Shape> inputDims;
70 inputDimsPtr.reserve(num_inputs);
71 inputDims.reserve(num_inputs);
72
73 for (uint32_t i = 0; i < num_inputs; i++)
74 {
75 inputDims.push_back(getShape(_inputs[i]));
76 inputDimsPtr.push_back(&inputDims[i]);
77 }
78
79 std::vector<const T *> inputDataPtrs;
80
81 for (const auto input : _inputs)
82 {
83 inputDataPtrs.emplace_back(getBuffer<T>(input));
84 }
85
86 nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
87 getShape(_output), getBuffer<T>(_output));
88}
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References nnfw::cker::ConcatenationParams::axis, onert::backend::cpu::ops::getShape(), and nnfw::cker::ConcatenationParams::inputs_count.

◆ concatenationQuant8()

void onert::backend::cpu::ops::ConcatLayer::concatenationQuant8 ( )

Definition at line 89 of file ConcatLayer.cc.

90{
91 uint32_t num_inputs = _inputs.size();
92
93 std::vector<int32_t> input_zeropoints(num_inputs);
94 std::vector<float> input_scales(num_inputs);
95 for (uint32_t i = 0; i < num_inputs; i++)
96 {
97 input_zeropoints[i] = _inputs[i]->data_zero_point();
98 input_scales[i] = _inputs[i]->data_scale();
99 }
100
101 nnfw::cker::ConcatenationParams op_params;
102 op_params.axis = _axis;
103 op_params.inputs_count = num_inputs;
104 op_params.input_zeropoint = input_zeropoints.data();
105 op_params.input_scale = input_scales.data();
106 op_params.output_zeropoint = _output->data_zero_point();
107 op_params.output_scale = _output->data_scale();
108
109 std::vector<nnfw::cker::Shape *> inputDimsPtr;
110 std::vector<nnfw::cker::Shape> inputDims;
111 inputDimsPtr.reserve(num_inputs);
112 inputDims.reserve(num_inputs);
113 for (uint32_t i = 0; i < num_inputs; i++)
114 {
115 inputDims.push_back(getShape(_inputs[i]));
116 inputDimsPtr.push_back(&inputDims[i]);
117 }
118
119 std::vector<const uint8_t *> inputDataPtrs;
120 for (const auto input : _inputs)
121 {
122 inputDataPtrs.emplace_back(getBuffer<uint8_t>(input));
123 }
124
125 nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
126 getShape(_output), getBuffer<uint8_t>(_output));
127}
float data_scale() const override final
int32_t data_zero_point() const override final
void ConcatenationWithScaling(const ConcatenationParams &params, const Shape *const *input_shapes, const uint8_t *const *input_data, const Shape &output_shape, uint8_t *output_data)
const int32_t * input_zeropoint
Definition Types.h:224

References nnfw::cker::ConcatenationParams::axis, nnfw::cker::ConcatenationWithScaling(), onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::ConcatenationParams::input_scale, nnfw::cker::ConcatenationParams::input_zeropoint, nnfw::cker::ConcatenationParams::inputs_count, nnfw::cker::ConcatenationParams::output_scale, and nnfw::cker::ConcatenationParams::output_zeropoint.

Referenced by run().

◆ configure()

void onert::backend::cpu::ops::ConcatLayer::configure ( const std::vector< const IPortableTensor * > &  inputs,
int32_t  axis,
IPortableTensor *  output 
)

Definition at line 129 of file ConcatLayer.cc.

131{
132 assert(inputs.size() > 0);
133 assert(output != nullptr);
134
135 _inputs = inputs;
136 _axis = axis;
137 _output = output;
138}

◆ run()

void onert::backend::cpu::ops::ConcatLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 140 of file ConcatLayer.cc.

141{
142 switch (_output->data_type())
143 {
144 case OperandType::FLOAT32:
145 concatenationGeneral<float>();
146 break;
147 case OperandType::QUANT_UINT8_ASYMM:
148 concatenationQuant8();
149 break;
150 case OperandType::QUANT_INT8_ASYMM:
151 concatenationGeneral<int8_t>();
152 break;
153 case OperandType::QUANT_INT16_SYMM:
154 concatenationGeneral<int16_t>();
155 break;
156 case OperandType::INT32:
157 concatenationGeneral<int32_t>();
158 break;
159 case OperandType::INT64:
160 concatenationGeneral<int64_t>();
161 break;
162 default:
163 throw std::runtime_error("Concat: unsupported data type");
164 }
165}
ir::DataType data_type() const override final

References concatenationQuant8(), and onert::backend::IPortableTensor::data_type().


The documentation for this class was generated from the following files:
ConcatLayer.h
ConcatLayer.cc