ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::ConcatLayer Class Reference

#include <ConcatLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ConcatLayer:

Public Member Functions

 ConcatLayer ()
 
template<typename T >
void concatenationGeneral ()
 
void concatenationQuant8 ()
 
void configure (const std::vector< const IPortableTensor * > &inputs, int32_t axis, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 33 of file ConcatLayer.h.

Constructor & Destructor Documentation

◆ ConcatLayer()

onert::backend::cpu::ops::ConcatLayer::ConcatLayer ( )

Definition at line 32 of file ConcatLayer.cc.

32 : _inputs(), _output(nullptr), _axis(0)
33{
34 // DO NOTHING
35}

Member Function Documentation

◆ concatenationGeneral()

template<typename T >
void onert::backend::cpu::ops::ConcatLayer::concatenationGeneral ( )

Definition at line 37 of file ConcatLayer.cc.

38{
39 uint32_t num_inputs = _inputs.size();
40
41 nnfw::cker::ConcatenationParams op_params;
42 op_params.axis = _axis;
43 op_params.inputs_count = num_inputs;
44
45 std::vector<nnfw::cker::Shape *> inputDimsPtr;
46 std::vector<nnfw::cker::Shape> inputDims;
47 inputDimsPtr.reserve(num_inputs);
48 inputDims.reserve(num_inputs);
49
50 for (uint32_t i = 0; i < num_inputs; i++)
51 {
52 inputDims.push_back(getShape(_inputs[i]));
53 inputDimsPtr.push_back(&inputDims[i]);
54 }
55
56 std::vector<const T *> inputDataPtrs;
57
58 for (const auto input : _inputs)
59 {
60 inputDataPtrs.emplace_back(getBuffer<T>(input));
61 }
62
63 nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
64 getShape(_output), getBuffer<T>(_output));
65}
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References nnfw::cker::ConcatenationParams::axis, onert::backend::cpu::ops::getShape(), and nnfw::cker::ConcatenationParams::inputs_count.

◆ concatenationQuant8()

void onert::backend::cpu::ops::ConcatLayer::concatenationQuant8 ( )

Definition at line 66 of file ConcatLayer.cc.

67{
68 uint32_t num_inputs = _inputs.size();
69
70 std::vector<int32_t> input_zeropoints(num_inputs);
71 std::vector<float> input_scales(num_inputs);
72 for (uint32_t i = 0; i < num_inputs; i++)
73 {
74 input_zeropoints[i] = _inputs[i]->data_zero_point();
75 input_scales[i] = _inputs[i]->data_scale();
76 }
77
78 nnfw::cker::ConcatenationParams op_params;
79 op_params.axis = _axis;
80 op_params.inputs_count = num_inputs;
81 op_params.input_zeropoint = input_zeropoints.data();
82 op_params.input_scale = input_scales.data();
83 op_params.output_zeropoint = _output->data_zero_point();
84 op_params.output_scale = _output->data_scale();
85
86 std::vector<nnfw::cker::Shape *> inputDimsPtr;
87 std::vector<nnfw::cker::Shape> inputDims;
88 inputDimsPtr.reserve(num_inputs);
89 inputDims.reserve(num_inputs);
90 for (uint32_t i = 0; i < num_inputs; i++)
91 {
92 inputDims.push_back(getShape(_inputs[i]));
93 inputDimsPtr.push_back(&inputDims[i]);
94 }
95
96 std::vector<const uint8_t *> inputDataPtrs;
97 for (const auto input : _inputs)
98 {
99 inputDataPtrs.emplace_back(getBuffer<uint8_t>(input));
100 }
101
102 nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
103 getShape(_output), getBuffer<uint8_t>(_output));
104}
float data_scale() const override final
int32_t data_zero_point() const override final
void ConcatenationWithScaling(const ConcatenationParams &params, const Shape *const *input_shapes, const uint8_t *const *input_data, const Shape &output_shape, uint8_t *output_data)
const int32_t * input_zeropoint
Definition Types.h:224

References nnfw::cker::ConcatenationParams::axis, nnfw::cker::ConcatenationWithScaling(), onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::ConcatenationParams::input_scale, nnfw::cker::ConcatenationParams::input_zeropoint, nnfw::cker::ConcatenationParams::inputs_count, nnfw::cker::ConcatenationParams::output_scale, and nnfw::cker::ConcatenationParams::output_zeropoint.

Referenced by run().

◆ configure()

void onert::backend::cpu::ops::ConcatLayer::configure ( const std::vector< const IPortableTensor * > &  inputs,
int32_t  axis,
IPortableTensor *  output 
)

Definition at line 106 of file ConcatLayer.cc.

108{
109 assert(inputs.size() > 0);
110 assert(output != nullptr);
111
112 _inputs = inputs;
113 _axis = axis;
114 _output = output;
115}

◆ run()

void onert::backend::cpu::ops::ConcatLayer::run ( )
override virtual

Implements onert::exec::IFunction.

Definition at line 117 of file ConcatLayer.cc.

118{
119 switch (_output->data_type())
120 {
121 case OperandType::FLOAT32:
122 concatenationGeneral<float>();
123 break;
124 case OperandType::QUANT_UINT8_ASYMM:
125 concatenationQuant8();
126 break;
127 case OperandType::QUANT_INT8_ASYMM:
128 concatenationGeneral<int8_t>();
129 break;
130 case OperandType::INT32:
131 concatenationGeneral<int32_t>();
132 break;
133 case OperandType::INT64:
134 concatenationGeneral<int64_t>();
135 break;
136 default:
137 throw std::runtime_error("Concat: unsupported data type");
138 }
139}
ir::DataType data_type() const override final

References concatenationQuant8(), and onert::backend::IPortableTensor::data_type().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: