ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
onert::backend::cpu::ops::ReduceLayer Class Reference

#include <ReduceLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ReduceLayer:

Public Member Functions

 ReduceLayer ()
 
 ~ReduceLayer ()
 
void configure (const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output, ReduceType reduceType, bool keep_dims)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 46 of file ReduceLayer.h.

Constructor & Destructor Documentation

◆ ReduceLayer()

onert::backend::cpu::ops::ReduceLayer::ReduceLayer ( )

Definition at line 155 of file ReduceLayer.cc.

156 : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()),
157 _kernel(), _reduceType(ReduceType::kInvalid)
158{
159 // DO NOTHING
160}

◆ ~ReduceLayer()

onert::backend::cpu::ops::ReduceLayer::~ReduceLayer ( )
default

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::ReduceLayer::configure ( const IPortableTensor * input,
const IPortableTensor * axes,
IPortableTensor * output,
ReduceType  reduceType,
bool  keep_dims 
)

Definition at line 164 of file ReduceLayer.cc.

166{
167 _input = input;
168 _axes = axes;
169 _output = output;
170 _reduceType = reduceType;
171
172 switch (_reduceType)
173 {
174 case ReduceType::kSum:
175 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
176 {
177 _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2,
178 std::placeholders::_3, keep_dims, *_reduce_kernel);
179 return;
180 }
181 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum);
182 break;
183 case ReduceType::kProd:
184 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd);
185 break;
186 case ReduceType::kMax:
187 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax);
188 break;
189 case ReduceType::kMin:
190 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin);
191 break;
192 case ReduceType::kAny:
193 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny);
194 break;
195 case ReduceType::kAll:
196 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll);
197 break;
198 default:
199 throw std::runtime_error{"Reduce: Unsupported reduce type"};
200 }
201}
ir::DataType data_type() const override final

References onert::backend::IPortableTensor::data_type(), onert::backend::cpu::ops::kAll, onert::backend::cpu::ops::kAny, onert::backend::cpu::ops::kMax, onert::backend::cpu::ops::kMin, onert::backend::cpu::ops::kProd, and onert::backend::cpu::ops::kSum.

◆ run()

void onert::backend::cpu::ops::ReduceLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 203 of file ReduceLayer.cc.

204{
205 const auto axes = getReducerAxes(_axes);
206#ifdef USE_NEON
207 int32_t rank = _input->getShape().rank();
208 if (_input->data_type() == ir::DataType::FLOAT32 && _reduceType == ReduceType::kSum &&
209 axes.size() == 1 && (axes[0] == -1 || axes[0] == rank - 1))
210 {
211 OptimizedReduceSum(getBuffer<float>(_input), getShape(_input), getBuffer<float>(_output));
212 return;
213 }
214#endif // NEON
215 _kernel(_input, _output, axes);
216}
ir::Shape getShape() const override final
Get ir::Shape of tensor.
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
std::vector< int32_t > getReducerAxes(const IPortableTensor *axes)

References onert::backend::IPortableTensor::data_type(), onert::backend::cpu::ops::getReducerAxes(), onert::backend::IPortableTensor::getShape(), onert::backend::cpu::ops::getShape(), and onert::backend::cpu::ops::kSum.


The documentation for this class was generated from the following files: