ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::ReduceLayer Class Reference

#include <ReduceLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ReduceLayer:

Public Member Functions

 ReduceLayer ()
 
 ~ReduceLayer ()
 
void configure (const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output, ReduceType reduceType, bool keep_dims)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 55 of file ReduceLayer.h.

Constructor & Destructor Documentation

◆ ReduceLayer()

onert::backend::cpu::ops::ReduceLayer::ReduceLayer ( )

Definition at line 161 of file ReduceLayer.cc.

162 : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()),
163 _kernel(), _reduceType(ReduceType::kInvalid)
164{
165 // DO NOTHING
166}

◆ ~ReduceLayer()

onert::backend::cpu::ops::ReduceLayer::~ReduceLayer ( )
default

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::ReduceLayer::configure ( const IPortableTensor *  input,
const IPortableTensor *  axes,
IPortableTensor *  output,
ReduceType  reduceType,
bool  keep_dims 
)

Definition at line 170 of file ReduceLayer.cc.

172{
173 _input = input;
174 _axes = axes;
175 _output = output;
176 _reduceType = reduceType;
177
178 switch (_reduceType)
179 {
180 case ReduceType::kSum:
181 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
182 {
183 _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2,
184 std::placeholders::_3, keep_dims, *_reduce_kernel);
185 return;
186 }
187 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum);
 188 break;
 189 case ReduceType::kProd:
 190 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd);
191 break;
192 case ReduceType::kMax:
193 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax);
194 break;
195 case ReduceType::kMin:
196 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin);
197 break;
198 case ReduceType::kAny:
199 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny);
200 break;
201 case ReduceType::kAll:
202 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll);
203 break;
204 default:
205 throw std::runtime_error{"Reduce: Unsupported reduce type"};
206 }
207}
ir::DataType data_type() const override final

References onert::backend::IPortableTensor::data_type(), onert::backend::cpu::ops::kAll, onert::backend::cpu::ops::kAny, onert::backend::cpu::ops::kMax, onert::backend::cpu::ops::kMin, onert::backend::cpu::ops::kProd, and onert::backend::cpu::ops::kSum.

◆ run()

void onert::backend::cpu::ops::ReduceLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 209 of file ReduceLayer.cc.

210{
211 const auto axes = getReducerAxes(_axes);
212#ifdef USE_NEON
213 int32_t rank = _input->getShape().rank();
214 if (_input->data_type() == ir::DataType::FLOAT32 && _reduceType == ReduceType::kSum &&
215 axes.size() == 1 && (axes[0] == -1 || axes[0] == rank - 1))
216 {
217 OptimizedReduceSum(getBuffer<float>(_input), getShape(_input), getBuffer<float>(_output));
218 return;
219 }
220#endif // NEON
221 _kernel(_input, _output, axes);
222}
ir::Shape getShape() const override final
Get ir::Shape of tensor.
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
std::vector< int32_t > getReducerAxes(const IPortableTensor *axes)

References onert::backend::IPortableTensor::data_type(), onert::backend::cpu::ops::getReducerAxes(), onert::backend::IPortableTensor::getShape(), onert::backend::cpu::ops::getShape(), and onert::backend::cpu::ops::kSum.

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: