ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::ArgMinMaxLayer Class Reference

#include <ArgMinMaxLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ArgMinMaxLayer:

Public Member Functions

 ArgMinMaxLayer ()
 
void configure (const IPortableTensor *input, IPortableTensor *output, const IPortableTensor *axis, bool is_arg_max)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 33 of file ArgMinMaxLayer.h.

Constructor & Destructor Documentation

◆ ArgMinMaxLayer()

onert::backend::cpu::ops::ArgMinMaxLayer::ArgMinMaxLayer ( )
inline

Definition at line 36 of file ArgMinMaxLayer.h.

36: _input(nullptr), _output(nullptr), _axis(nullptr), _is_arg_max(true) {}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::ArgMinMaxLayer::configure ( const IPortableTensor *  input,
IPortableTensor *  output,
const IPortableTensor *  axis,
bool  is_arg_max 
)

Definition at line 47 of file ArgMinMaxLayer.cc.

49{
50 _input = input;
51 _output = output;
52 _axis = axis;
53 _is_arg_max = is_arg_max;
54}

◆ run()

void onert::backend::cpu::ops::ArgMinMaxLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 56 of file ArgMinMaxLayer.cc.

57{
58 if (_axis->total_size() != sizeof(int32_t))
59 {
60 throw std::runtime_error("ArgMinMax: wrong shape of axis");
61 }
62 auto axis = *getBuffer<int32_t>(_axis);
63 if (axis < 0)
64 {
65 axis += _input->getShape().rank();
66 }
67#define TF_LITE_ARG_MIN_MAX(input_type, axis_type, output_type) \
68 ArgMinMax(getShape(_input), getBuffer<input_type>(_input), getShape(_output), \
69 getBuffer<output_type>(_output), axis, GetComparefunction<input_type>(_is_arg_max));
70 if (_output->data_type() == ir::DataType::INT32)
71 {
72 switch (_input->data_type())
73 {
74 case ir::DataType::FLOAT32:
75 TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
76 break;
77 case ir::DataType::QUANT_UINT8_ASYMM:
78 case ir::DataType::UINT8:
79 TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
80 break;
81 case ir::DataType::QUANT_INT8_ASYMM:
82 TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
83 break;
84 case ir::DataType::INT32:
85 TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
86 break;
87 default:
88 throw std::runtime_error("ArgMinMax: unsupported data type");
89 }
90 }
91 else if (_output->data_type() == ir::DataType::INT64)
92 {
93 switch (_input->data_type())
94 {
95 case ir::DataType::FLOAT32:
96 TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
97 break;
98 case ir::DataType::QUANT_UINT8_ASYMM:
99 case ir::DataType::UINT8:
100 TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
101 break;
102 case ir::DataType::QUANT_INT8_ASYMM:
103 TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t);
104 break;
105 case ir::DataType::INT32:
106 TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
107 break;
108 default:
109 throw std::runtime_error("ArgMinMax: unsupported data type");
110 }
111 }
112 else
113 {
114 throw std::runtime_error("ArgMinMax: unsupported data type");
115 }
116
117#undef TF_LITE_ARG_MIN_MAX
118}
#define TF_LITE_ARG_MIN_MAX(input_type, axis_type, output_type)
size_t total_size() const override final
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.

References onert::backend::IPortableTensor::data_type(), onert::backend::IPortableTensor::getShape(), TF_LITE_ARG_MIN_MAX, and onert::backend::IPortableTensor::total_size().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: