ONE - On-device Neural Engine
onert::backend::cpu::ops::ArgMinMaxLayer Class Reference

#include <ArgMinMaxLayer.h>

Collaboration diagram for onert::backend::cpu::ops::ArgMinMaxLayer:

Public Member Functions

 ArgMinMaxLayer ()
 
void configure (const IPortableTensor *input, IPortableTensor *output, const IPortableTensor *axis, bool is_arg_max)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description
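
Kernel of the CPU backend that computes the index of the minimum or maximum element of the input tensor along a given axis; the mode is selected by the is_arg_max flag passed to configure().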

Definition at line 27 of file ArgMinMaxLayer.h.

Constructor & Destructor Documentation

◆ ArgMinMaxLayer()

onert::backend::cpu::ops::ArgMinMaxLayer::ArgMinMaxLayer ( )
inline

Definition at line 30 of file ArgMinMaxLayer.h.

30 ArgMinMaxLayer() : _input(nullptr), _output(nullptr), _axis(nullptr), _is_arg_max(true) {}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::ArgMinMaxLayer::configure ( const IPortableTensor *  input,
IPortableTensor *  output,
const IPortableTensor *  axis,
bool  is_arg_max 
)

Definition at line 41 of file ArgMinMaxLayer.cc.

43{
44 _input = input;
45 _output = output;
46 _axis = axis;
47 _is_arg_max = is_arg_max;
48}
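
A minimal usage sketch (not taken from the ONE sources), assuming the caller already holds the operand tensors; makeArgMax, the include path, and the parameter names are hypothetical, introduced only for illustration:

// Hypothetical sketch: wiring an ArgMinMaxLayer for an arg-max reduction.
// The tensor pointers are assumed to come from the backend's tensor registry.
#include <memory>
#include "ArgMinMaxLayer.h" // assumed include path

std::unique_ptr<onert::exec::IFunction>
makeArgMax(const onert::backend::IPortableTensor *input, onert::backend::IPortableTensor *output,
           const onert::backend::IPortableTensor *axis)
{
  auto fn = std::make_unique<onert::backend::cpu::ops::ArgMinMaxLayer>();
  fn->configure(input, output, axis, /*is_arg_max=*/true); // store operands and mode
  return fn; // the executor calls fn->run() later
}

As the definition above shows, configure() only stores the operand pointers and the min/max mode; validation of the axis tensor and the actual reduction happen in run().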

◆ run()

void onert::backend::cpu::ops::ArgMinMaxLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 50 of file ArgMinMaxLayer.cc.

51{
52 if (_axis->total_size() != sizeof(int32_t))
53 {
54 throw std::runtime_error("ArgMinMax: wrong shape of axis");
55 }
56 auto axis = *getBuffer<int32_t>(_axis);
57 if (axis < 0)
58 {
59 axis += _input->getShape().rank();
60 }
61#define TF_LITE_ARG_MIN_MAX(input_type, axis_type, output_type) \
62 ArgMinMax(getShape(_input), getBuffer<input_type>(_input), getShape(_output), \
63 getBuffer<output_type>(_output), axis, GetComparefunction<input_type>(_is_arg_max));
64 if (_output->data_type() == ir::DataType::INT32)
65 {
66 switch (_input->data_type())
67 {
68 case ir::DataType::FLOAT32:
69 TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
70 break;
71 case ir::DataType::QUANT_UINT8_ASYMM:
72 case ir::DataType::UINT8:
73 TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
74 break;
75 case ir::DataType::QUANT_INT8_ASYMM:
76 TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
77 break;
78 case ir::DataType::INT32:
79 TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
80 break;
81 default:
82 throw std::runtime_error("ArgMinMax: unsupported data type");
83 }
84 }
85 else if (_output->data_type() == ir::DataType::INT64)
86 {
87 switch (_input->data_type())
88 {
89 case ir::DataType::FLOAT32:
90 TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
91 break;
92 case ir::DataType::QUANT_UINT8_ASYMM:
93 case ir::DataType::UINT8:
94 TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
95 break;
96 case ir::DataType::QUANT_INT8_ASYMM:
97 TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t);
98 break;
99 case ir::DataType::INT32:
100 TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
101 break;
102 default:
103 throw std::runtime_error("ArgMinMax: unsupported data type");
104 }
105 }
106 else
107 {
108 throw std::runtime_error("ArgMinMax: unsupported data type");
109 }
110
111#undef TF_LITE_ARG_MIN_MAX
112}
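
For intuition, here is a self-contained sketch of the arg-min/arg-max reduction that the TF_LITE_ARG_MIN_MAX macro dispatches to for each input/output type pair. It is an illustrative reimplementation assuming a row-major layout, not the backend's ArgMinMax kernel; ArgMinMaxSketch and its parameters are hypothetical names:

// Illustrative sketch only: arg-min/arg-max over one axis of a row-major
// tensor whose shape is given as a dimension vector.
#include <cstdint>
#include <vector>

template <typename T, typename OutT>
void ArgMinMaxSketch(const std::vector<int> &dims, const T *input, OutT *output, int axis,
                     bool is_arg_max)
{
  // View the tensor as [outer, axis_size, inner] around the reduced axis.
  int outer = 1, inner = 1;
  for (int i = 0; i < axis; ++i)
    outer *= dims[i];
  const int axis_size = dims[axis];
  for (int i = axis + 1; i < static_cast<int>(dims.size()); ++i)
    inner *= dims[i];

  // Comparator selecting either the maximum or the minimum element.
  auto better = [is_arg_max](T candidate, T best) {
    return is_arg_max ? candidate > best : candidate < best;
  };

  for (int o = 0; o < outer; ++o)
    for (int in = 0; in < inner; ++in)
    {
      OutT best_idx = 0;
      T best_val = input[(o * axis_size + 0) * inner + in];
      for (int a = 1; a < axis_size; ++a)
      {
        const T val = input[(o * axis_size + a) * inner + in];
        if (better(val, best_val))
        {
          best_val = val;
          best_idx = static_cast<OutT>(a);
        }
      }
      output[o * inner + in] = best_idx; // index of the min/max along `axis`
    }
}

run() first checks that the axis tensor holds a single int32_t, normalizes a negative axis against the input rank, and then picks a template instantiation from the runtime input and output data types, which is why the switch enumerates every supported combination.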

References onert::backend::IPortableTensor::data_type(), onert::backend::IPortableTensor::getShape(), TF_LITE_ARG_MIN_MAX, and onert::backend::IPortableTensor::total_size().


The documentation for this class was generated from the following files:
ArgMinMaxLayer.h
ArgMinMaxLayer.cc