ONE - On-device Neural Engine
onert::backend::cpu::ops::BroadcastToLayer Class Reference

#include <BroadcastToLayer.h>


Public Member Functions

 BroadcastToLayer ()
 
void configure (const IPortableTensor *input, const IPortableTensor *shape, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 34 of file BroadcastToLayer.h.

Constructor & Destructor Documentation

◆ BroadcastToLayer()

onert::backend::cpu::ops::BroadcastToLayer::BroadcastToLayer ( )

Definition at line 30 of file BroadcastToLayer.cc.

: _input(nullptr), _shape(nullptr), _output(nullptr)
{
  // DO NOTHING
}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::BroadcastToLayer::configure (const IPortableTensor *input, const IPortableTensor *shape, IPortableTensor *output)

Definition at line 35 of file BroadcastToLayer.cc.

{
  _input = input;
  _shape = shape;
  _output = output;
}
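
configure() only stores the operand pointers; it performs no shape checking of its own. For reference, tf.broadcast_to, which run() follows per the note in the source, requires that the shapes be aligned at their trailing dimensions and that every input dimension either equal the corresponding target dimension or be 1. The standalone sketch below illustrates that rule; it is not part of the onert API, and the function name isBroadcastableTo is hypothetical.

#include <cstddef>
#include <vector>

// Illustration only (not onert code): returns true if a tensor with shape
// `input` can be broadcast to shape `target` under tf.broadcast_to rules.
// Shapes are aligned at the trailing dimensions; each input dimension must
// match the target dimension or be 1, and the input rank must not exceed
// the target rank.
bool isBroadcastableTo(const std::vector<int> &input, const std::vector<int> &target)
{
  if (input.size() > target.size())
    return false;
  for (std::size_t i = 0; i < input.size(); ++i)
  {
    const int in_dim = input[input.size() - 1 - i];
    const int target_dim = target[target.size() - 1 - i];
    if (in_dim != target_dim && in_dim != 1)
      return false;
  }
  return true;
}

// Example: a [1, 3] tensor broadcasts to [2, 3] but not to [2, 4].
// isBroadcastableTo({1, 3}, {2, 3}) -> true
// isBroadcastableTo({1, 3}, {2, 4}) -> false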

◆ run()

void onert::backend::cpu::ops::BroadcastToLayer::run ( ) [override], [virtual]

Implements onert::exec::IFunction.

Definition at line 43 of file BroadcastToLayer.cc.

{
  // NOTE: Implemented to follow how the tf.broadcast_to operation works; see the
  //       API document (https://www.tensorflow.org/api_docs/python/tf/broadcast_to).

  switch (_output->data_type())
  {
    // TODO: INT8 and UINT8 also need to be supported once quantization is applied.
    case OperandType::FLOAT32:
      nnfw::cker::BroadcastTo<float>(getShape(_input), reinterpret_cast<float *>(_input->buffer()),
                                     getShape(_output), getBuffer<float>(_output));
      break;
    case OperandType::INT32:
      nnfw::cker::BroadcastTo<int32_t>(getShape(_input),
                                       reinterpret_cast<int32_t *>(_input->buffer()),
                                       getShape(_output), getBuffer<int32_t>(_output));
      break;
    case OperandType::UINT32:
      nnfw::cker::BroadcastTo<uint32_t>(getShape(_input),
                                        reinterpret_cast<uint32_t *>(_input->buffer()),
                                        getShape(_output), getBuffer<uint32_t>(_output));
      break;
    default:
      throw std::runtime_error{"BroadcastToLayer: unsupported data type"};
  }
}

References onert::backend::ITensor::buffer(), onert::backend::IPortableTensor::data_type(), and onert::backend::cpu::ops::getShape().

Referenced by package.infer.session::inference().
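
run() switches on the output data type and delegates the element copy to nnfw::cker::BroadcastTo<T>. The sketch below is not the cker implementation; it is a minimal standalone illustration, assuming row-major flat buffers and tf.broadcast_to semantics, of the kind of broadcast copy that call performs. The function name broadcastTo and its signature are made up for this example.

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustration only (not nnfw::cker code): broadcasts a row-major `input`
// buffer with shape `in_shape` to `out_shape`, repeating dimensions of size 1.
template <typename T>
std::vector<T> broadcastTo(const std::vector<T> &input, const std::vector<int> &in_shape,
                           const std::vector<int> &out_shape)
{
  // Left-pad the input shape with 1s so both shapes have the same rank.
  std::vector<int> padded(out_shape.size(), 1);
  std::copy(in_shape.begin(), in_shape.end(),
            padded.end() - static_cast<std::ptrdiff_t>(in_shape.size()));

  std::size_t out_size = 1;
  for (int d : out_shape)
    out_size *= static_cast<std::size_t>(d);

  std::vector<T> output(out_size);
  for (std::size_t out_idx = 0; out_idx < out_size; ++out_idx)
  {
    // Decompose the flat output index into per-axis coordinates (innermost
    // axis first) and map each coordinate back to the input, using 0 where
    // the input dimension is 1.
    std::size_t rem = out_idx;
    std::size_t in_idx = 0;
    std::size_t in_stride = 1;
    for (std::size_t axis = out_shape.size(); axis-- > 0;)
    {
      const std::size_t dim = static_cast<std::size_t>(out_shape[axis]);
      const std::size_t coord = rem % dim;
      rem /= dim;
      const std::size_t in_coord = (padded[axis] == 1) ? 0 : coord;
      in_idx += in_coord * in_stride;
      in_stride *= static_cast<std::size_t>(padded[axis]);
    }
    output[out_idx] = input[in_idx];
  }
  return output;
}

// Example: broadcasting {1, 2, 3} with shape [1, 3] to [2, 3] yields
// {1, 2, 3, 1, 2, 3}.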


The documentation for this class was generated from the following files:
BroadcastToLayer.h
BroadcastToLayer.cc