ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::ruy::ops::FullyConnectedLayer Class Reference

#include <FullyConnectedLayer.h>

Collaboration diagram for onert::backend::ruy::ops::FullyConnectedLayer:

Public Member Functions

 FullyConnectedLayer ()
 
 ~FullyConnectedLayer ()
 
void fullyConnectedFloat32 ()
 
void configure (const IPortableTensor *input, const IPortableTensor *weights, const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output, const std::shared_ptr< ExternalContext > &external_context)
 
void run () override
 
void prepare () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 

Detailed Description

Definition at line 35 of file FullyConnectedLayer.h.

Constructor & Destructor Documentation

◆ FullyConnectedLayer()

onert::backend::ruy::ops::FullyConnectedLayer::FullyConnectedLayer ( )

Definition at line 32 of file FullyConnectedLayer.cc.

33 : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr),
34 _activation(ir::Activation::NONE), _external_context(nullptr)
35{
36 // DO NOTHING
37}

◆ ~FullyConnectedLayer()

onert::backend::ruy::ops::FullyConnectedLayer::~FullyConnectedLayer ( )
default

Member Function Documentation

◆ configure()

void onert::backend::ruy::ops::FullyConnectedLayer::configure ( const IPortableTensor *  input,
const IPortableTensor *  weights,
const IPortableTensor *  bias,
ir::Activation  activation,
IPortableTensor *  output,
const std::shared_ptr< ExternalContext > &  external_context 
)

Definition at line 61 of file FullyConnectedLayer.cc.

65{
66 _input = input;
67 _weights = weights;
68 _bias = bias;
69 _activation = activation;
70 _output = output;
71 _external_context = external_context;
72}

◆ fullyConnectedFloat32()

void onert::backend::ruy::ops::FullyConnectedLayer::fullyConnectedFloat32 ( )

Definition at line 41 of file FullyConnectedLayer.cc.

42{
43 float output_activation_min = 0, output_activation_max = 0;
44 CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
45 nnfw::ruy::FullyConnectedParams op_params;
46
47 op_params.float_activation_min = output_activation_min;
48 op_params.float_activation_max = output_activation_max;
49 op_params.activation = convertActivationType(_activation);
50 op_params.lhs_cacheable = _weights->is_constant();
51 op_params.rhs_cacheable = _input->is_constant();
52
53 nnfw::ruy::FullyConnected(
54 op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
55 getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
56 getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
57 getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
58 _external_context->ruy_context());
59}
bool is_constant() const override final
Return true if the tensor is constant.
virtual uint8_t * buffer() const =0
void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape, const float *input_data, const Shape &weights_shape, const float *weights_data, const Shape &, const float *optional_bias_data, const Shape &output_shape, float *output_data, ::ruy::Context *ruy_context)
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
nnfw::ruy::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
FusedActivationFunctionType activation
Definition Types.h:83

References nnfw::ruy::FullyConnectedParams::activation, onert::backend::ITensor::buffer(), onert::util::CalculateActivationRange(), onert::backend::ruy::ops::convertActivationType(), nnfw::ruy::FullyConnectedParams::float_activation_max, nnfw::ruy::FullyConnectedParams::float_activation_min, nnfw::ruy::FullyConnected(), onert::backend::ruy::ops::getTensorShape(), onert::backend::IPortableTensor::is_constant(), nnfw::ruy::FullyConnectedParams::lhs_cacheable, and nnfw::ruy::FullyConnectedParams::rhs_cacheable.

Referenced by run().

◆ prepare()

void onert::backend::ruy::ops::FullyConnectedLayer::prepare ( )
overridevirtual

Reimplemented from onert::exec::IFunction.

Definition at line 86 of file FullyConnectedLayer.cc.

87{
88 if (_bias && _bias->is_constant())
89 {
90 const int bias_size = getTensorShape(_bias).FlatSize();
91 if (nnfw::ruy::IsZeroVector(reinterpret_cast<float *>(_bias->buffer()), bias_size))
92 {
93 _bias = nullptr;
94 }
95 }
96}
int FlatSize() const
Definition Shape.h:181
bool IsZeroVector(const float *vector, int v_size)
Definition TensorUtils.h:29

References onert::backend::ITensor::buffer(), nnfw::ruy::Shape::FlatSize(), onert::backend::ruy::ops::getTensorShape(), onert::backend::IPortableTensor::is_constant(), and nnfw::ruy::IsZeroVector().

◆ run()

void onert::backend::ruy::ops::FullyConnectedLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 74 of file FullyConnectedLayer.cc.

75{
76 if (_input->data_type() == OperandType::FLOAT32)
77 {
78 fullyConnectedFloat32();
79 }
80 else
81 {
82 throw std::runtime_error{"FullyConnected: unsupported data type"};
83 }
84}
ir::DataType data_type() const override final

References onert::backend::IPortableTensor::data_type(), and fullyConnectedFloat32().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: