ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::ruy::ops::FullyConnectedLayer Class Reference

#include <FullyConnectedLayer.h>

Collaboration diagram for onert::backend::ruy::ops::FullyConnectedLayer:

Public Member Functions

 FullyConnectedLayer ()
 
 ~FullyConnectedLayer ()
 
void fullyConnectedFloat32 ()
 
void configure (const IPortableTensor *input, const IPortableTensor *weights, const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output, const std::shared_ptr< ExternalContext > &external_context)
 
void run () override
 
void prepare () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 

Detailed Description

Definition at line 29 of file FullyConnectedLayer.h.

Constructor & Destructor Documentation

◆ FullyConnectedLayer()

onert::backend::ruy::ops::FullyConnectedLayer::FullyConnectedLayer ( )

Definition at line 62 of file FullyConnectedLayer.cc.

63 : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr),
64 _activation(ir::Activation::NONE), _external_context(nullptr)
65{
66 // DO NOTHING
67}

◆ ~FullyConnectedLayer()

onert::backend::ruy::ops::FullyConnectedLayer::~FullyConnectedLayer ( )
default

Member Function Documentation

◆ configure()

void onert::backend::ruy::ops::FullyConnectedLayer::configure ( const IPortableTensor *  input,
const IPortableTensor *  weights,
const IPortableTensor *  bias,
ir::Activation  activation,
IPortableTensor *  output,
const std::shared_ptr< ExternalContext > &  external_context 
)

Definition at line 91 of file FullyConnectedLayer.cc.

95{
96 _input = input;
97 _weights = weights;
98 _bias = bias;
99 _activation = activation;
100 _output = output;
101 _external_context = external_context;
102}

◆ fullyConnectedFloat32()

void onert::backend::ruy::ops::FullyConnectedLayer::fullyConnectedFloat32 ( )

Definition at line 71 of file FullyConnectedLayer.cc.

72{
73 float output_activation_min = 0, output_activation_max = 0;
74 CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
75 nnfw::ruy::FullyConnectedParams op_params;
76
77 op_params.float_activation_min = output_activation_min;
78 op_params.float_activation_max = output_activation_max;
79 op_params.activation = convertActivationType(_activation);
80 op_params.lhs_cacheable = _weights->is_constant();
81 op_params.rhs_cacheable = _input->is_constant();
82
83 nnfw::ruy::FullyConnected(
84 op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
85 getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
86 getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
87 getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
88 _external_context->ruy_context());
89}
bool is_constant() const override final
Return true if the tensor is constant.
virtual uint8_t * buffer() const =0
void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape, const float *input_data, const Shape &weights_shape, const float *weights_data, const Shape &, const float *optional_bias_data, const Shape &output_shape, float *output_data, ::ruy::Context *ruy_context)
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
nnfw::ruy::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
FusedActivationFunctionType activation
Definition Types.h:83

References nnfw::ruy::FullyConnectedParams::activation, onert::backend::ITensor::buffer(), onert::util::CalculateActivationRange(), onert::backend::ruy::ops::convertActivationType(), nnfw::ruy::FullyConnectedParams::float_activation_max, nnfw::ruy::FullyConnectedParams::float_activation_min, nnfw::ruy::FullyConnected(), onert::backend::ruy::ops::getTensorShape(), onert::backend::IPortableTensor::is_constant(), nnfw::ruy::FullyConnectedParams::lhs_cacheable, and nnfw::ruy::FullyConnectedParams::rhs_cacheable.

Referenced by run().

◆ prepare()

void onert::backend::ruy::ops::FullyConnectedLayer::prepare ( )
overridevirtual

Reimplemented from onert::exec::IFunction.

Definition at line 116 of file FullyConnectedLayer.cc.

117{
118 if (_bias && _bias->is_constant())
119 {
120 const int bias_size = getTensorShape(_bias).FlatSize();
121 if (nnfw::ruy::IsZeroVector(reinterpret_cast<float *>(_bias->buffer()), bias_size))
122 {
123 _bias = nullptr;
124 }
125 }
126}
int FlatSize() const
Definition Shape.h:181
bool IsZeroVector(const float *vector, int v_size)
Definition TensorUtils.h:29

References onert::backend::ITensor::buffer(), nnfw::ruy::Shape::FlatSize(), onert::backend::ruy::ops::getTensorShape(), onert::backend::IPortableTensor::is_constant(), and nnfw::ruy::IsZeroVector().

◆ run()

void onert::backend::ruy::ops::FullyConnectedLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 104 of file FullyConnectedLayer.cc.

105{
106 if (_input->data_type() == OperandType::FLOAT32)
107 {
108 fullyConnectedFloat32();
109 }
110 else
111 {
112 throw std::runtime_error{"FullyConnected: unsupported data type"};
113 }
114}
ir::DataType data_type() const override final

References onert::backend::IPortableTensor::data_type(), and fullyConnectedFloat32().


The documentation for this class was generated from the following files: