#include "../KernelGenerator.h"
#include "../Validator.h"
void Validator::visit(const ir::operation::FullyConnected &) { _supported = true; }
void KernelGenerator::visit(const ir::operation::FullyConnected &node)
{
  using ir::operation::FullyConnected;

  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
  const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
  const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
  const auto activation = node.param().activation;
  const auto weights_format = node.param().weights_format;
  if (weights_format != ir::FullyConnectedWeightsFormat::Default)
    throw std::runtime_error("Unsupported FullyConnected Weights Format");

  auto output_tensor = _tensor_reg->getPortableTensor(output_index);
  auto input_tensor = _tensor_reg->getPortableTensor(input_index);
  auto weight_tensor = _tensor_reg->getPortableTensor(weight_index);
  auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_reg->getPortableTensor(bias_index);

  auto fn = std::make_unique<ops::FullyConnectedLayer>();

  fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
                _external_context);

  _return_fn = std::move(fn);
}
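The generated layer follows a configure-then-run contract: the kernel generator binds the tensors once, and the executor later invokes the function object. A minimal sketch of that flow, reusing the tensors gathered in visit() above (the driver sequence itself is hypothetical; in onert the executor owns it):

// Hypothetical driver, for illustration only.
auto layer = std::make_unique<ops::FullyConnectedLayer>();
layer->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
                 external_context);  // bind operands and the fused activation once
layer->run();                        // later, per inference, compute the output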
FullyConnectedLayer::FullyConnectedLayer()
  : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr),
    _activation(ir::Activation::NONE), _external_context(nullptr)
{
}
float output_activation_min = 0, output_activation_max = 0;
// ...
_external_context->ruy_context());
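The two fragments above open and close the body of fullyConnectedFloat32(). A sketch of the full routine, assuming the nnfw::ruy helpers and the FullyConnectedParams fields listed in the declarations at the end of this section; the exact statement order is an assumption:

void FullyConnectedLayer::fullyConnectedFloat32()
{
  // Resolve the fused activation into a clamp range for the output.
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);

  nnfw::ruy::FullyConnectedParams op_params;
  op_params.activation = convertActivationType(_activation);
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;

  // Dispatch the matrix multiply to ruy through the backend's shared context.
  nnfw::ruy::FullyConnected(
    op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
    getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
    getTensorShape(_bias), _bias ? reinterpret_cast<const float *>(_bias->buffer()) : nullptr,
    getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
    _external_context->ruy_context());
}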
const std::shared_ptr<ExternalContext> &external_context)
// ...
_activation = activation;
// ...
_external_context = external_context;
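These lines are the tail of the configure() declaration and two of its member assignments. A fuller sketch, assuming the remaining assignments simply mirror the signature shown in the declarations below:

void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights,
                                    const IPortableTensor *bias, ir::Activation activation,
                                    IPortableTensor *output,
                                    const std::shared_ptr<ExternalContext> &external_context)
{
  // Cache operand tensors and parameters; no computation happens here.
  _input = input;
  _weights = weights;
  _bias = bias;
  _activation = activation;
  _output = output;
  _external_context = external_context;
}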
if (_input->data_type() == OperandType::FLOAT32)
// ...
throw std::runtime_error{"FullyConnected: unsupported data type"};
Declarations referenced above:

// IPortableTensor: a tensor class that is portable for other backends.
ir::DataType data_type() const override final;
bool is_constant() const override final;  // Return true if the tensor is constant.
virtual uint8_t *buffer() const = 0;

std::unique_ptr<exec::IFunction> _return_fn;

void fullyConnectedFloat32();
void configure(const IPortableTensor *input, const IPortableTensor *weights,
               const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output,
               const std::shared_ptr<ExternalContext> &external_context);

void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape,
                    const float *input_data, const Shape &weights_shape,
                    const float *weights_data, const Shape &, const float *optional_bias_data,
                    const Shape &output_shape, float *output_data, ::ruy::Context *ruy_context);
bool IsZeroVector(const float *vector, int v_size);
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor);
nnfw::ruy::FusedActivationFunctionType convertActivationType(const ir::Activation activation);
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max);

// FullyConnectedParams fields used when setting up the ruy call:
float float_activation_min;
FusedActivationFunctionType activation;
float float_activation_max;