std::unique_ptr<backend::train::Tensor> createTransposedTensor(const IPortableTensor *origin_tensor)
{
  const auto &origin_shape = origin_tensor->getShape();
  assert(origin_shape.rank() == 2);

  auto transposed_info = origin_tensor->get_info();
  auto transposed_shape = ir::Shape{origin_shape.dim(1), origin_shape.dim(0)};
  transposed_info.shape(transposed_shape);

  return std::make_unique<backend::train::Tensor>(transposed_info);
}
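// Note on the transposed scratch tensors created above: they let the backward
// pass reuse the forward FullyConnected kernel for both gradient matmuls. The
// kernel multiplies its first operand by the transpose of its second, so
// feeding it pre-transposed weights, inputs, and output gradients yields the
// required products without a dedicated backward matmul kernel.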
FullyConnectedLayer::FullyConnectedLayer()
  : /* base and forward members elided */ _grad_weights{nullptr}, _grad_bias{nullptr},
    _back_prop_input{nullptr}, _back_prop_output{nullptr}, _transposed_weights{nullptr},
    _transposed_input{nullptr}, _transposed_back_prop_output{nullptr}, _act_back_prop_output{nullptr}
{
}
void FullyConnectedLayer::configureBackward(
  const IPortableTensor *input, const IPortableTensor *weights, IPortableTensor *output,
  IPortableTensor *back_prop_input, IPortableTensor *grad_weights, IPortableTensor *grad_bias,
  const IPortableTensor *back_prop_output, ir::Activation activation,
  ir::FullyConnectedWeightsFormat weights_format)
{
  _back_prop_input = back_prop_input;
  _grad_weights = grad_weights;
  _grad_bias = grad_bias;
  _back_prop_output = back_prop_output;

  if (weights_format != ir::FullyConnectedWeightsFormat::Default)
    throw std::runtime_error{
      "train FullyConnectedLayer: Weight formats other than default are not supported."};

  // Only rank-2 operands are supported; checks for the remaining operands are
  // elided in this excerpt
  if (input->get_info().shape().rank() != 2 || weights->get_info().shape().rank() != 2 ||
      output->get_info().shape().rank() != 2 || back_prop_input->get_info().shape().rank() != 2)
    throw std::runtime_error{
      "train FullyConnectedLayer: Input other ranks than 2 are not supported."};
  _transposed_weights = createTransposedTensor(weights);
  _transposed_weights->setBuffer(std::make_shared<basic::Allocator>(weights->total_size()));

  _transposed_input = createTransposedTensor(input);
  _transposed_input->setBuffer(std::make_shared<basic::Allocator>(input->total_size()));

  _transposed_back_prop_output = createTransposedTensor(back_prop_output);
  _transposed_back_prop_output->setBuffer(
    std::make_shared<basic::Allocator>(back_prop_output->total_size()));

  // A separate buffer for the activation gradient is only needed when a fused
  // activation has to be backpropagated first
  if (activation != ir::Activation::NONE)
  {
    _act_back_prop_output = std::make_unique<Tensor>(_back_prop_output->get_info());
    _act_back_prop_output->setBuffer(
      std::make_shared<basic::Allocator>(_back_prop_output->total_size()));
  }
}
void FullyConnectedLayer::backward()
{
  const auto data_type = _back_prop_output->data_type();
  switch (data_type)
  {
    case OperandType::FLOAT32:
    {
      assert(data_type == _grad_weights->data_type());
      assert(_grad_bias == nullptr || data_type == _grad_bias->data_type());
      backwardFloat32();
      break;
    }
    default:
      throw std::runtime_error{"train FullyConnectedLayer: unsupported data type"};
  }
}
void FullyConnectedLayer::backwardFloat32()
{
  // Backpropagate through the fused activation first (if any)
  const IPortableTensor *backprop_act;
  try
  {
    backprop_act =
      backpropActivation(_activation, _output, _back_prop_output, _act_back_prop_output.get());
  }
  catch (const std::exception &e)
  {
    throw std::runtime_error{"train FullyConnectedLayer: " + std::string(e.what())};
  }
  assert(backprop_act != nullptr);

  // Permutation that swaps the two axes of a rank-2 tensor
  nnfw::cker::TransposeParams transpose_param;
  transpose_param.perm_count = 2;
  transpose_param.perm[0] = 1;
  transpose_param.perm[1] = 0;
  // The gradient matmuls themselves run with no fused activation
  nnfw::cker::FullyConnectedParams op_params;
  float output_activation_min = 0;
  float output_activation_max = 0;
  CalculateActivationRange(ir::Activation::NONE, &output_activation_min, &output_activation_max);
  op_params.activation = nnfw::cker::FusedActivationFunctionType::kNone;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
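  // The two FullyConnected calls below realize the standard dense-layer
  // gradients for Y = X * W^T (X: [batch, in], W: [out, in], dY = dL/dY):
  //   dX = fc(dY, W^T)   = dY * W         -> written to _back_prop_input
  //   dW = fc(dY^T, X^T) = dY^T * X       -> written to _grad_weights
  //   db = sum of dY over the batch axis  -> written to _grad_bias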
  // dX: transpose the weights, then back_prop_input = backprop_act * weights
  auto transposed_weights = _transposed_weights.get();
  assert(transposed_weights->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(_weights), getBuffer<float>(_weights),
                        getShape(transposed_weights), getBuffer<float>(transposed_weights));

  nnfw::cker::FullyConnected(op_params, getShape(backprop_act), getBuffer<float>(backprop_act),
                             getShape(transposed_weights), getBuffer<float>(transposed_weights),
                             getShape(nullptr), nullptr, getShape(_back_prop_input),
                             getBuffer<float>(_back_prop_input));
  // dW: transpose the input and the activation gradient, then
  // grad_weights = backprop_act^T * input
  auto transposed_input = _transposed_input.get();
  assert(transposed_input->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(_input), getBuffer<float>(_input),
                        getShape(transposed_input), getBuffer<float>(transposed_input));

  auto transposed_back_prop_output = _transposed_back_prop_output.get();
  assert(transposed_back_prop_output->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(backprop_act), getBuffer<float>(backprop_act),
                        getShape(transposed_back_prop_output),
                        getBuffer<float>(transposed_back_prop_output));

  nnfw::cker::FullyConnected(
    op_params, getShape(transposed_back_prop_output), getBuffer<float>(transposed_back_prop_output),
    getShape(transposed_input), getBuffer<float>(transposed_input), getShape(nullptr), nullptr,
    getShape(_grad_weights), getBuffer<float>(_grad_weights));

  // db: reduce the activation gradient over the batch axis
  if (_grad_bias)
  {
    nnfw::cker::train::FullyConnectedBiasGrad(getShape(backprop_act),
                                              getBuffer<float>(backprop_act),
                                              getShape(_grad_bias), getBuffer<float>(_grad_bias));
  }
}
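// Typical call sequence, sketched as hypothetical driver code (variable names
// assumed, not taken from the original sources):
//   layer.configureBackward(input, weights, output, back_prop_input, grad_weights,
//                           grad_bias, back_prop_output, ir::Activation::RELU,
//                           ir::FullyConnectedWeightsFormat::Default);
//   layer.backward();  // dispatches to backwardFloat32() for FLOAT32 operands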
Referenced declarations:

IPortableTensor, a tensor class that is portable across backends:
  size_t total_size() const override final
  const ir::OperandInfo &get_info() const
  ir::DataType data_type() const override final
  ir::Shape getShape() const override final  (get ir::Shape of tensor)

FullyConnectedLayer members and methods:
  ir::Activation _activation
  const IPortableTensor *_weights
  const IPortableTensor *_bias
  IPortableTensor *_output
  const IPortableTensor *_input
  void forward(bool training) override
  void configureBackward(const IPortableTensor *input, const IPortableTensor *weights,
                         IPortableTensor *output, IPortableTensor *back_prop_input,
                         IPortableTensor *grad_weights, IPortableTensor *grad_bias,
                         const IPortableTensor *back_prop_output, ir::Activation activation,
                         ir::FullyConnectedWeightsFormat weights_format)

ir::OperandInfo:
  const Shape &shape() const  (return tensor shape)

Kernels and helpers:
  void FullyConnectedBiasGrad(const Shape &incomming_shape, const T *incomming_data,
                              const Shape &grad_shape, T *grad_data)
  void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape,
                      const float *input_data, const Shape &weights_shape,
                      const float *weights_data, const Shape &, const float *bias_data,
                      const Shape &, float *output_data)
  void Transpose(const TransposeParams &unshrunk_params, const Shape &unshrunk_input_shape,
                 const T *input_data, const Shape &unshrunk_output_shape, T *output_data)
  const IPortableTensor *backpropActivation(const ir::Activation &activation,
                                            const IPortableTensor *output,
                                            const IPortableTensor *input_backprop,
                                            IPortableTensor *output_backprop)
    (backpropagate activation)
  nnfw::cker::Shape getShape(const IPortableTensor *tensor)  (get shape of tensor)

Related types and fields:
  FullyConnectedWeightsFormat
  FullyConnectedParams: FusedActivationFunctionType activation,
                        float float_activation_min, float float_activation_max
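For reference, a minimal standalone sketch of the same rank-2 gradient math on raw
arrays, assuming the layout conventions above (X: [batch, in], W: [out, in],
row-major). This is an illustration of what the kernel calls compute, not onert
code; the function and variable names are hypothetical.

#include <cstddef>
#include <vector>

// Forward: Y = X * W^T + b. Given dY = dL/dY, compute:
//   dX = dY * W, dW = dY^T * X, db = sum of dY over the batch axis.
void fcBackwardRef(const std::vector<float> &x, const std::vector<float> &w,
                   const std::vector<float> &dy, std::size_t batch, std::size_t in,
                   std::size_t out, std::vector<float> &dx, std::vector<float> &dw,
                   std::vector<float> &db)
{
  dx.assign(batch * in, 0.0f);
  dw.assign(out * in, 0.0f);
  db.assign(out, 0.0f);
  for (std::size_t b = 0; b < batch; ++b)
    for (std::size_t o = 0; o < out; ++o)
    {
      const float g = dy[b * out + o];
      db[o] += g; // bias gradient: reduction over the batch axis
      for (std::size_t i = 0; i < in; ++i)
      {
        dx[b * in + i] += g * w[o * in + i]; // dX = dY * W
        dw[o * in + i] += g * x[b * in + i]; // dW = dY^T * X
      }
    }
}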