std::unique_ptr<backend::train::Tensor>
createTransposedTensor(const backend::IPortableTensor *origin_tensor)
{
  const auto &origin_shape = origin_tensor->getShape();
  assert(origin_shape.rank() == 2);

  auto transposed_info = origin_tensor->get_info();
  auto transposed_shape = ir::Shape{origin_shape.dim(1), origin_shape.dim(0)};
  transposed_info.shape(transposed_shape);

  return std::make_unique<backend::train::Tensor>(transposed_info);
}
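// A minimal usage sketch (tensor names hypothetical): the helper only clones
// the metadata with the two dims swapped; the caller still has to attach a
// buffer before writing to the tensor, exactly as configureBackward() does below:
//
//   auto transposed = createTransposedTensor(weights);  // [units, in] -> [in, units]
//   transposed->setBuffer(std::make_shared<basic::Allocator>(weights->total_size()));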
FullyConnectedLayer::FullyConnectedLayer()
  : _grad_weights{nullptr}, _grad_bias{nullptr},
    _back_prop_input{nullptr}, _back_prop_output{nullptr}, _transposed_weights{nullptr},
    _transposed_input{nullptr}, _transposed_back_prop_output{nullptr},
    _act_back_prop_output{nullptr}
{
  // DO NOTHING
}
void FullyConnectedLayer::configureBackward(
  const IPortableTensor *input, const IPortableTensor *weights, IPortableTensor *output,
  IPortableTensor *back_prop_input, IPortableTensor *grad_weights, IPortableTensor *grad_bias,
  const IPortableTensor *back_prop_output, ir::Activation activation,
  ir::FullyConnectedWeightsFormat weights_format)
{
  _back_prop_input = back_prop_input;
  _grad_weights = grad_weights;
  _grad_bias = grad_bias;
  _back_prop_output = back_prop_output;

  if (weights_format != ir::FullyConnectedWeightsFormat::Default)
    throw std::runtime_error{
      "train FullyConnectedLayer: Weight formats other than default are not supported."};

  if (input->get_info().shape().rank() != 2 || weights->get_info().shape().rank() != 2 ||
      output->get_info().shape().rank() != 2 || back_prop_input->get_info().shape().rank() != 2 ||
      back_prop_output->get_info().shape().rank() != 2 ||
      grad_weights->get_info().shape().rank() != 2)
    throw std::runtime_error{
      "train FullyConnectedLayer: Input other ranks than 2 are not supported."};
  _transposed_weights = createTransposedTensor(weights);
  _transposed_weights->setBuffer(std::make_shared<basic::Allocator>(weights->total_size()));

  _transposed_input = createTransposedTensor(input);
  _transposed_input->setBuffer(std::make_shared<basic::Allocator>(input->total_size()));

  _transposed_back_prop_output = createTransposedTensor(back_prop_output);
  _transposed_back_prop_output->setBuffer(
    std::make_shared<basic::Allocator>(back_prop_output->total_size()));

  if (activation != ir::Activation::NONE)
  {
    _act_back_prop_output = std::make_unique<Tensor>(_back_prop_output->get_info());
    _act_back_prop_output->setBuffer(
      std::make_shared<basic::Allocator>(_back_prop_output->total_size()));
  }
}
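// Note on the scratch tensor above: _act_back_prop_output is needed only when
// an activation is fused into the layer, because backpropActivation() (see
// backwardFloat32() below) needs a separate buffer for the activation's
// gradient; with ir::Activation::NONE, _back_prop_output is consumed directly
// and the buffer is never allocated.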
void FullyConnectedLayer::backward()
{
  const auto data_type = _back_prop_output->data_type();
  switch (data_type)
  {
    case OperandType::FLOAT32:
      assert(data_type == _grad_weights->data_type());
      assert(_grad_bias == nullptr || data_type == _grad_bias->data_type());
      backwardFloat32();
      break;
    default:
      throw std::runtime_error{"train FullyConnectedLayer: unsupported data type"};
  }
}
void FullyConnectedLayer::backwardFloat32()
{
  // Backpropagate through the fused activation first, since it is the last
  // step of the forward pass.
  const IPortableTensor *backprop_act;
  try
  {
    backprop_act =
      backpropActivation(_activation, _output, _back_prop_output, _act_back_prop_output.get());
  }
  catch (const std::exception &e)
  {
    throw std::runtime_error{"train FullyConnectedLayer: " + std::string(e.what())};
  }
  assert(backprop_act != nullptr);

  nnfw::cker::TransposeParams transpose_param;
  transpose_param.perm_count = 2;
  transpose_param.perm[0] = 1;
  transpose_param.perm[1] = 0;
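  // perm = {1, 0} swaps the two axes, i.e. an ordinary 2-D matrix transpose:
  // element (i, j) of the source lands at (j, i) of the destination. The same
  // params are reused for all three transposes below.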
  nnfw::cker::FullyConnectedParams op_params;
  op_params.activation = nnfw::cker::FusedActivationFunctionType::kNone;
  float output_activation_min = 0;
  float output_activation_max = 0;
  CalculateActivationRange(ir::Activation::NONE, &output_activation_min, &output_activation_max);
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
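  // The backward matmuls must not re-apply the fused activation (its gradient
  // has already been handled by backpropActivation above), so the params are
  // configured with kNone; asking for the NONE range leaves the clamp
  // effectively unbounded.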
  // Transpose the weights, then compute the gradient w.r.t. the input.
  auto transposed_weights = _transposed_weights.get();
  assert(transposed_weights->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(_weights), getBuffer<float>(_weights),
                        getShape(transposed_weights), getBuffer<float>(transposed_weights));

  nnfw::cker::FullyConnected(op_params, getShape(backprop_act), getBuffer<float>(backprop_act),
                             getShape(transposed_weights), getBuffer<float>(transposed_weights),
                             getShape(nullptr), nullptr, getShape(_back_prop_input),
                             getBuffer<float>(_back_prop_input));
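  // In matrix terms, assuming the usual [units, in] weight layout: the forward
  // pass computes Y = X * W^T, so the input gradient is dL/dX = dL/dY * W.
  // FullyConnected multiplies by the transpose of its weights argument, hence
  // it is invoked with W^T here to obtain dL/dY * (W^T)^T = dL/dY * W.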
  // Transpose the input and the incoming gradient, then compute the gradient
  // w.r.t. the weights.
  auto transposed_input = _transposed_input.get();
  assert(transposed_input->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(_input), getBuffer<float>(_input),
                        getShape(transposed_input), getBuffer<float>(transposed_input));

  auto transposed_back_prop_output = _transposed_back_prop_output.get();
  assert(transposed_back_prop_output->getShape().rank() == 2);
  nnfw::cker::Transpose(transpose_param, getShape(backprop_act), getBuffer<float>(backprop_act),
                        getShape(transposed_back_prop_output),
                        getBuffer<float>(transposed_back_prop_output));

  nnfw::cker::FullyConnected(
    op_params, getShape(transposed_back_prop_output), getBuffer<float>(transposed_back_prop_output),
    getShape(transposed_input), getBuffer<float>(transposed_input), getShape(nullptr), nullptr,
    getShape(_grad_weights), getBuffer<float>(_grad_weights));
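  // Likewise, the weight gradient is dL/dW = (dL/dY)^T * X, whose [units, in]
  // shape matches the stored weights; passing (dL/dY)^T as the "input" and
  // X^T as the "weights" makes the same kernel compute (dL/dY)^T * (X^T)^T.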
  // Compute the gradient w.r.t. the bias, if there is one.
  if (_bias)
  {
    assert(_grad_bias);
    nnfw::cker::train::FullyConnectedBiasGrad(getShape(backprop_act),
                                              getBuffer<float>(backprop_act),
                                              getShape(_grad_bias),
                                              getBuffer<float>(_grad_bias));
  }
}
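// Note on FullyConnectedBiasGrad above: the bias contributes
// Y[b][u] += bias[u] in the forward pass, so its gradient is the column sum
// of the incoming gradient over the batch, grad_bias[u] = sum_b dL/dY[b][u];
// e.g. dL/dY = [[1, 2], [3, 4]] gives a bias gradient of [4, 6].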
Referenced declarations:

IPortableTensor (a tensor class that is portable for other backends):
  size_t total_size() const override final
  const ir::OperandInfo &get_info() const
  ir::DataType data_type() const override final
  ir::Shape getShape() const override final  (Get ir::Shape of tensor.)

FullyConnectedLayer members and methods:
  ir::Activation _activation
  const IPortableTensor *_weights
  const IPortableTensor *_bias
  IPortableTensor *_output
  const IPortableTensor *_input
  void forward(bool training) override
  void configureBackward(const IPortableTensor *input, const IPortableTensor *weights,
      IPortableTensor *output, IPortableTensor *back_prop_input, IPortableTensor *grad_weights,
      IPortableTensor *grad_bias, const IPortableTensor *back_prop_output,
      ir::Activation activation, ir::FullyConnectedWeightsFormat weights_format)

Shape access:
  const Shape &shape() const  (Return tensor shape.)
  nnfw::cker::Shape getShape(const IPortableTensor *tensor)  (Get shape of tensor.)

Kernels and helpers:
  void FullyConnectedBiasGrad(const Shape &incomming_shape, const T *incomming_data,
      const Shape &grad_shape, T *grad_data)
  void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape,
      const float *input_data, const Shape &weights_shape, const float *weights_data,
      const Shape &, const float *bias_data, const Shape &, float *output_data)
  void Transpose(const TransposeParams &unshrunk_params, const Shape &unshrunk_input_shape,
      const T *input_data, const Shape &unshrunk_output_shape, T *output_data)
  const IPortableTensor *backpropActivation(const ir::Activation &activation,
      const IPortableTensor *output, const IPortableTensor *input_backprop,
      IPortableTensor *output_backprop)  (Backpropagate activation.)

Enums and params:
  ir::FullyConnectedWeightsFormat
  FusedActivationFunctionType activation
  float float_activation_min
  float float_activation_max
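For intuition, here is a minimal standalone sketch of the same backward math on
a tiny example, independent of onert and cker (all names are local to the
sketch); it computes dL/dX = dL/dY * W, dL/dW = (dL/dY)^T * X and the bias
gradient with plain loops, using the [units, in] weight layout assumed above:

#include <cstdio>
#include <vector>

int main()
{
  // Shapes: X is [batch, in], W is [units, in], dY = dL/dY is [batch, units].
  const int batch = 2, in = 3, units = 2;
  std::vector<float> X = {1, 2, 3, 4, 5, 6};  // [[1,2,3],[4,5,6]]
  std::vector<float> W = {1, 0, 1, 0, 1, 0};  // [[1,0,1],[0,1,0]]
  std::vector<float> dY = {1, 2, 3, 4};       // [[1,2],[3,4]]
  std::vector<float> dX(batch * in, 0), dW(units * in, 0), db(units, 0);

  // dL/dX = dL/dY * W : [batch, units] x [units, in] -> [batch, in]
  for (int b = 0; b < batch; ++b)
    for (int i = 0; i < in; ++i)
      for (int u = 0; u < units; ++u)
        dX[b * in + i] += dY[b * units + u] * W[u * in + i];

  // dL/dW = (dL/dY)^T * X : [units, batch] x [batch, in] -> [units, in]
  for (int u = 0; u < units; ++u)
    for (int i = 0; i < in; ++i)
      for (int b = 0; b < batch; ++b)
        dW[u * in + i] += dY[b * units + u] * X[b * in + i];

  // grad_bias[u] = sum_b dL/dY[b][u]
  for (int u = 0; u < units; ++u)
    for (int b = 0; b < batch; ++b)
      db[u] += dY[b * units + u];

  // Expected output: dX = 1 2 1 3 4 3, dW = 13 17 21 18 24 30, db = 4 6
  for (float v : dX) std::printf("%g ", v);
  std::printf("\n");
  for (float v : dW) std::printf("%g ", v);
  std::printf("\n");
  for (float v : db) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}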