template <typename Tensor>
std::unique_ptr<Tensor> createTransposedWeights(const IPortableTensor *origin_weights)
{
  const auto &origin_shape = origin_weights->getShape();
  assert(origin_shape.rank() == 4);

  auto transposed_info = origin_weights->get_info();
  // Reorder the OHWI weight shape into HWIO
  auto transposed_shape =
    ir::Shape{origin_shape.dim(1), origin_shape.dim(2), origin_shape.dim(3), origin_shape.dim(0)};
  transposed_info.shape(transposed_shape);

  return std::make_unique<Tensor>(transposed_info);
}
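
// Illustrative sketch (not part of the original file): createTransposedWeights
// only rewrites the shape metadata from OHWI to HWIO; the elements themselves
// are moved later by nnfw::cker::Transpose with perm {1, 2, 3, 0}. A naive,
// hypothetical equivalent of that permutation over raw row-major buffers:
static void transposeOHWItoHWIO(const float *src, float *dst, int O, int H, int W, int I)
{
  for (int o = 0; o < O; ++o)
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w)
        for (int i = 0; i < I; ++i)
          // src is laid out as [O][H][W][I], dst as [H][W][I][O]
          dst[((h * W + w) * I + i) * O + o] = src[((o * H + h) * W + w) * I + i];
}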

ConvolutionLayer::ConvolutionLayer()
  : _grad_weights{nullptr}, _grad_bias{nullptr}, _back_prop_input{nullptr},
    _back_prop_output{nullptr}, _transposed_weights{nullptr}
{
  // DO NOTHING
}

void ConvolutionLayer::configureBackward(const IPortableTensor *weights,
                                         IPortableTensor *back_prop_input,
                                         IPortableTensor *grad_weights, IPortableTensor *grad_bias,
                                         const IPortableTensor *back_prop_output,
                                         const ir::Activation activation)
{
  _back_prop_input = back_prop_input;
  _grad_weights = grad_weights;
  _grad_bias = grad_bias;
  _back_prop_output = back_prop_output;

  if (_dilationWidthFactor != 1 || _dilationHeightFactor != 1)
    throw std::runtime_error("train ConvolutionLayer: Unsupported dilation yet");

  _transposed_weights = createTransposedWeights<Tensor>(weights);
  _transposed_weights->setBuffer(
    std::make_shared<basic::Allocator>(_transposed_weights->total_size()));

  _conv_back_prop_output = std::make_unique<BackPropTensor>(back_prop_output->get_info());
  _conv_back_prop_output->setBuffer(
    std::make_shared<basic::Allocator>(_conv_back_prop_output->total_size()));

  _transposed_grad_weights = createTransposedWeights<GradientTensor>(weights);
  _transposed_grad_weights->setBuffer(
    std::make_shared<basic::Allocator>(_transposed_grad_weights->total_size()));

  if (activation != ir::Activation::NONE)
  {
    _act_back_prop_output = std::make_unique<BackPropTensor>(_back_prop_output->get_info());
    _act_back_prop_output->setBuffer(
      std::make_shared<basic::Allocator>(_act_back_prop_output->total_size()));
  }
}
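
// Hypothetical usage sketch (names are illustrative, not from the source): a
// training executor would call this once per node, after allocating the
// gradient and back-propagation tensors, e.g.
//
//   conv_layer->configureBackward(kernel, back_prop_input, grad_weights,
//                                 grad_bias, back_prop_output,
//                                 ir::Activation::RELU);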

void ConvolutionLayer::backward()
{
  const auto data_type = _back_prop_output->data_type();
  switch (data_type)
  {
    case OperandType::FLOAT32:
    {
      assert(data_type == _grad_bias->data_type());
      backwardFloat32();
      break;
    }
    default:
      throw std::runtime_error{"train ConvolutionLayer: unsupported data type"};
  }
}

void ConvolutionLayer::backwardFloat32()
{
  // Calculate gradient for activation
  const IPortableTensor *backprop_act;
  try
  {
    backprop_act =
      backpropActivation(_activation, _output, _back_prop_output, _act_back_prop_output.get());
  }
  catch (const std::exception &e)
  {
    throw std::runtime_error{"train ConvolutionLayer: " + std::string(e.what())};
  }
  assert(backprop_act != nullptr);
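
  // Illustration (not from this file): for ReLU, the activation backprop above
  // reduces to masking the incoming gradient wherever the forward output was
  // non-positive, element-wise:
  //
  //   outgoing[i] = output[i] > 0.0f ? incoming[i] : 0.0f;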

  // Initialize conv params for training kernels
  nnfw::cker::ConvParams conv_train_params;
  conv_train_params.padding_type = getPaddingType(_paddingType);
  conv_train_params.padding_values.width = _paddingLeft;
  conv_train_params.padding_values.height = _paddingTop;
  conv_train_params.stride_width = _strideWidth;
  conv_train_params.stride_height = _strideHeight;
  conv_train_params.dilation_width_factor = _dilationWidthFactor;
  conv_train_params.dilation_height_factor = _dilationHeightFactor;

  // Transpose the weights from OHWI to HWIO
  auto transposed_weights = _transposed_weights.get();
  assert(transposed_weights->getShape().rank() == 4);
  nnfw::cker::TransposeParams transpose_param;
  transpose_param.perm_count = transposed_weights->getShape().rank();
  transpose_param.perm[0] = 1;
  transpose_param.perm[1] = 2;
  transpose_param.perm[2] = 3;
  transpose_param.perm[3] = 0;
  nnfw::cker::Transpose(transpose_param, getShape(_kernel), getBuffer<float>(_kernel),
                        getShape(transposed_weights), getBuffer<float>(transposed_weights));
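
  // The permutation {1, 2, 3, 0} above sends the element at OHWI index
  // (o, h, w, i) to HWIO index (h, w, i, o); e.g. a {32, 3, 3, 16} OHWI kernel
  // buffer is rearranged into a {3, 3, 16, 32} HWIO one, the filter layout the
  // input-gradient kernel below consumes.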

  // Calculate gradient for input
  nnfw::cker::train::ConvInputGrad(
    conv_train_params, getShape(backprop_act), getBuffer<float>(backprop_act),
    getShape(transposed_weights), getBuffer<float>(transposed_weights), _paddingBottom,
    _paddingRight, getShape(_back_prop_input), getBuffer<float>(_back_prop_input));
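
  // In index form (stride 1, VALID padding for brevity), ConvInputGrad computes
  //   d_in[n][y][x][i] += d_out[n][oy][ox][o] * w_hwio[ky][kx][i][o]
  // summed over all (oy, ox, ky, kx, o) with y == oy + ky and x == ox + kx,
  // i.e. a transposed convolution of the incoming gradient with the filter.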

  // Calculate gradient for weights
  auto transposed_grad_weights = _transposed_grad_weights.get();
  assert(_grad_weights->getShape().rank() == 4);
  assert(transposed_grad_weights->getShape().rank() == 4);
  nnfw::cker::train::ConvFilterGrad(
    conv_train_params, getShape(backprop_act), getBuffer<float>(backprop_act),
    getShape(_input), getBuffer<float>(_input), _paddingBottom, _paddingRight,
    getShape(transposed_grad_weights), getBuffer<float>(transposed_grad_weights));
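
  // Likewise, ConvFilterGrad accumulates (again for stride 1, VALID padding)
  //   d_w_hwio[ky][kx][i][o] += d_out[n][oy][ox][o] * in[n][oy + ky][ox + kx][i]
  // over the batch and all output positions, which is why the weight gradient
  // comes out in HWIO layout and is transposed back to OHWI below.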

  // Transpose the weight gradient from HWIO back to OHWI
  nnfw::cker::TransposeParams transpose_grad_param;
  transpose_grad_param.perm_count = transposed_grad_weights->getShape().rank();
  transpose_grad_param.perm[0] = 3;
  transpose_grad_param.perm[1] = 0;
  transpose_grad_param.perm[2] = 1;
  transpose_grad_param.perm[3] = 2;
  nnfw::cker::Transpose(transpose_grad_param, getShape(transposed_grad_weights),
                        getBuffer<float>(transposed_grad_weights), getShape(_grad_weights),
                        getBuffer<float>(_grad_weights));

  // Calculate gradient for bias
  if (_grad_bias)
  {
    biasGrad(backprop_act, _grad_bias);
  }
}
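
// Illustration (hypothetical helper, not part of the original file): the bias
// gradient computed by biasGrad is the incoming gradient summed over every
// axis except the channel axis (NHWC -> C):
static void biasGradNaive(const float *d_out, float *d_bias, int N, int H, int W, int C)
{
  for (int c = 0; c < C; ++c)
    d_bias[c] = 0.0f;
  for (int n = 0; n < N; ++n)
    for (int y = 0; y < H; ++y)
      for (int x = 0; x < W; ++x)
        for (int c = 0; c < C; ++c)
          d_bias[c] += d_out[((n * H + y) * W + x) * C + c];
}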

Referenced declarations:
Tensor: a tensor class that is portable across backends.
const ir::OperandInfo &get_info() const
ir::DataType data_type() const override final
ir::Shape getShape() const override final: gets the ir::Shape of the tensor.
ir::Activation _activation
const IPortableTensor *_bias
const IPortableTensor *_kernel
const IPortableTensor *_input
uint32_t _dilationWidthFactor
IPortableTensor *_output
ir::PaddingType _paddingType
uint32_t _dilationHeightFactor
void forward(bool training) override
void configureBackward(const IPortableTensor *weights, IPortableTensor *back_prop_input, IPortableTensor *grad_weights, IPortableTensor *grad_bias, const IPortableTensor *back_prop_output, const ir::Activation activation)
void ConvFilterGrad(const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &input_shape, const float *input_data, const int padding_bottom, const int padding_right, const Shape &filter_backprop_shape, float *filter_backprop_data)
void ConvInputGrad(const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &filter_shape, const float *filter_data, const int padding_bottom, const int padding_right, const Shape &grad_shape, float *grad_data)
void Transpose(const TransposeParams &unshrunk_params, const Shape &unshrunk_input_shape, const T *input_data, const Shape &unshrunk_output_shape, T *output_data)
void biasGrad(const IPortableTensor *input_backprop, IPortableTensor *bias_grad): backpropagates the bias.
const IPortableTensor *backpropActivation(const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop): backpropagates the activation.
nnfw::cker::Shape getShape(const IPortableTensor *tensor): gets the shape of a tensor.
PaddingValues padding_values
int16_t dilation_width_factor
int16_t dilation_height_factor