template <typename Tensor>
std::unique_ptr<Tensor> createTransposedWeights(const IPortableTensor *origin_weights)
{
  const auto &origin_shape = origin_weights->getShape();
  assert(origin_shape.rank() == 4);

  auto transposed_info = origin_weights->get_info();
  // Reorder the 4-D weight shape from OHWI to HWIO
  auto transposed_shape =
    ir::Shape{origin_shape.dim(1), origin_shape.dim(2), origin_shape.dim(3), origin_shape.dim(0)};
  transposed_info.shape(transposed_shape);

  return std::make_unique<Tensor>(transposed_info);
}
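// A minimal sketch of the shape permutation above, assuming a plain
// std::array<int, 4> in place of ir::Shape; toHWIO is a hypothetical helper
// name, not part of onert. The {dim(1), dim(2), dim(3), dim(0)} reordering
// turns an OHWI weight shape {O, H, W, I} into HWIO {H, W, I, O}.
#include <array>
#include <cassert>

std::array<int, 4> toHWIO(const std::array<int, 4> &ohwi)
{
  return {ohwi[1], ohwi[2], ohwi[3], ohwi[0]};
}

int main()
{
  const std::array<int, 4> ohwi{8, 3, 3, 16}; // 8 filters, 3x3 kernel, 16 input channels
  assert((toHWIO(ohwi) == std::array<int, 4>{3, 3, 16, 8}));
  return 0;
}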
ConvolutionLayer::ConvolutionLayer()
  : _grad_weights{nullptr}, _grad_bias{nullptr}, _back_prop_input{nullptr},
    _back_prop_output{nullptr}, _transposed_weights{nullptr}
{
  // DO NOTHING
}
void ConvolutionLayer::configureBackward(const IPortableTensor *weights,
                                         IPortableTensor *back_prop_input,
                                         IPortableTensor *grad_weights, IPortableTensor *grad_bias,
                                         const IPortableTensor *back_prop_output,
                                         const ir::Activation activation)
{
  _back_prop_input = back_prop_input;
  _grad_weights = grad_weights;
  _grad_bias = grad_bias;
  _back_prop_output = back_prop_output;

  if (_dilationHeightFactor != 1 || _dilationWidthFactor != 1)
    throw std::runtime_error("train ConvolutionLayer: Dilation is not supported yet");

  // Auxiliary tensors for the backward pass; their buffers are allocated up front
  _transposed_weights = createTransposedWeights<Tensor>(weights);
  _transposed_weights->setBuffer(
    std::make_shared<basic::Allocator>(_transposed_weights->total_size()));

  _conv_back_prop_output = std::make_unique<BackPropTensor>(back_prop_output->get_info());
  _conv_back_prop_output->setBuffer(
    std::make_shared<basic::Allocator>(_conv_back_prop_output->total_size()));

  _transposed_grad_weights = createTransposedWeights<GradientTensor>(weights);
  _transposed_grad_weights->setBuffer(
    std::make_shared<basic::Allocator>(_transposed_grad_weights->total_size()));

  // An extra buffer is only needed when an activation has to be backpropagated
  if (activation != ir::Activation::NONE)
  {
    _act_back_prop_output = std::make_unique<BackPropTensor>(_back_prop_output->get_info());
    _act_back_prop_output->setBuffer(
      std::make_shared<basic::Allocator>(_act_back_prop_output->total_size()));
  }
}
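// A minimal sketch of the allocate-once pattern above, with hypothetical
// stand-ins (FakeAllocator, FakeTensor) for basic::Allocator and the backend
// tensor types: each auxiliary tensor gets a buffer of exactly total_size()
// bytes at configure time, so the backward pass itself never allocates.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

struct FakeAllocator
{
  explicit FakeAllocator(std::size_t size) : data(size) {}
  std::vector<std::uint8_t> data;
};

struct FakeTensor
{
  std::size_t total_size() const { return 3 * 3 * 16 * 8 * sizeof(float); }
  void setBuffer(std::shared_ptr<FakeAllocator> alloc) { buffer = std::move(alloc); }
  std::shared_ptr<FakeAllocator> buffer;
};

int main()
{
  FakeTensor transposed_weights;
  transposed_weights.setBuffer(
    std::make_shared<FakeAllocator>(transposed_weights.total_size()));
  return 0;
}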
void ConvolutionLayer::backward()
{
  const auto data_type = _back_prop_output->data_type();
  switch (data_type)
  {
    case OperandType::FLOAT32:
    {
      assert(data_type == _grad_bias->data_type());
      backwardFloat32();
      break;
    }
    default:
      throw std::runtime_error{"train ConvolutionLayer: unsupported data type"};
  }
}
void ConvolutionLayer::backwardFloat32()
{
  // Backpropagate through the activation first; any failure is rewrapped
  // with the layer name for easier diagnosis
  const IPortableTensor *backprop_act;
  try
  {
    backprop_act =
      backpropActivation(_activation, _output, _back_prop_output, _act_back_prop_output.get());
  }
  catch (const std::exception &e)
  {
    throw std::runtime_error{"train ConvolutionLayer: " + std::string(e.what())};
  }
  assert(backprop_act != nullptr);
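// A minimal sketch of what backpropActivation computes for the ReLU case
// (an assumption: ReLU is only one of the activations onert handles here).
// The incoming gradient passes through wherever the forward output was
// positive and is zeroed elsewhere.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<float> reluBackprop(const std::vector<float> &forward_output,
                                const std::vector<float> &output_backprop)
{
  assert(forward_output.size() == output_backprop.size());
  std::vector<float> input_backprop(forward_output.size());
  for (std::size_t i = 0; i < forward_output.size(); ++i)
    input_backprop[i] = forward_output[i] > 0.0f ? output_backprop[i] : 0.0f;
  return input_backprop;
}

int main()
{
  auto g = reluBackprop({1.0f, 0.0f, 2.0f}, {5.0f, 5.0f, 5.0f});
  assert(g[0] == 5.0f && g[1] == 0.0f && g[2] == 5.0f);
  return 0;
}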
  // Transpose the weights from OHWI to HWIO for the input-gradient kernel
  auto transposed_weights = _transposed_weights.get();
  assert(transposed_weights->getShape().rank() == 4);
  nnfw::cker::TransposeParams transpose_param;
  transpose_param.perm_count = transposed_weights->getShape().rank();
  transpose_param.perm[0] = 1;
  transpose_param.perm[1] = 2;
  transpose_param.perm[2] = 3;
  transpose_param.perm[3] = 0;
  nnfw::cker::Transpose(transpose_param, getShape(_kernel), getBuffer<float>(_kernel),
                        getShape(transposed_weights), getBuffer<float>(transposed_weights));
  // Propagate the gradient to the input (conv_train_params, padding_bottom and
  // padding_right come from the layer's stride/padding setup, not shown here)
  nnfw::cker::train::ConvInputGrad(
    conv_train_params, getShape(backprop_act), getBuffer<float>(backprop_act),
    getShape(transposed_weights), getBuffer<float>(transposed_weights), padding_bottom,
    padding_right, getShape(_back_prop_input), getBuffer<float>(_back_prop_input));
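// A minimal 1D sketch of the idea behind ConvInputGrad (my simplification;
// the real kernel is 2-D over NHWC with stride and padding): for a stride-1
// VALID forward conv y[i] = sum_k x[i+k] * w[k], every x[i+k] contributed to
// y[i] through w[k], so it receives dy[i] * w[k] in the backward pass.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<float> convInputGrad1D(const std::vector<float> &dy, const std::vector<float> &w,
                                   std::size_t input_len)
{
  std::vector<float> dx(input_len, 0.0f);
  for (std::size_t i = 0; i < dy.size(); ++i)  // each output position
    for (std::size_t k = 0; k < w.size(); ++k) // each kernel tap
      dx[i + k] += dy[i] * w[k];
  return dx;
}

int main()
{
  // x has length 4 and w length 2, so y (and dy) has length 3
  std::vector<float> dy{1, 1, 1}, w{2, 3};
  auto dx = convInputGrad1D(dy, w, 4);
  assert(dx[0] == 2 && dx[1] == 5 && dx[2] == 5 && dx[3] == 3);
  return 0;
}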
  // Compute the weight gradient in HWIO layout
  auto transposed_grad_weights = _transposed_grad_weights.get();
  assert(_grad_weights->getShape().rank() == 4);
  assert(transposed_grad_weights->getShape().rank() == 4);
  nnfw::cker::train::ConvFilterGrad(
    conv_train_params, getShape(backprop_act), getBuffer<float>(backprop_act), getShape(_input),
    getBuffer<float>(_input), padding_bottom, padding_right, getShape(transposed_grad_weights),
    getBuffer<float>(transposed_grad_weights));

  // Transpose the weight gradient back from HWIO to OHWI
  nnfw::cker::TransposeParams transpose_grad_param;
  transpose_grad_param.perm_count = transposed_grad_weights->getShape().rank();
  transpose_grad_param.perm[0] = 3;
  transpose_grad_param.perm[1] = 0;
  transpose_grad_param.perm[2] = 1;
  transpose_grad_param.perm[3] = 2;
  nnfw::cker::Transpose(transpose_grad_param, getShape(transposed_grad_weights),
                        getBuffer<float>(transposed_grad_weights), getShape(_grad_weights),
                        getBuffer<float>(_grad_weights));

  // Accumulate the bias gradient from the activation-backprop tensor
  if (_grad_bias)
    biasGrad(backprop_act, _grad_bias);
}
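// A minimal sketch checking that the two permutations above are inverses:
// {1, 2, 3, 0} takes OHWI to HWIO and {3, 0, 1, 2} takes HWIO back to OHWI,
// so the weight gradient ends up in the layout _grad_weights expects.
#include <array>
#include <cassert>

int main()
{
  const std::array<int, 4> to_hwio{1, 2, 3, 0}; // perm used for the weights
  const std::array<int, 4> to_ohwi{3, 0, 1, 2}; // perm used for the weight grads
  for (int axis = 0; axis < 4; ++axis)
    assert(to_hwio[to_ohwi[axis]] == axis); // composing the two is the identity
  return 0;
}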
Referenced symbols:

IPortableTensor: A tensor class that is portable for other backends.
const ir::OperandInfo & get_info() const
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
ir::Activation _activation
const IPortableTensor * _bias
const IPortableTensor * _kernel
const IPortableTensor * _input
uint32_t _dilationWidthFactor
IPortableTensor * _output
ir::PaddingType _paddingType
uint32_t _dilationHeightFactor
void forward(bool training) override
void configureBackward(const IPortableTensor *weights, IPortableTensor *back_prop_input, IPortableTensor *grad_weights, IPortableTensor *grad_bias, const IPortableTensor *back_prop_output, const ir::Activation activation)
void ConvFilterGrad(const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &input_shape, const float *input_data, const int padding_bottom, const int padding_right, const Shape &filter_backprop_shape, float *filter_backprop_data)
void ConvInputGrad(const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &filter_shape, const float *filter_data, const int padding_bottom, const int padding_right, const Shape &grad_shape, float *grad_data)
void Transpose(const TransposeParams &unshrunk_params, const Shape &unshrunk_input_shape, const T *input_data, const Shape &unshrunk_output_shape, T *output_data)
void biasGrad(const IPortableTensor *input_backprop, IPortableTensor *bias_grad)
Backpropagate bias.
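// A minimal sketch of biasGrad, assuming NHWC layout (the layout is my
// assumption; the declaration above fixes only the tensor arguments): since
// the bias is broadcast over batch and spatial positions in the forward
// pass, its gradient is the per-channel sum of the incoming gradient.
#include <cstddef>
#include <vector>

std::vector<float> biasGradNHWC(const std::vector<float> &input_backprop, int batch, int height,
                                int width, int channels)
{
  std::vector<float> grad(channels, 0.0f);
  for (int n = 0; n < batch * height * width; ++n)
    for (int c = 0; c < channels; ++c)
      grad[c] += input_backprop[static_cast<std::size_t>(n) * channels + c];
  return grad;
}

int main()
{
  // 1x2x2x1 gradient of all ones -> bias gradient {4}
  auto g = biasGradNHWC(std::vector<float>(4, 1.0f), 1, 2, 2, 1);
  return g[0] == 4.0f ? 0 : 1;
}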
const IPortableTensor * backpropActivation(const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop)
Backpropagate activation.
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
Get shape of tensor.
PaddingValues padding_values
int16_t dilation_width_factor
int16_t dilation_height_factor
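// A minimal sketch of why the dilation factors matter (and why the training
// layer above rejects anything but 1 for now): a dilation factor d spreads
// the k kernel taps apart, giving an effective extent of k + (k - 1) * (d - 1)
// without adding weights. effectiveKernelExtent is a hypothetical helper name.
constexpr int effectiveKernelExtent(int k, int d) { return k + (k - 1) * (d - 1); }
static_assert(effectiveKernelExtent(3, 1) == 3, "dilation 1 leaves a 3-tap kernel at 3");
static_assert(effectiveKernelExtent(3, 2) == 5, "dilation 2 spreads a 3-tap kernel over 5");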