_back_prop_input{nullptr}, _back_prop_output{nullptr}, _act_back_prop_output{nullptr},
_use_padded_filter{false}, _padded_filter{nullptr}, _filter_buffers{nullptr},
_filter_dim_buffers{nullptr}
_back_prop_input = back_prop_input;
_back_prop_output = back_prop_output;
_grad_weights = grad_weights;
_grad_bias = grad_bias;
throw std::runtime_error("train DepthwiseConvolutionLayer: Unsupported dilation yet");
_act_back_prop_output = std::make_unique<BackPropTensor>(_back_prop_output->get_info());
_act_back_prop_output->setBuffer(
  std::make_shared<basic::Allocator>(_act_back_prop_output->total_size()));
const int64_t k_packet_size = [&]() {
  const auto data_type = _back_prop_output->data_type();
  switch (data_type)
  {
    case OperandType::FLOAT32:
      return nnfw::cker::eigen_support::kPacketSize<float>();
    default:
      throw std::runtime_error("train DepthwiseConvolutionLayer: unsupported data type");
  }
}();
const auto incoming_shape = getShape(_back_prop_output);
const int out_depth = incoming_shape.Dims(3);

const int padded_filter_inner_dim_size =
  ((out_depth + k_packet_size - 1) / k_packet_size) * k_packet_size;
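// Standalone sketch (not part of this file): the computation above is the
// standard round-up-to-multiple idiom. The 4-float packet in the examples
// below is a hypothetical value for illustration; the real one comes from
// nnfw::cker::eigen_support::kPacketSize<float>().
#include <cstdint>

int64_t round_up_to_packet_sketch(int64_t depth, int64_t packet)
{
  return ((depth + packet - 1) / packet) * packet;
}
// round_up_to_packet_sketch(5, 4) == 8  -> out_depth 5 pads to two full packets
// round_up_to_packet_sketch(8, 4) == 8  -> already aligned, nothing to pad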
filter_dim_buffers_info.shape({thread_count, padded_filter_inner_dim_size});
_filter_dim_buffers = std::make_unique<Tensor>(filter_dim_buffers_info);
_filter_dim_buffers->setBuffer(
  std::make_shared<basic::Allocator>(_filter_dim_buffers->total_size()));
// Pad the filter only when out_depth is not a whole number of packets.
_use_padded_filter = (out_depth % k_packet_size) != 0;
const int batch = incoming_shape.Dims(0);
const int filter_rows = filter_shape.Dims(1);
const int filter_cols = filter_shape.Dims(2);
const int filter_spatial_size = filter_rows * filter_cols;
padded_filter_info.shape({batch, filter_spatial_size, padded_filter_inner_dim_size});
_padded_filter = std::make_unique<Tensor>(padded_filter_info);
_padded_filter->setBuffer(std::make_shared<basic::Allocator>(_padded_filter->total_size()));
filter_buffers_info.shape({thread_count, filter_spatial_size, padded_filter_inner_dim_size});
_filter_buffers = std::make_unique<Tensor>(filter_buffers_info);
_filter_buffers->setBuffer(std::make_shared<basic::Allocator>(_filter_buffers->total_size()));
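// Worked example (hypothetical numbers): with a 3x3 kernel, out_depth = 5,
// a 4-float packet, and thread_count = 3, the scratch tensors above come out as
//
//   padded_filter      : {batch, 9, 8}   // filter_spatial_size = 3 * 3 = 9,
//                                        // inner dim 5 padded up to 8
//   filter_buffers     : {3, 9, 8}       // one padded filter copy per thread
//   filter_dim_buffers : {3, 8}          // per-thread single-row scratch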
const auto data_type = _back_prop_output->data_type();
switch (data_type)
{
  case OperandType::FLOAT32:
    assert(data_type == _grad_bias->data_type());
    backwardFloat32();
    break;
  default:
    throw std::runtime_error{"train DepthwiseConvolutionLayer: unsupported data type"};
}
void DepthwiseConvolutionLayer::backwardFloat32()
catch (const std::exception &e)
{
  throw std::runtime_error{"train DepthwiseConvolutionLayer: " + std::string(e.what())};
}
assert(backprop_act != nullptr);
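// Standalone sketch (an assumption for illustration, not the onert
// implementation): for a fused ReLU, the activation backprop that produced
// backprop_act reduces to masking the incoming gradient by where the forward
// output was positive.
#include <cstddef>

void relu_backprop_sketch(const float *output, const float *grad_out, float *grad_in,
                          std::size_t size)
{
  // dL/dx = dL/dy wherever y > 0, and 0 elsewhere
  for (std::size_t i = 0; i < size; ++i)
    grad_in[i] = output[i] > 0.f ? grad_out[i] : 0.f;
}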
getBuffer<float>(_kernel), getBuffer<float>(_padded_filter.get()), getShape(_back_prop_input),
getBuffer<float>(_back_prop_input), _use_padded_filter, getBuffer<float>(_filter_buffers.get()),
getBuffer<float>(_filter_dim_buffers.get()));
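// Standalone sketch of the math behind the backpropInput call above: a naive
// NHWC reference, ignoring the padded-filter and per-thread scratch machinery
// that cker uses for SIMD. All names here are hypothetical, and the filter is
// assumed stored as {1, filter_h, filter_w, out_c} with out_c = in_c * multiplier.
#include <algorithm>

void depthwise_input_grad_sketch(const float *grad_out, const float *filter, float *grad_in,
                                 int batch, int in_h, int in_w, int in_c, int out_h, int out_w,
                                 int filter_h, int filter_w, int multiplier, int stride,
                                 int pad_top, int pad_left)
{
  const int out_c = in_c * multiplier;
  std::fill(grad_in, grad_in + batch * in_h * in_w * in_c, 0.f);
  for (int b = 0; b < batch; ++b)
    for (int oy = 0; oy < out_h; ++oy)
      for (int ox = 0; ox < out_w; ++ox)
        for (int ic = 0; ic < in_c; ++ic)
          for (int q = 0; q < multiplier; ++q)
          {
            const int oc = ic * multiplier + q;
            for (int fy = 0; fy < filter_h; ++fy)
              for (int fx = 0; fx < filter_w; ++fx)
              {
                const int iy = oy * stride + fy - pad_top;
                const int ix = ox * stride + fx - pad_left;
                if (iy < 0 || iy >= in_h || ix < 0 || ix >= in_w)
                  continue;
                // Route each output-gradient element back through the single
                // filter tap that produced it in the forward pass.
                grad_in[((b * in_h + iy) * in_w + ix) * in_c + ic] +=
                  grad_out[((b * out_h + oy) * out_w + ox) * out_c + oc] *
                  filter[(fy * filter_w + fx) * out_c + oc];
              }
          }
}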
getBuffer<float>(_input), getShape(_grad_weights), getBuffer<float>(_grad_weights),
getBuffer<float>(_padded_filter.get()), getBuffer<float>(_filter_buffers.get()));
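// Companion sketch for the backpropFilter call above (same assumptions and
// hypothetical naming as the input-gradient sketch): the weight gradient
// correlates the incoming gradient with the forward input.
#include <algorithm>

void depthwise_filter_grad_sketch(const float *grad_out, const float *input, float *grad_filter,
                                  int batch, int in_h, int in_w, int in_c, int out_h, int out_w,
                                  int filter_h, int filter_w, int multiplier, int stride,
                                  int pad_top, int pad_left)
{
  const int out_c = in_c * multiplier;
  std::fill(grad_filter, grad_filter + filter_h * filter_w * out_c, 0.f);
  for (int b = 0; b < batch; ++b)
    for (int oy = 0; oy < out_h; ++oy)
      for (int ox = 0; ox < out_w; ++ox)
        for (int ic = 0; ic < in_c; ++ic)
          for (int q = 0; q < multiplier; ++q)
          {
            const int oc = ic * multiplier + q;
            for (int fy = 0; fy < filter_h; ++fy)
              for (int fx = 0; fx < filter_w; ++fx)
              {
                const int iy = oy * stride + fy - pad_top;
                const int ix = ox * stride + fx - pad_left;
                if (iy < 0 || iy >= in_h || ix < 0 || ix >= in_w)
                  continue;
                // Accumulate grad_out * input over every position where this
                // filter tap touched the input in the forward pass.
                grad_filter[(fy * filter_w + fx) * out_c + oc] +=
                  grad_out[((b * out_h + oy) * out_w + ox) * out_c + oc] *
                  input[((b * in_h + iy) * in_w + ix) * in_c + ic];
              }
          }
}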