ONE - On-device Neural Engine
DepthwiseConvolutionLayer.cc
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DepthwiseConvolutionLayer.h"

#include "OperationUtils.h"

namespace onert::backend::train::ops
{

DepthwiseConvolutionLayer::DepthwiseConvolutionLayer()
  : cpu::ops::DepthwiseConvolutionLayer(), _grad_weights{nullptr}, _grad_bias{nullptr},
    _back_prop_input{nullptr}, _back_prop_output{nullptr}, _act_back_prop_output{nullptr},
    _use_padded_filter{false}, _padded_filter{nullptr}, _filter_buffers{nullptr},
    _filter_dim_buffers{nullptr}
{
  // DO NOTHING
}

void DepthwiseConvolutionLayer::configureBackward(IPortableTensor *back_prop_input,
                                                  IPortableTensor *grad_weights,
                                                  IPortableTensor *grad_bias,
                                                  const IPortableTensor *back_prop_output,
                                                  const ir::Activation activation)
{
  _back_prop_input = back_prop_input;
  _back_prop_output = back_prop_output;
  _grad_weights = grad_weights;
  _grad_bias = grad_bias;

  if (_dilationWidth != 1 || _dilationHeight != 1)
    throw std::runtime_error("train DepthwiseConvolutionLayer: Unsupported dilation yet");

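  // With a fused activation, the incoming gradient must first be routed back through the
  // activation function, so a separate buffer is allocated to hold that intermediate result;
  // with Activation::NONE the incoming gradient is consumed directly.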
  if (activation != ir::Activation::NONE)
  {
    _act_back_prop_output = std::make_unique<BackPropTensor>(_back_prop_output->get_info());
    _act_back_prop_output->setBuffer(
      std::make_shared<basic::Allocator>(_act_back_prop_output->total_size()));
  }

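  // Eigen processes floats in SIMD packets of kPacketSize<float>() elements (e.g. 4 with SSE,
  // 8 with AVX); the scratch-buffer widths below are rounded up to a multiple of this size.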
  const int64_t k_packet_size = [&]() {
    const auto data_type = _back_prop_output->data_type();
    switch (data_type)
    {
      case OperandType::FLOAT32:
      {
        return nnfw::cker::eigen_support::kPacketSize<float>();
      }
      default:
        throw std::runtime_error("train DepthwiseConvolutionLayer: unsupported data type");
    }
  }();

  const auto incoming_shape = getShape(_back_prop_output);
  const int out_depth = incoming_shape.Dims(3);

  const int padded_filter_inner_dim_size =
    ((out_depth + k_packet_size - 1) / k_packet_size) * k_packet_size;
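  // e.g. out_depth = 10 with a packet size of 4 gives ((10 + 3) / 4) * 4 = 12, the smallest
  // multiple of the packet size that can hold all channels.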

  // prepare out_bprop and in_bprop buffer for cker
  // NOTE The Eigen library uses both the main thread and a thread pool, so one extra
  // buffer slot is reserved for the main thread.
  const int thread_count = nnfw::cker::eigen_support::getThreadCount() + 1;

  auto filter_dim_buffers_info = ir::OperandInfo(_back_prop_input->get_info());
  filter_dim_buffers_info.shape({thread_count, padded_filter_inner_dim_size});
  _filter_dim_buffers = std::make_unique<Tensor>(filter_dim_buffers_info);
  _filter_dim_buffers->setBuffer(
    std::make_shared<basic::Allocator>(_filter_dim_buffers->total_size()));

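  // A padded copy of the filter is only needed when the channel count is not already a
  // multiple of the packet size.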
  _use_padded_filter = (out_depth % k_packet_size) != 0;

  const auto filter_shape = getShape(_kernel);
  const int batch = incoming_shape.Dims(0);

  const int filter_rows = filter_shape.Dims(1);
  const int filter_cols = filter_shape.Dims(2);
  const int filter_spatial_size = filter_rows * filter_cols;

  // prepare padded_filter buffer for cker
  auto padded_filter_info = ir::OperandInfo(_kernel->get_info());
  padded_filter_info.shape({batch, filter_spatial_size, padded_filter_inner_dim_size});
  _padded_filter = std::make_unique<Tensor>(padded_filter_info);
  _padded_filter->setBuffer(std::make_shared<basic::Allocator>(_padded_filter->total_size()));

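  // Per-thread scratch copies of the (padded) filter for Eigen's worker threads.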
  auto filter_buffers_info = ir::OperandInfo(_kernel->get_info());
  filter_buffers_info.shape({thread_count, filter_spatial_size, padded_filter_inner_dim_size});
  _filter_buffers = std::make_unique<Tensor>(filter_buffers_info);
  _filter_buffers->setBuffer(std::make_shared<basic::Allocator>(_filter_buffers->total_size()));
}

void DepthwiseConvolutionLayer::forward(bool) { cpu::ops::DepthwiseConvolutionLayer::run(); }

void DepthwiseConvolutionLayer::backward()
{
  const auto data_type = _back_prop_output->data_type();
  assert(data_type == _input->data_type());
  switch (data_type)
  {
    case OperandType::FLOAT32:
    {
      assert(data_type == _grad_bias->data_type());
      backwardFloat32();
      break;
    }
    default:
      throw std::runtime_error{"train DepthwiseConvolutionLayer: unsupported data type"};
  }
}

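// Gradient computation proceeds in three steps: back through the fused activation (if any),
// then through the depthwise convolution towards the input and the filter, and finally the
// bias gradient is reduced from the activation gradient.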
void DepthwiseConvolutionLayer::backwardFloat32()
{
  // Calculate gradient for activation
  const IPortableTensor *backprop_act;
  try
  {
    backprop_act =
      backpropActivation(_activation, _output, _back_prop_output, _act_back_prop_output.get());
  }
  catch (const std::exception &e)
  {
    throw std::runtime_error{"train DepthwiseConvolutionLayer: " + std::string(e.what())};
  }
  assert(backprop_act != nullptr);

  nnfw::cker::DepthwiseConvParams dconv_params;
  dconv_params.stride_width = _strideWidth;
  dconv_params.stride_height = _strideHeight;
  dconv_params.padding_values.width = _paddingLeft;
  dconv_params.padding_values.height = _paddingTop;
  dconv_params.depth_multiplier = _multiplier;
  dconv_params.dilation_width_factor = _dilationWidth;
  dconv_params.dilation_height_factor = _dilationHeight;

  // Calculate gradient for input
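  // i.e. a depthwise convolution of the incoming gradient with the spatially flipped filter.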
  nnfw::cker::train::backpropInput(
    dconv_params, getShape(backprop_act), getBuffer<float>(backprop_act), getShape(_kernel),
    getBuffer<float>(_kernel), getBuffer<float>(_padded_filter.get()), getShape(_back_prop_input),
    getBuffer<float>(_back_prop_input), _use_padded_filter, getBuffer<float>(_filter_buffers.get()),
    getBuffer<float>(_filter_dim_buffers.get()));

  // Calculate gradient for weights
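  // Accumulates, over the batch and every output position, the product of the input patch and
  // the corresponding incoming gradient.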
  nnfw::cker::train::backpropFilter(
    dconv_params, getShape(backprop_act), getBuffer<float>(backprop_act), getShape(_input),
    getBuffer<float>(_input), getShape(_grad_weights), getBuffer<float>(_grad_weights),
    getBuffer<float>(_padded_filter.get()), getBuffer<float>(_filter_buffers.get()));

  // Calculate gradient for bias
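  // The bias gradient is the incoming gradient summed over the batch and spatial dimensions.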
  if (_bias)
  {
    assert(_grad_bias);
    biasGrad(backprop_act, _grad_bias);
  }
}

} // namespace onert::backend::train::ops
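
A minimal usage sketch (not part of the file above): the tensor pointers here are hypothetical placeholders that would normally be created and allocated elsewhere by the train backend, and the forward-path configure(...) inherited from cpu::ops::DepthwiseConvolutionLayer is assumed to have been called beforehand.

  onert::backend::train::ops::DepthwiseConvolutionLayer layer;
  // ... forward-path configure(...) and a forward pass happen first ...
  layer.configureBackward(back_prop_input, grad_weights, grad_bias, back_prop_output,
                          ir::Activation::NONE); // also allocates the padded-filter scratch buffers
  layer.backward(); // writes into back_prop_input, grad_weights and grad_bias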