ONE - On-device Neural Engine
PALTransposeConv.h
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LUCI_INTERPRETER_PAL_TRANSPOSE_CONV_H
#define LUCI_INTERPRETER_PAL_TRANSPOSE_CONV_H

#include "PALUtils.h"

namespace luci_interpreter_pal
{

inline void TransposeConv(const ConvParams &params,
                          const luci_interpreter::RuntimeShape &input_shape,
                          const float *input_data,
                          const luci_interpreter::RuntimeShape &filter_shape,
                          const float *filter_data,
                          const luci_interpreter::RuntimeShape &bias_shape, const float *bias_data,
                          const luci_interpreter::RuntimeShape &output_shape, float *output_data)
{
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;

  const int batches = input_shape.dims(0);
  const int input_depth = input_shape.dims(3);
  const int output_depth = filter_shape.dims(0);
  const int input_height = input_shape.dims(1);
  const int input_width = input_shape.dims(2);
  const int filter_height = filter_shape.dims(1);
  const int filter_width = filter_shape.dims(2);
  const int output_height = output_shape.dims(1);
  const int output_width = output_shape.dims(2);
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;

  // Although transpose convolution simplifies to convolution with transposed
  // weights for strides of 1, non-unitary striding complicates matters. To
  // keep this reference implementation as clear as possible, we use a
  // "scatter" access pattern, where we loop through all the input elements,
  // computing their influence on the output, rather than looping through the
  // output elements in the typical "gather" access pattern of a conv. We
  // therefore must initialize the output array to zero.
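  // For example, with stride 2, no padding, and a 3x3 filter, the input
  // element at (in_y, in_x) = (1, 1) has out_y_origin = out_x_origin = 2 and
  // scatters into output rows/columns 2..4, while the element at (0, 0)
  // scatters into rows/columns 0..2; the two windows overlap at (2, 2), so
  // contributions must accumulate into a zero-initialized output.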
  const int num_elements = output_shape.flatSize();
  for (int i = 0; i < num_elements; i++)
  {
    output_data[i] = 0.0f;
  }

  // Loop through input elements one at a time.
  for (int batch = 0; batch < batches; ++batch)
  {
    for (int in_y = 0; in_y < input_height; ++in_y)
    {
      for (int in_x = 0; in_x < input_width; ++in_x)
      {
        for (int in_channel = 0; in_channel < input_depth; ++in_channel)
        {
          // Loop through the output elements it will influence
          const int out_x_origin = (in_x * stride_width) - pad_width;
          const int out_y_origin = (in_y * stride_height) - pad_height;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y)
          {
            for (int filter_x = 0; filter_x < filter_width; ++filter_x)
            {
              for (int out_channel = 0; out_channel < output_depth; ++out_channel)
              {
                // Compute output element location
                const int out_x = out_x_origin + filter_x;
                const int out_y = out_y_origin + filter_y;
                // We cannot accumulate out of bounds
                if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
                    (out_y < output_height))
                {
                  float input_value =
                    input_data[offset(input_shape.dimsData(), batch, in_y, in_x, in_channel)];
                  float filter_value = filter_data[offset(filter_shape.dimsData(), out_channel,
                                                          filter_y, filter_x, in_channel)];
                  output_data[offset(output_shape.dimsData(), batch, out_y, out_x, out_channel)] +=
                    input_value * filter_value;
                }
              }
            }
          }
        }
      }
    }
  }

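  // Second pass: add the bias (if present) and clamp each output element to
  // the activation range [output_activation_min, output_activation_max].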
  for (int batch = 0; batch < batches; ++batch)
  {
    for (int out_y = 0; out_y < output_height; ++out_y)
    {
      for (int out_x = 0; out_x < output_width; ++out_x)
      {
        for (int out_channel = 0; out_channel < output_depth; ++out_channel)
        {
          float acc =
            output_data[offset(output_shape.dimsData(), batch, out_y, out_x, out_channel)];
          if (bias_data)
            acc += bias_data[out_channel];

          output_data[offset(output_shape.dimsData(), batch, out_y, out_x, out_channel)] =
            activationFunctionWithMinMax(acc, output_activation_min, output_activation_max);
        }
      }
    }
  }
}

} // namespace luci_interpreter_pal

#endif // LUCI_INTERPRETER_PAL_TRANSPOSE_CONV_H
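
As a quick sanity check, the reference kernel can be driven directly. The harness below is a minimal, hypothetical sketch: it assumes a TFLite-style luci_interpreter::RuntimeShape(int dims_count, const int32_t *dims_data) constructor and a luci_interpreter_pal::ConvParams exposing exactly the fields the kernel reads above (stride_width/height, padding_values, and the float activation bounds); verify both against the actual PALUtils.h before relying on it. With a 1x2x2x1 input, a 1x2x2x1 all-ones filter, stride 2, and no padding, each input element scatters into its own disjoint 2x2 output block, so the expected 4x4 output is four constant blocks holding 1, 2, 3 and 4.

// transpose_conv_smoke_test.cpp -- hypothetical harness, not part of ONE.
#include <cstdint>
#include <cstdio>
#include <limits>

#include "PALTransposeConv.h"

int main()
{
  // NHWC input 1x2x2x1, OHWI filter 1x2x2x1, NHWC output 1x4x4x1.
  const int32_t in_dims[4] = {1, 2, 2, 1};
  const int32_t filt_dims[4] = {1, 2, 2, 1};
  const int32_t out_dims[4] = {1, 4, 4, 1};
  // Assumed TFLite-style constructor: RuntimeShape(dims_count, dims_data).
  luci_interpreter::RuntimeShape input_shape(4, in_dims);
  luci_interpreter::RuntimeShape filter_shape(4, filt_dims);
  luci_interpreter::RuntimeShape output_shape(4, out_dims);
  luci_interpreter::RuntimeShape bias_shape; // never read by the kernel

  const float input[4] = {1.f, 2.f, 3.f, 4.f};
  const float filter[4] = {1.f, 1.f, 1.f, 1.f};
  float output[16];

  luci_interpreter_pal::ConvParams params{}; // only the fields read above are set
  params.stride_width = 2;
  params.stride_height = 2;
  params.padding_values.width = 0;
  params.padding_values.height = 0;
  // Widest possible clamp so the activation is effectively a no-op.
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();

  luci_interpreter_pal::TransposeConv(params, input_shape, input, filter_shape, filter,
                                      bias_shape, /*bias_data=*/nullptr, output_shape, output);

  // Stride 2 with a 2x2 filter tiles the output: expect four constant
  // 2x2 blocks holding 1, 2, 3 and 4.
  for (int y = 0; y < 4; ++y)
  {
    for (int x = 0; x < 4; ++x)
      std::printf("%4.1f ", output[y * 4 + x]);
    std::printf("\n");
  }
  return 0;
}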