const auto *input_data = reinterpret_cast<const T *>(input.atOffset(0));
const auto *kernel_data = reinterpret_cast<const T *>(kernel.atOffset(0));
auto *result_data = reinterpret_cast<T *>(result.atOffset(0));
const Shape &input_shape = input.getShape();
// (reconstructed: output_shape, kernel_shape and the output extents are used
// below, but their declarations were elided in this excerpt)
const Shape &output_shape = result.getShape();
const Shape &kernel_shape = kernel.getShape();

const std::vector<std::int32_t> &strides = attributes.strides;
const std::vector<std::int32_t> &padding_before = attributes.padding_before;
const std::int32_t num_groups = attributes.num_groups;

const std::int32_t batch_size = output_shape.dim(0);
const std::int32_t output_height = output_shape.dim(1);
const std::int32_t output_width = output_shape.dim(2);
const std::int32_t kernel_height = kernel_shape.dim(1);
const std::int32_t kernel_width = kernel_shape.dim(2);
const std::int32_t input_height = input_shape.dim(1);
const std::int32_t input_width = input_shape.dim(2);

const std::int32_t num_in_channels = input_shape.dim(3);
const std::int32_t num_out_channels = output_shape.dim(3);
assert(num_in_channels % num_groups == 0);
assert(num_out_channels % num_groups == 0);

const std::int32_t out_group_size = num_out_channels / num_groups;
const std::int32_t in_group_size = num_in_channels / num_groups;

assert(kernel_shape.dim(3) == in_group_size);
assert(kernel_shape.dim(0) == num_out_channels);
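
// Loop structure: outputs are produced one element at a time; for each output
// element, only the in_group_size input channels of its group are accumulated
// (with num_groups == num_in_channels this degenerates to a depthwise pass).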
for (std::int32_t batch = 0; batch < batch_size; ++batch)
{
  for (std::int32_t out_y = 0; out_y < output_height; ++out_y)
  {
    for (std::int32_t out_x = 0; out_x < output_width; ++out_x)
    {
      for (std::int32_t group = 0; group < num_groups; ++group)
      {
        const std::int32_t out_group_offset = group * out_group_size;
        const std::int32_t in_group_offset = group * in_group_size;

        for (std::int32_t out_c = 0; out_c < out_group_size; ++out_c)
        {
          const std::int32_t in_y_origin = (out_y * strides[0]) - padding_before[0];
          const std::int32_t in_x_origin = (out_x * strides[1]) - padding_before[1];

          // Accumulator for this output element (declaration elided in the excerpt).
          T sum = 0;
          for (std::int32_t kernel_y = 0; kernel_y < kernel_height; ++kernel_y)
          {
            for (std::int32_t kernel_x = 0; kernel_x < kernel_width; ++kernel_x)
            {
              for (std::int32_t in_c = 0; in_c < in_group_size; ++in_c)
              {
                const std::int32_t in_y = in_y_origin + kernel_y;
                const std::int32_t in_x = in_x_origin + kernel_x;

                // Taps that fall into the implicit zero padding contribute nothing.
                if ((in_y >= 0 && in_y < input_height) && (in_x >= 0 && in_x < input_width))
                {
                  const std::int32_t in_offset =
                    calcOffset(input_shape, batch, in_y, in_x, in_group_offset + in_c);
                  const std::int32_t kernel_offset =
                    calcOffset(kernel_shape, out_group_offset + out_c, kernel_y, kernel_x, in_c);
                  const T input_val = input_data[in_offset];
                  const T kernel_val = kernel_data[kernel_offset];
                  sum += kernel_val * input_val;
                }
              }
            }
          }
          const std::int32_t out_offset =
            calcOffset(output_shape, batch, out_y, out_x, out_group_offset + out_c);
          result_data[out_offset] = sum;
        }
      }
    }
  }
}
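
// ---- Quantized (uint8) Conv2D specialization ----
// Products are accumulated in int32, the fused bias is added, and the result is
// requantized to uint8; this is why a fused bias is required below.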
// (guard reconstructed from the error message: the quantized path needs a bias)
if (fused_bias == nullptr)
{
  throw std::runtime_error{"Quantized Conv2D cannot be executed without fused bias"};
}
const auto &input_type = input.getType();
const auto &kernel_type = kernel.getType();
const auto &bias_type = fused_bias->getType();
const auto &output_type = result.getType();

assert(input_type.isQuantized());
assert(kernel_type.isQuantized());
assert(bias_type.isQuantized());
assert(output_type.isQuantized());
assert(input_type.getElementType() == DataType::UINT8);
assert(kernel_type.getElementType() == DataType::UINT8);
assert(bias_type.getElementType() == DataType::INT32);
assert(output_type.getElementType() == DataType::UINT8);
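
// Affine quantization: a real value r is represented by a quantized value q as
// r = scale * (q - zero_point). The zero points are negated once below so the
// inner loop can simply add input_offset / kernel_offset to the raw values.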
int32_t input_offset = -input_type.getQuantization().getZeroPoint();
int32_t kernel_offset = -kernel_type.getQuantization().getZeroPoint();
int32_t output_offset = output_type.getQuantization().getZeroPoint();

double input_scale = input_type.getQuantization().getScale();
double kernel_scale = kernel_type.getQuantization().getScale();
double output_scale = output_type.getQuantization().getScale();

// The int32 accumulator is in units of input_scale * kernel_scale; rescaling it
// to output units therefore uses the ratio input_scale * kernel_scale / output_scale.
double real_multiplier = input_scale * kernel_scale / output_scale;
int32_t output_multiplier = 0;
int output_shift = 0;
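
// Gap in the excerpt: real_multiplier must be decomposed into the integer
// output_multiplier and output_shift used during requantization. Assuming a
// TFLite-style helper with this signature (not shown in the excerpt):
QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);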
const Shape &in_shape = input.getShape();
// (reconstructed: kernel_shape and pads are used below; as in the float path,
// pads refers to the padding applied before the input)
const Shape &kernel_shape = kernel.getShape();
const Shape &out_shape = result.getShape();
const auto &strides = attributes.strides;
const auto &pads = attributes.padding_before;

// Grouped convolution is not handled by this path.
assert(attributes.num_groups == 1);
assert(attributes.data_format == DataFormat::NHWC);
assert(in_shape.rank() == 4);
assert(kernel_shape.rank() == 4);
assert(kernel_shape.dim(3) == in_shape.dim(3));
assert(kernel_shape.dim(0) == out_shape.dim(3));
assert(strides.size() == 2);
assert(pads.size() == 2);
int32_t stride_height = strides[0];
int32_t stride_width = strides[1];

int32_t pad_height = pads[0];
int32_t pad_width = pads[1];

int32_t input_height = in_shape.dim(1);
int32_t input_width = in_shape.dim(2);
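
// Gap in the excerpt: element accessors used below. Reconstructed assuming a
// Tensor<T> accessor wrapper over the tensors; the names come from their later uses.
Tensor<uint8_t> input_accessor(input);
Tensor<uint8_t> kernel_accessor(kernel);
Tensor<int32_t> bias_accessor(*fused_bias);
Tensor<uint8_t> res_accessor(result);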
// Results are clamped to the representable uint8 range.
int32_t output_min = std::numeric_limits<uint8_t>::min();
int32_t output_max = std::numeric_limits<uint8_t>::max();
for (int batch = 0; batch < out_shape.dim(0); ++batch)
{
  for (int out_y = 0; out_y < out_shape.dim(1); ++out_y)
  {
    for (int out_x = 0; out_x < out_shape.dim(2); ++out_x)
    {
      for (int out_channel = 0; out_channel < out_shape.dim(3); ++out_channel)
      {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        const int in_y_origin = (out_y * stride_height) - pad_height;

        // int32 accumulator (declaration elided in the excerpt).
        int32_t acc = 0;
        for (int filter_y = 0; filter_y < kernel_shape.dim(1); ++filter_y)
        {
          for (int filter_x = 0; filter_x < kernel_shape.dim(2); ++filter_x)
          {
            for (int in_channel = 0; in_channel < kernel_shape.dim(3); ++in_channel)
            {
              const int in_x = in_x_origin + filter_x;
              const int in_y = in_y_origin + filter_y;

              // Locations outside the input contribute zero (implicit padding).
              if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && (in_y < input_height))
              {
                Index in_index{batch, in_y, in_x, in_channel};
                Index ker_index{out_channel, filter_y, filter_x, in_channel};
                int32_t input_val = input_accessor.at(in_index);
                int32_t kernel_val = kernel_accessor.at(ker_index);
                // Zero points are folded in via the negated offsets computed above.
                acc += (kernel_val + kernel_offset) * (input_val + input_offset);
              }
            }
          }
        }
        // The int32 fused bias is added once per output channel.
        acc += bias_accessor.at(Index{out_channel});
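        // Gap in the excerpt: the accumulator is rescaled from input*kernel units
        // to output units here, assuming a TFLite-style fixed-point helper that
        // applies the output_multiplier/output_shift computed above:
        acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);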
        acc += output_offset;
        acc = std::max(acc, output_min);
        acc = std::min(acc, output_max);
        Index out_index{batch, out_y, out_x, out_channel};
        res_accessor.at(out_index) = static_cast<uint8_t>(acc);
      }
    }
  }
}