ONE - On-device Neural Engine
Loading...
Searching...
No Matches
TestUtils.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#ifndef LUCI_INTERPRETER_KERNELS_TESTUTILS_H
19#define LUCI_INTERPRETER_KERNELS_TESTUTILS_H
20
21#include "luci_interpreter/core/Tensor.h"
23
24#include <type_traits>
25#include <limits> // std::numeric_limits
26
27#include <gtest/gtest.h>
28#include <gmock/gmock.h>
29
30namespace luci_interpreter
31{
32namespace kernels
33{
34namespace testing
35{
36
// Quantizes `num_elements` floats from `data` with the affine scheme
// q = round(zero_point + value / scale), clamped to T's representable range.
// Definition appears at the bottom of this header.
template <typename T>
std::vector<T> quantize(const float *data, size_t num_elements, float scale, int32_t zero_point);
39
40template <DataType DT>
41Tensor makeInputTensor(const Shape &shape, const std::vector<typename DataTypeImpl<DT>::Type> &data,
43{
44 Tensor tensor(DT, shape, {}, "");
45 memory_manager->allocate_memory(tensor);
46 tensor.writeData(data.data(), data.size() * sizeof(typename DataTypeImpl<DT>::Type));
47 return tensor;
48}
49
60template <DataType DT>
61Tensor makeInputTensor(const Shape &shape, float scale, int32_t zero_point,
62 const std::vector<float> &data, IMemoryManager *memory_manager)
63{
64 using NativeT = typename DataTypeImpl<DT>::Type;
65 Tensor tensor(DT, shape, {{scale}, {zero_point}}, "");
66 std::vector<NativeT> quantized_data =
67 quantize<NativeT>(data.data(), data.size(), scale, zero_point);
68 memory_manager->allocate_memory(tensor);
69 tensor.writeData(quantized_data.data(), quantized_data.size() * sizeof(NativeT));
70 return tensor;
71}
72
84template <DataType DT>
85Tensor makeInputTensor(const Shape &shape, const std::vector<float> &scales,
86 const std::vector<int32_t> &zero_points, int quantized_dimension,
87 const std::vector<float> &data, IMemoryManager *memory_manager)
88{
89 using NativeT = typename DataTypeImpl<DT>::Type;
90 assert(quantized_dimension < shape.num_dims());
91 Tensor tensor(DT, shape, {scales, zero_points, quantized_dimension}, "");
92
93 // quantize_dimension breaks shape into two parts:
94 // inner dimensions that contains continuous data with one quantization type
95 // outer dimensions that contains other dimensions
96 size_t outer_dims_size = 1;
97 int32_t quant_dim_size = shape.dim(quantized_dimension);
98 size_t inner_dims_size = 1;
99 assert(quant_dim_size == scales.size());
100 assert(quant_dim_size == zero_points.size());
101
102 for (int i = 0; i < quantized_dimension; ++i)
103 outer_dims_size *= shape.dim(i);
104 for (int i = quantized_dimension + 1; i < shape.num_dims(); ++i)
105 inner_dims_size *= shape.dim(i);
106
108
109 std::vector<NativeT> quantized_data;
110 quantized_data.reserve(shape.num_elements());
111 for (size_t outer_it = 0; outer_it < outer_dims_size; ++outer_it)
112 for (int32_t channel = 0; channel < quant_dim_size; ++channel)
113 {
114 int32_t zero_point = zero_points[channel];
115 float scale = scales[channel];
116 size_t offset = inner_dims_size * (quant_dim_size * outer_it + channel);
117 std::vector<NativeT> part_quantized_data =
118 quantize<NativeT>(data.data() + offset, inner_dims_size, scale, zero_point);
120 part_quantized_data.end());
121 }
122 assert(quantized_data.size() == shape.num_elements());
123 memory_manager->allocate_memory(tensor);
124 tensor.writeData(quantized_data.data(), quantized_data.size() * sizeof(NativeT));
125 return tensor;
126}
127
128Tensor makeOutputTensor(DataType element_type);
129Tensor makeOutputTensor(DataType element_type, float scale, int32_t zero_point);
130
131std::vector<int32_t> extractTensorShape(const Tensor &tensor);
132
133// Returns the corresponding DataType given the type T.
134template <typename T> constexpr DataType getElementType()
135{
136 if (std::is_same<T, float>::value)
137 return DataType::FLOAT32;
138 if (std::is_same<T, double>::value)
139 return DataType::FLOAT64;
140 if (std::is_same<T, uint8_t>::value)
141 return DataType::U8;
142 if (std::is_same<T, uint16_t>::value)
143 return DataType::U16;
144 if (std::is_same<T, uint32_t>::value)
145 return DataType::U32;
146 if (std::is_same<T, uint64_t>::value)
147 return DataType::U64;
148 if (std::is_same<T, int8_t>::value)
149 return DataType::S8;
150 if (std::is_same<T, int16_t>::value)
151 return DataType::S16;
152 if (std::is_same<T, int32_t>::value)
153 return DataType::S32;
154 if (std::is_same<T, int64_t>::value)
155 return DataType::S64;
156 if (std::is_same<T, bool>::value)
157 return DataType::BOOL;
158 return DataType::Unknown;
159}
160
161template <typename T> std::vector<T> extractTensorData(const Tensor &tensor)
162{
163 const auto *data_ptr = tensor.data<T>();
164 return std::vector<T>(data_ptr, data_ptr + tensor.shape().num_elements());
165}
166
167std::vector<float> dequantizeTensorData(const Tensor &tensor);
168
169// Array version of `::testing::FloatNear` matcher.
170::testing::Matcher<std::vector<float>> FloatArrayNear(const std::vector<float> &values,
171 float max_abs_error = 1.0e-5f);
172
173// Array version of `::testing::DoubleNear` matcher.
174::testing::Matcher<std::vector<double>> DoubleArrayNear(const std::vector<double> &values,
175 double max_abs_error = 1.0e-12);
176
// Affine quantization: q = round(zero_point + value / scale), clamped to T's
// range. For signed T the lower clamp is -max (symmetric range, e.g. -127 for
// int8), not lowest(); unsigned T clamps to [0, max].
template <typename T>
std::vector<T> quantize(const float *data, size_t num_elements, float scale, int32_t zero_point)
{
  static_assert(std::is_integral<T>::value, "Integral type expected.");

  float q_min{}, q_max{};
  if (std::is_signed<T>::value)
  {
    q_min = -std::numeric_limits<T>::max();
    q_max = std::numeric_limits<T>::max();
  }
  else
  {
    q_min = 0;
    q_max = std::numeric_limits<T>::max();
  }

  std::vector<T> q;
  for (size_t i = 0; i < num_elements; ++i)
  {
    const auto &f = data[i];
    q.push_back(static_cast<T>(
      std::max<float>(q_min, std::min<float>(q_max, std::round(zero_point + (f / scale))))));
  }
  return q;
}
203
// Affine dequantization: value = scale * (q - zero_point).
template <typename T>
std::vector<float> dequantize(const T *data, size_t num_elements, float scale, int32_t zero_point)
{
  static_assert(std::is_integral<T>::value, "Integral type expected.");
  std::vector<float> f;
  for (size_t i = 0; i < num_elements; ++i)
  {
    const T &q = data[i];
    f.push_back(scale * (q - zero_point));
  }
  return f;
}
216
// NOTE Returns scale and zero point for _asymmetric_ range (both signed and unsigned).
// NOTE(review): the `zero_point_double` selection and the qmin/qmax clamp
// branches were lost in extraction and restored here following the standard
// TFLite zero-point nudging algorithm referenced by the surrounding comments.
template <typename T> std::pair<float, int32_t> quantizationParams(float f_min, float f_max)
{
  static_assert(std::is_integral<T>::value, "Integral type expected.");
  int32_t zero_point = 0;
  float scale = 0;
  const T qmin = std::numeric_limits<T>::lowest();
  const T qmax = std::numeric_limits<T>::max();
  const float qmin_double = qmin;
  const float qmax_double = qmax;
  // 0 should always be a representable value. Let's assume that the initial
  // min,max range contains 0.
  assert(f_max >= 0);
  assert(f_min <= 0);
  if (f_min == f_max)
  {
    // Special case where the min,max range is a point. Should be {0}.
    assert(f_max == 0);
    assert(f_min == 0);
    return {scale, zero_point};
  }

  // General case.
  //
  // First determine the scale.
  scale = (f_max - f_min) / (qmax_double - qmin_double);

  // Zero-point computation.
  // First the initial floating-point computation. The zero-point can be
  // determined from solving an affine equation for any known pair
  // (real value, corresponding quantized value).
  // We know two such pairs: (rmin, qmin) and (rmax, qmax).
  // The arithmetic error on the zero point computed from either pair
  // will be roughly machine_epsilon * (sum of absolute values of terms)
  // so we want to use the variant that adds the smaller terms.
  const float zero_point_from_min = qmin_double - f_min / scale;
  const float zero_point_from_max = qmax_double - f_max / scale;

  const float zero_point_from_min_error = std::abs(qmin_double) + std::abs(f_min / scale);

  const float zero_point_from_max_error = std::abs(qmax_double) + std::abs(f_max / scale);

  const float zero_point_double = zero_point_from_min_error < zero_point_from_max_error
                                    ? zero_point_from_min
                                    : zero_point_from_max;

  // Now we need to nudge the zero point to be an integer
  // (our zero points are integer, and this is motivated by the requirement
  // to be able to represent the real value "0" exactly as a quantized value,
  // which is required in multiple places, for example in Im2col with SAME
  // padding).

  T nudged_zero_point = 0;
  if (zero_point_double < qmin_double)
  {
    nudged_zero_point = qmin;
  }
  else if (zero_point_double > qmax_double)
  {
    nudged_zero_point = qmax;
  }
  else
  {
    nudged_zero_point = static_cast<T>(std::round(zero_point_double));
  }

  // The zero point should always be in the range of quantized value,
  // [qmin, qmax].
  assert(qmax >= nudged_zero_point);
  assert(qmin <= nudged_zero_point);
  zero_point = nudged_zero_point;
  // finally, return the values
  return {scale, zero_point};
}
291
// Returns the size of one quantization step for the given float range —
// a natural tolerance for comparing dequantized results.
inline float getTolerance(float min, float max, int quantize_steps)
{
  return ((max - min) / quantize_steps);
}
296
297} // namespace testing
298} // namespace kernels
299} // namespace luci_interpreter
300
301#endif // LUCI_INTERPRETER_KERNELS_TESTUTILS_H
int32_t dim(int i) const
Definition Tensor.h:41
int32_t num_elements() const
Definition Tensor.h:53
int num_dims() const
Definition Tensor.h:39
__global uchar * offset(const Image *img, int x, int y)
Definition helpers.h:540
const T * data(const std::vector< T, Alloc > &v)
Tensor makeOutputTensor(DataType element_type)
Definition TestUtils.cpp:33
std::vector< float > dequantize(const T *data, size_t num_elements, float scale, int32_t zero_point)
Definition TestUtils.h:205
std::vector< int32_t > extractTensorShape(const Tensor &tensor)
float getTolerance(float min, float max, int quantize_steps)
Definition TestUtils.h:292
Matcher< std::vector< double > > DoubleArrayNear(const std::vector< double > &values, double max_abs_error)
constexpr DataType getElementType()
Definition TestUtils.h:134
std::vector< float > dequantizeTensorData(const Tensor &tensor)
Definition TestUtils.cpp:40
Matcher< std::vector< float > > FloatArrayNear(const std::vector< float > &values, float max_abs_error)
std::vector< T > quantize(const float *data, size_t num_elements, float scale, int32_t zero_point)
Definition TestUtils.h:178
std::pair< float, int32_t > quantizationParams(float f_min, float f_max)
Definition TestUtils.h:218
std::vector< T > extractTensorData(const Tensor &tensor)
Definition TestUtils.h:161
Tensor makeInputTensor(const Shape &shape, const std::vector< typename DataTypeImpl< DT >::Type > &data, IMemoryManager *memory_manager)
Definition TestUtils.h:41
DataType
"scalar" value type
Definition DataType.h:32
T must_cast(loco::Node *node)
C++ scalar type corresponding to each DataType.
Definition DataType.h:58