ONE - On-device Neural Engine
Convert.cpp
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Convert.h"

#include <stdexcept>

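// The as_tflite_* helpers below map tflchef recipe enums onto their tflite
// flatbuffer schema counterparts; each one throws std::runtime_error when it
// meets a value the schema does not cover.
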
tflite::Padding as_tflite_padding(const tflchef::Padding &value)
{
  switch (value)
  {
    case tflchef::SAME:
      return tflite::Padding_SAME;
    case tflchef::VALID:
      return tflite::Padding_VALID;
    default:
      break;
  }

  throw std::runtime_error{"Unknown padding value"};
}

tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &value)
{
  switch (value)
  {
    case tflchef::NONE:
      return tflite::ActivationFunctionType_NONE;
    case tflchef::RELU:
      return tflite::ActivationFunctionType_RELU;
    case tflchef::RELU_N1_TO_1:
      return tflite::ActivationFunctionType_RELU_N1_TO_1;
    case tflchef::RELU6:
      return tflite::ActivationFunctionType_RELU6;
    case tflchef::TANH:
      return tflite::ActivationFunctionType_TANH;
    case tflchef::SIGN_BIT:
      return tflite::ActivationFunctionType_SIGN_BIT;
    default:
      break;
  }

  throw std::runtime_error{"Unknown activation"};
}

tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value)
{
  switch (value)
  {
    case tflchef::FLOAT32:
      return tflite::TensorType_FLOAT32;
    case tflchef::FLOAT16:
      return tflite::TensorType_FLOAT16;
    case tflchef::INT32:
      return tflite::TensorType_INT32;
    case tflchef::UINT8:
      return tflite::TensorType_UINT8;
    case tflchef::INT64:
      return tflite::TensorType_INT64;
    case tflchef::STRING:
      return tflite::TensorType_STRING;
    case tflchef::BOOL:
      return tflite::TensorType_BOOL;
    case tflchef::INT16:
      return tflite::TensorType_INT16;
    case tflchef::INT8:
      return tflite::TensorType_INT8;
    case tflchef::INT4:
      return tflite::TensorType_INT4;
    default:
      break;
  }

  throw std::runtime_error{"Unknown tensor type"};
}

tflite::MirrorPadMode as_tflite_mirrorpadmode(const tflchef::MirrorPadMode &value)
{
  switch (value)
  {
    case tflchef::REFLECT:
      return tflite::MirrorPadMode_REFLECT;
    case tflchef::SYMMETRIC:
      return tflite::MirrorPadMode_SYMMETRIC;
    default:
      break;
  }

  throw std::runtime_error{"Unknown mirrorpad mode"};
}

tflite::DimensionType as_tflite_dimensiontype(const tflchef::DimensionType &value)
{
  switch (value)
  {
    case tflchef::DimensionType::DENSE:
      return tflite::DimensionType_DENSE;
    case tflchef::DimensionType::SPARSE_CSR:
      return tflite::DimensionType_SPARSE_CSR;
    default:
      break;
  }

  throw std::runtime_error("Unknown dimension type");
}

tflite::SparseIndexVector as_tflite_sparse_idx_vec_type(const tflchef::SparseIndexVecType &value)
{
  switch (value)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return tflite::SparseIndexVector_NONE;
    case tflchef::SparseIndexVecType::INT32VEC:
      return tflite::SparseIndexVector_Int32Vector;
    case tflchef::SparseIndexVecType::UINT16VEC:
      return tflite::SparseIndexVector_Uint16Vector;
    case tflchef::SparseIndexVecType::UINT8VEC:
      return tflite::SparseIndexVector_Uint8Vector;
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}
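
// Serializes a tflchef sparsity index vector into the flatbuffer under
// construction, returning the type-erased union offset that the tflite
// sparsity tables expect.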
flatbuffers::Offset<void> as_tflite_sparse_index_vec(flatbuffers::FlatBufferBuilder &fb,
                                                     const ::tflchef::TensorSparsity_IndexVec &value)
{
  auto sparse_idx_type = value.type();

  switch (sparse_idx_type)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return flatbuffers::Offset<void>();
    case tflchef::SparseIndexVecType::INT32VEC:
    {
      auto values_vec_int32 = std::vector<int32_t>{value.dim().begin(), value.dim().end()};
      auto values_int32 = fb.CreateVector(values_vec_int32);
      return tflite::CreateInt32Vector(fb, values_int32).Union();
    }
    case tflchef::SparseIndexVecType::UINT16VEC:
    {
      auto values_vec_uint16 = std::vector<uint16_t>{value.dim().begin(), value.dim().end()};
      auto values_uint16 = fb.CreateVector(values_vec_uint16);
      return tflite::CreateUint16Vector(fb, values_uint16).Union();
    }
    case tflchef::SparseIndexVecType::UINT8VEC:
    {
      auto values_vec_uint8 = std::vector<uint8_t>{value.dim().begin(), value.dim().end()};
      auto values_uint8 = fb.CreateVector(values_vec_uint8);
      return tflite::CreateUint8Vector(fb, values_uint8).Union();
    }
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}

// namespace sparsity code referenced from
// https://github.com/tensorflow/tensorflow/blob/3f878cff5b698b82eea85db2b60d65a2e320850e/
// tensorflow/lite/kernels/internal/utils/sparsity_format_converter.cc

namespace sparsity
{

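// FormatConverter re-encodes a dense tensor in the TFLite sparse tensor
// format: dimensions are visited in traversal_order, each one marked DENSE
// or SPARSE_CSR in format, while block_map/block_size fold an optional
// blocked layout into the traversal as extra trailing dimensions.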
template <typename T>
FormatConverter<T>::FormatConverter(const std::vector<int> &shape,
                                    const std::vector<int> &traversal_order,
                                    const std::vector<TfLiteDimensionType> &format,
                                    const std::vector<int> &block_size,
                                    const std::vector<int> &block_map)
  : dense_shape_(shape), traversal_order_(traversal_order), block_size_(block_size),
    block_map_(block_map)
{
  dense_size_ = 1;
  int block_dim = 0;
  blocked_shape_.resize(shape.size());
  format_.resize(shape.size() + block_map.size());
  for (int i = 0; i < shape.size(); i++)
  {
    format_[i] = format[traversal_order[i]];
    dense_size_ *= shape[i];
    if (block_dim < block_map.size() && block_map[block_dim] == i)
    {
      blocked_shape_[i] = shape[i] / block_size[block_dim];
      block_dim++;
    }
    else
    {
      blocked_shape_[i] = shape[i];
    }
  }

  // Only dense blocks are supported.
  for (int i = 0; i < block_map.size(); i++)
  {
    format_[i + shape.size()] = kTfLiteDimDense;
  }
}

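// DenseToSparse walks the dense tensor in traversal order, appending nonzero
// values to data_ and filling dim_metadata_: a dense dimension stores only
// its size, while a sparse (CSR) dimension stores a segment array and an
// index array.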
template <typename T> bool FormatConverter<T>::DenseToSparse(const T *src_data)
{
  int num_original_dims = dense_shape_.size();
  int num_block_dims = block_map_.size();
  int num_expanded_dims = num_original_dims + num_block_dims;
  std::vector<int> expanded_shape(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; i++)
  {
    if (i < num_original_dims)
    {
      expanded_shape[i] = blocked_shape_[i];
    }
    else
    {
      expanded_shape[i] = block_size_[i - num_original_dims];
    }
  }

  std::vector<int> shape_offset(num_original_dims);
  shape_offset[shape_offset.size() - 1] = 1;
  for (int i = num_original_dims - 1; i > 0; --i)
  {
    shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
  }

  std::vector<int> expanded_shape_offset(num_expanded_dims);
  for (int i = 0; i < num_original_dims; ++i)
  {
    expanded_shape_offset[i] = shape_offset[i];
  }
  for (int i = 0; i < num_block_dims; ++i)
  {
    int mapped_dim = block_map_[i];
    expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
    expanded_shape_offset[mapped_dim] *= block_size_[i];
  }

  std::vector<int> dst_ordered_offset(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
  }

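  // For each dimension, record the nearest inner compressed (CSR) dimension
  // and how many of its segments one step of this dimension spans; the erase
  // logic further down uses this to drop speculatively written segments and
  // values when a block turns out to be all zeros.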
  std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
  std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);
  std::vector<int> inner_compressed_dim(num_expanded_dims);
  int most_recent_compressed_dim = -1;
  std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
  int segment_count = 1;
  for (int i = num_expanded_dims - 1; i >= 0; --i)
  {
    inner_compressed_dim[i] = most_recent_compressed_dim;
    if (format_[i] == kTfLiteDimSparseCSR)
    {
      most_recent_compressed_dim = i;
      num_segments_of_next_compressed_dim[i] = segment_count;
      segment_count = 1;
    }
    else
    {
      num_segments_of_next_compressed_dim[i] = -1;
      segment_count *= expanded_shape[traversal_order_[i]];
    }
  }

  dim_metadata_.resize(num_expanded_dims * 2);
  std::vector<int> dst_sparse_dims;
  dst_sparse_dims.reserve(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dim_metadata_[i * 2].clear();
    dim_metadata_[i * 2 + 1].clear();
    if (format_[i] == kTfLiteDimDense)
    {
      // If dimension is dense, just store the shape.
      dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
    }
    else
    {
      dim_metadata_[i * 2].push_back(0); // Segment array always begins with 0.
      dst_sparse_dims.push_back(i);      // Add dimension to the sparse list.
    }
  }

  // This algorithm assumes that the block size is small enough for all the
  // elements to fit in cache, so the strided accesses from the different
  // traversal order and the write-first-erase-later strategy shouldn't be
  // too slow.
  int dst_dim_idx = num_expanded_dims;
  std::vector<int> coordinate(num_expanded_dims, 0);
  int dense_tensor_idx = 0;
  while (dst_dim_idx >= 0)
  {
    if (dst_dim_idx == num_expanded_dims)
    {
      // We have a complete coordinate. Add the element to the value array if it
      // is not zero, or if the last dimension is dense.
      if (!IsZero(src_data[dense_tensor_idx]))
      {
        data_.push_back(src_data[dense_tensor_idx]);
        // Mark, for every sparse dimension, that its current index has a nonzero.
        for (auto dst_dim : dst_sparse_dims)
        {
          if (!dst_dim_has_nonzeroes[dst_dim])
          {
            // Only add the index to the indices array if the current nonzero
            // is the first nonzero of the block.
            dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
            dst_dim_has_nonzeroes[dst_dim] = true;
          }
        }
      }
      else if (format_[num_expanded_dims - 1] == kTfLiteDimDense)
      {
        data_.push_back(src_data[dense_tensor_idx]);
      }
      --dst_dim_idx;
    }
    else
    {
      int original_dim_idx = traversal_order_[dst_dim_idx];
      int dim_size = expanded_shape[original_dim_idx];
      if (dst_dim_has_nonzeroes[dst_dim_idx])
      {
        // If the previous block has nonzeroes, reset the flag to false since
        // we have just moved to a new block.
        dst_dim_has_nonzeroes[dst_dim_idx] = false;
      }
      else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
      {
        // This block is empty. Delete unnecessary values if compressed.
        int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
        int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
                           num_segments_of_next_compressed_dim[dst_dim_idx];
        if (next_compressed_dim >= 0)
        {
          auto &segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
          segments.erase(segments.begin() + 1 + erase_offset, segments.end());
        }
        else
        {
          data_.erase(data_.begin() + erase_offset, data_.end());
        }
      }
      if (++coordinate[dst_dim_idx] < dim_size)
      {
        // The current dst_dim_idx is valid (not out of bound).
        dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
        ++dst_dim_idx;
      }
      else
      {
        // dst_dim_idx has reached its dim size. Update segment array and go
        // back to incrementing the previous dimension (dst_dim_idx - 1).
        if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
        {
          dim_metadata_[2 * dst_dim_idx].push_back(dim_metadata_[2 * dst_dim_idx + 1].size());
        }
        coordinate[dst_dim_idx] = -1;
        dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
        --dst_dim_idx;
      }
    }
  }

  return true;
}

template <typename T> bool FormatConverter<T>::IsZero(const T val)
{
  return (val == static_cast<T>(0));
}

template class FormatConverter<float>;
template class FormatConverter<uint16_t>; // float16

} // namespace sparsity
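
For orientation, a minimal usage sketch follows (not part of Convert.cpp). It assumes the module's FormatConverter header is on the include path; the 4x4 shape, the zero pattern, and the example() wrapper are illustrative, while the constructor signature and the DenseToSparse call mirror the definitions above.

#include <vector>

// Hypothetical caller: encode a 4x4 float matrix with dim 0 dense and
// dim 1 sparse (CSR), row-major traversal, and no block dimensions.
void example()
{
  const std::vector<float> dense = {1, 0, 0, 0, //
                                    0, 0, 2, 0, //
                                    0, 0, 0, 0, //
                                    0, 0, 3, 4};

  sparsity::FormatConverter<float> converter(
    /*shape=*/{4, 4}, /*traversal_order=*/{0, 1},
    /*format=*/{kTfLiteDimDense, kTfLiteDimSparseCSR},
    /*block_size=*/{}, /*block_map=*/{});

  converter.DenseToSparse(dense.data());
  // converter now holds the nonzero values {1, 2, 3, 4} and per-dimension
  // metadata: dim 0 keeps only its size (4), while dim 1 gets the segment
  // array {0, 1, 2, 2, 4} and the index array {0, 2, 2, 3}, ready to be
  // serialized with as_tflite_sparse_index_vec.
}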