ONE - On-device Neural Engine
Loading...
Searching...
No Matches
sparsity::FormatConverter< T > Class Template Reference

#include <SparsityFormatConverter.h>

Public Member Functions

 FormatConverter (const std::vector< int > &shape, const TfLiteSparsity &sparsity)
 
const std::vector< T > & GetData ()
 
const std::vector< std::vector< int > > & GetDimMetadata ()
 
bool SparseToDense (const T *src_data)
 
 FormatConverter (const std::vector< int32_t > &shape, const std::vector< int32_t > &traversal_order, const std::vector< TfLiteDimensionType > &format, const std::vector< int32_t > &block_size={}, const std::vector< int32_t > &block_map={})
 
bool DenseToSparse (const T *src_data)
 
const std::vector< T > & GetData ()
 
const std::vector< std::vector< int32_t > > & GetDimMetadata ()
 

Detailed Description

template<typename T>
class sparsity::FormatConverter< T >

Definition at line 69 of file SparsityFormatConverter.h.

Constructor & Destructor Documentation

◆ FormatConverter() [1/2]

template<typename T >
sparsity::FormatConverter< T >::FormatConverter ( const std::vector< int > &  shape,
const TfLiteSparsity &  sparsity 
)

Definition at line 69 of file SparsityFormatConverter.cpp.

70{
71 auto traversal_order = TfLiteIntArrayToVector(sparsity.traversal_order);
72 auto block_map = TfLiteIntArrayToVector(sparsity.block_map);
73
74 std::vector<TfLiteDimensionType> format(sparsity.dim_metadata_size);
75 std::vector<int> dense_size(sparsity.dim_metadata_size);
76 std::vector<std::vector<int>> segments(sparsity.dim_metadata_size);
77 std::vector<std::vector<int>> indices(sparsity.dim_metadata_size);
78 for (int i = 0; i < sparsity.dim_metadata_size; i++)
79 {
80 format[i] = sparsity.dim_metadata[i].format;
81 dense_size[i] = sparsity.dim_metadata[i].dense_size;
82 segments[i] = TfLiteIntArrayToVector(sparsity.dim_metadata[i].array_segments);
83 indices[i] = TfLiteIntArrayToVector(sparsity.dim_metadata[i].array_indices);
84 }
85
86 InitSparseToDenseConverter(shape, std::move(traversal_order), std::move(format),
87 std::move(dense_size), std::move(segments), std::move(indices),
88 std::move(block_map));
89}

◆ FormatConverter() [2/2]

template<typename T >
sparsity::FormatConverter< T >::FormatConverter ( const std::vector< int32_t > &  shape,
const std::vector< int32_t > &  traversal_order,
const std::vector< TfLiteDimensionType > &  format,
const std::vector< int32_t > &  block_size = {},
const std::vector< int32_t > &  block_map = {} 
)

Member Function Documentation

◆ DenseToSparse()

template<typename T >
bool sparsity::FormatConverter< T >::DenseToSparse ( const T *  src_data)

Definition at line 217 of file Convert.cpp.

218{
219 int num_original_dims = dense_shape_.size();
220 int num_block_dims = block_map_.size();
221 int num_expanded_dims = num_original_dims + num_block_dims;
222 std::vector<int> expanded_shape(num_expanded_dims);
223 for (int i = 0; i < num_expanded_dims; i++)
224 {
225 if (i < num_original_dims)
226 {
227 expanded_shape[i] = blocked_shape_[i];
228 }
229 else
230 {
231 expanded_shape[i] = block_size_[i - num_original_dims];
232 }
233 }
234
235 std::vector<int> shape_offset(num_original_dims);
236 shape_offset[shape_offset.size() - 1] = 1;
237 for (int i = num_original_dims - 1; i > 0; --i)
238 {
239 shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
240 }
241
242 std::vector<int> expanded_shape_offset(num_expanded_dims);
243 for (int i = 0; i < num_original_dims; ++i)
244 {
245 expanded_shape_offset[i] = shape_offset[i];
246 }
247 for (int i = 0; i < num_block_dims; ++i)
248 {
249 int mapped_dim = block_map_[i];
250 expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
251 expanded_shape_offset[mapped_dim] *= block_size_[i];
252 }
253
254 std::vector<int> dst_ordered_offset(num_expanded_dims);
255 for (int i = 0; i < num_expanded_dims; ++i)
256 {
257 dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
258 }
259
260 std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
261 std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);
262 std::vector<int> inner_compressed_dim(num_expanded_dims);
263 int most_recent_compressed_dim = -1;
264 std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
265 int segment_count = 1;
266 for (int i = num_expanded_dims - 1; i >= 0; --i)
267 {
268 inner_compressed_dim[i] = most_recent_compressed_dim;
269 if (format_[i] == kTfLiteDimSparseCSR)
270 {
271 most_recent_compressed_dim = i;
272 num_segments_of_next_compressed_dim[i] = segment_count;
273 segment_count = 1;
274 }
275 else
276 {
277 num_segments_of_next_compressed_dim[i] = -1;
278 segment_count *= expanded_shape[traversal_order_[i]];
279 }
280 }
281
282 dim_metadata_.resize(num_expanded_dims * 2);
283 std::vector<int> dst_sparse_dims;
284 dst_sparse_dims.reserve(num_expanded_dims);
285 for (int i = 0; i < num_expanded_dims; ++i)
286 {
287 dim_metadata_[i * 2].clear();
288 dim_metadata_[i * 2 + 1].clear();
289 if (format_[i] == kTfLiteDimDense)
290 {
291 // If dimension is dense, just store the shape.
292 dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
293 }
294 else
295 {
296 dim_metadata_[i * 2].push_back(0); // Segment array always begins with 0.
297 dst_sparse_dims.push_back(i); // Add dimension to the sparse list.
298 }
299 }
300
301 // This algorithm assumes that the block size is small enough for all the
302 // elements to fit in cache, so the strided accesses from different traversal
303 // order and the write-first-erase-later strategy shouldn't be too slow
304 int dst_dim_idx = num_expanded_dims;
305 std::vector<int> coordinate(num_expanded_dims, 0);
306 int dense_tensor_idx = 0;
307 while (dst_dim_idx >= 0)
308 {
309 if (dst_dim_idx == num_expanded_dims)
310 {
311 // We have a complete coordinate. Add the element to the value array if it
312 // is not zero, or if the last dimension is dense.
313 if (!IsZero(src_data[dense_tensor_idx]))
314 {
315 data_.push_back(src_data[dense_tensor_idx]);
316 // Mark all sparse dimensions that their current indices have nonzeroes.
317 for (auto dst_dim : dst_sparse_dims)
318 {
319 if (!dst_dim_has_nonzeroes[dst_dim])
320 {
321 // Only add the index to the indices array if the current nonzero
322 // is the first nonzero of the block.
323 dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
324 dst_dim_has_nonzeroes[dst_dim] = true;
325 }
326 }
327 }
328 else if (format_[num_expanded_dims - 1] == kTfLiteDimDense)
329 {
330 data_.push_back(src_data[dense_tensor_idx]);
331 }
332 --dst_dim_idx;
333 }
334 else
335 {
336 int original_dim_idx = traversal_order_[dst_dim_idx];
337 int dim_size = expanded_shape[original_dim_idx];
338 if (dst_dim_has_nonzeroes[dst_dim_idx])
339 {
340 // If the previous block has nonzeroes, reset the flag to false since
341 // we have just moved to a new block.
342 dst_dim_has_nonzeroes[dst_dim_idx] = false;
343 }
344 else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
345 {
346 // This block is empty. Delete unnecessary values if compressed.
347 int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
348 int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
349 num_segments_of_next_compressed_dim[dst_dim_idx];
350 if (next_compressed_dim >= 0)
351 {
352 auto &segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
353 segments.erase(segments.begin() + 1 + erase_offset, segments.end());
354 }
355 else
356 {
357 data_.erase(data_.begin() + erase_offset, data_.end());
358 }
359 }
360 if (++coordinate[dst_dim_idx] < dim_size)
361 {
362 // The current dst_dim_idx is valid (not out of bound).
363 dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
364 ++dst_dim_idx;
365 }
366 else
367 {
368 // dst_dim_idx has reached its dim size. Update segment array and go
369 // back to incrementing the previous dimension (dst_dim_idx - 1).
370 if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
371 {
372 dim_metadata_[2 * dst_dim_idx].push_back(dim_metadata_[2 * dst_dim_idx + 1].size());
373 }
374 coordinate[dst_dim_idx] = -1;
375 dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
376 --dst_dim_idx;
377 }
378 }
379 }
380
381 return true;
382}
int32_t size[5]
Definition Slice.cpp:35

References sparsity::kTfLiteDimDense, sparsity::kTfLiteDimSparseCSR, and size.

◆ GetData() [1/2]

template<typename T >
const std::vector< T > & sparsity::FormatConverter< T >::GetData ( )
inline

Definition at line 78 of file SparsityFormatConverter.h.

78{ return data_; }

◆ GetData() [2/2]

template<typename T >
const std::vector< T > & sparsity::FormatConverter< T >::GetData ( )
inline

Definition at line 63 of file Convert.h.

63{ return data_; }

◆ GetDimMetadata() [1/2]

template<typename T >
const std::vector< std::vector< int > > & sparsity::FormatConverter< T >::GetDimMetadata ( )
inline

Definition at line 79 of file SparsityFormatConverter.h.

79{ return dim_metadata_; }

◆ GetDimMetadata() [2/2]

template<typename T >
const std::vector< std::vector< int32_t > > & sparsity::FormatConverter< T >::GetDimMetadata ( )
inline

Definition at line 64 of file Convert.h.

64{ return dim_metadata_; }

◆ SparseToDense()

template<typename T >
bool sparsity::FormatConverter< T >::SparseToDense ( const T *  src_data)

Definition at line 202 of file SparsityFormatConverter.cpp.

203{
204 data_.resize(dense_size_);
205 std::fill(data_.begin(), data_.end(), T(0));
206
207 int total_rank = traversal_order_.size();
208 int src_data_ptr = 0;
209 std::vector<int> indices(total_rank);
210 Populate(src_data, indices, 0, 0, &src_data_ptr, data_.data());
211
212 return true;
213}

The documentation for this class was generated from the following files: