// NOTE(review): this chunk is a line-sampled extract -- each line carries a
// fused original line number, and braces plus several statements are missing.
// Purpose (from the visible code): convert a tflchef TensorSparsity_IndexVec
// value into the corresponding tflite SparseIndexVector flatbuffer union,
// dispatching on value.type(). The function's signature starts before this
// extract; only its final parameter is visible below.
142 const ::tflchef::TensorSparsity_IndexVec &value)
// Select which SparseIndexVector variant to serialize.
144 auto sparse_idx_type = value.type();
146 switch (sparse_idx_type)
// NONE: the statement(s) for this case are not visible in this extract --
// presumably it returns an empty/null union offset; TODO confirm upstream.
148 case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
// INT32VEC: copy the dim() values into an int32 vector, serialize it with the
// FlatBufferBuilder, and wrap the offset as an Int32Vector union.
150 case tflchef::SparseIndexVecType::INT32VEC:
152 auto values_vec_int32 = std::vector<int32_t>{value.dim().begin(), value.dim().end()};
// NOTE(review): `values_int32` is used below but its defining
// `fb.CreateVector(values_vec_int32)` line is absent from this extract
// (compare the complete UINT16VEC case) -- likely lost in extraction, not a
// real defect; verify against the full source.
154 return tflite::CreateInt32Vector(fb, values_int32).Union();
// UINT16VEC: same pattern, with a uint16_t payload (this case is complete).
156 case tflchef::SparseIndexVecType::UINT16VEC:
158 auto values_vec_uint16 = std::vector<uint16_t>{value.dim().begin(), value.dim().end()};
159 auto values_uint16 = fb.
CreateVector(values_vec_uint16);
160 return tflite::CreateUint16Vector(fb, values_uint16).Union();
// UINT8VEC: same pattern, with a uint8_t payload.
162 case tflchef::SparseIndexVecType::UINT8VEC:
164 auto values_vec_uint8 = std::vector<uint8_t>{value.dim().begin(), value.dim().end()};
// NOTE(review): as in the INT32 case, the `fb.CreateVector(...)` statement
// defining `values_uint8` is not visible here.
166 return tflite::CreateUint8Vector(fb, values_uint8).Union();
// Any unrecognized enum value is a hard error rather than a silent default.
172 throw std::runtime_error(
"Unknown SparseIndexVector type");
// NOTE(review): line-sampled extract of a member function (its header and
// closing brace are outside this view; conditions/else-branches for several
// ifs are missing). From the members it touches (dense_shape_, block_map_,
// blocked_shape_, block_size_, traversal_order_, dim_metadata_, data_) this
// looks like a dense->sparse tensor format conversion in the style of TFLite's
// FormatConverter::DenseToSparse -- TODO confirm against the full source.
//
// Phase 1: build the "expanded" shape -- original dims followed by one extra
// dim per block dimension, so a blocked tensor is traversed block-wise.
219 int num_original_dims = dense_shape_.size();
220 int num_block_dims = block_map_.size();
221 int num_expanded_dims = num_original_dims + num_block_dims;
222 std::vector<int> expanded_shape(num_expanded_dims);
223 for (
int i = 0; i < num_expanded_dims; i++)
225 if (i < num_original_dims)
227 expanded_shape[i] = blocked_shape_[i];
231 expanded_shape[i] = block_size_[i - num_original_dims];
// Phase 2: row-major strides of the dense shape (innermost stride = 1).
235 std::vector<int> shape_offset(num_original_dims);
236 shape_offset[shape_offset.size() - 1] = 1;
237 for (
int i = num_original_dims - 1; i > 0; --i)
239 shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
// Phase 3: strides for the expanded dims. Block dims inherit the stride of
// the original dim they map to; that original dim's stride is then scaled by
// the block size so stepping it skips a whole block.
242 std::vector<int> expanded_shape_offset(num_expanded_dims);
243 for (
int i = 0; i < num_original_dims; ++i)
245 expanded_shape_offset[i] = shape_offset[i];
247 for (
int i = 0; i < num_block_dims; ++i)
249 int mapped_dim = block_map_[i];
250 expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
251 expanded_shape_offset[mapped_dim] *= block_size_[i];
// Phase 4: reorder the strides into destination traversal order, so advancing
// coordinate[i] below moves dense_tensor_idx by the right amount.
254 std::vector<int> dst_ordered_offset(num_expanded_dims);
255 for (
int i = 0; i < num_expanded_dims; ++i)
257 dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
// Phase 5: per-dim bookkeeping for the traversal. For each dim, record the
// next inner compressed (sparse) dim, and how many segments a compressed dim
// contributes; -1 marks "no inner compressed dim" (sentinel).
260 std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
261 std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(),
false);
262 std::vector<int> inner_compressed_dim(num_expanded_dims);
263 int most_recent_compressed_dim = -1;
264 std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
265 int segment_count = 1;
266 for (
int i = num_expanded_dims - 1; i >= 0; --i)
// Walk inner->outer: remember the nearest compressed dim seen so far.
// NOTE(review): the condition distinguishing the compressed vs. dense branch
// (original lines ~269-276) is not visible in this extract.
268 inner_compressed_dim[i] = most_recent_compressed_dim;
271 most_recent_compressed_dim = i;
272 num_segments_of_next_compressed_dim[i] = segment_count;
277 num_segments_of_next_compressed_dim[i] = -1;
278 segment_count *= expanded_shape[traversal_order_[i]];
// Phase 6: initialize dim_metadata_ -- two entries per dim: [i*2] holds the
// dense size (or the segment array for a sparse dim, seeded with 0), and
// [i*2+1] holds the indices array. Sparse dims are remembered for phase 7.
// NOTE(review): the dense/sparse condition between lines 288 and 292 is not
// visible here.
282 dim_metadata_.resize(num_expanded_dims * 2);
283 std::vector<int> dst_sparse_dims;
284 dst_sparse_dims.reserve(num_expanded_dims);
285 for (
int i = 0; i < num_expanded_dims; ++i)
287 dim_metadata_[i * 2].clear();
288 dim_metadata_[i * 2 + 1].clear();
292 dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
296 dim_metadata_[i * 2].push_back(0);
297 dst_sparse_dims.push_back(i);
// Phase 7: iterative (stack-free) depth-first walk over the expanded dims in
// traversal order. dst_dim_idx == num_expanded_dims means "at a leaf element";
// smaller values mean we are advancing/closing that dimension. coordinate[]
// is the current multi-index; dense_tensor_idx is the flat source offset.
304 int dst_dim_idx = num_expanded_dims;
305 std::vector<int> coordinate(num_expanded_dims, 0);
306 int dense_tensor_idx = 0;
307 while (dst_dim_idx >= 0)
309 if (dst_dim_idx == num_expanded_dims)
// Leaf: a nonzero element is appended to data_, and each enclosing sparse
// dim that has not yet recorded a nonzero in the current segment gets this
// element's coordinate pushed into its indices array.
313 if (!IsZero(src_data[dense_tensor_idx]))
315 data_.push_back(src_data[dense_tensor_idx]);
317 for (
auto dst_dim : dst_sparse_dims)
319 if (!dst_dim_has_nonzeroes[dst_dim])
323 dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
324 dst_dim_has_nonzeroes[dst_dim] =
true;
// NOTE(review): this second push_back (original line 330) sits in a branch
// whose condition is not visible -- presumably the dense-element path that
// stores zeros as well; confirm against the full source.
330 data_.push_back(src_data[dense_tensor_idx]);
// Closing a dimension: if the just-finished segment of a sparse dim turned
// out to be all zeroes, roll back the speculatively-added indices (in the
// inner compressed dim's segment array) and the corresponding data_ entries.
336 int original_dim_idx = traversal_order_[dst_dim_idx];
337 int dim_size = expanded_shape[original_dim_idx];
338 if (dst_dim_has_nonzeroes[dst_dim_idx])
342 dst_dim_has_nonzeroes[dst_dim_idx] =
false;
347 int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
// erase_offset = surviving entries so far; everything past it is rollback.
348 int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
349 num_segments_of_next_compressed_dim[dst_dim_idx];
350 if (next_compressed_dim >= 0)
// +1 skips the leading 0 the segment array was seeded with in phase 6.
352 auto &segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
353 segments.erase(segments.begin() + 1 + erase_offset, segments.end());
357 data_.erase(data_.begin() + erase_offset, data_.end());
// Advance this dimension's coordinate; on overflow, close the segment (push
// the running indices count into the segment array), reset the coordinate
// (-1 so the caller's increment brings it back to 0), and rewind the flat
// source offset by the full extent just traversed.
360 if (++coordinate[dst_dim_idx] < dim_size)
363 dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
372 dim_metadata_[2 * dst_dim_idx].push_back(dim_metadata_[2 * dst_dim_idx + 1].
size());
374 coordinate[dst_dim_idx] = -1;
375 dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;