ONE - On-device Neural Engine
ModelChef.cpp
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "tflchef/ModelChef.h"
18#include <souschef/RangedArguments.h>
19#include <souschef/Registry.h>
20
21#include "Convert.h"
22
23#include <souschef/DataChefs.h>
24
25#include "OpChef.h"
26#include "OpChefs.h"
27
28#include <souschef/Dataset.h>
29#include <souschef/Dims.h>
30
31#include "Log.h"
32
33#include <iterator>
34#include <map>
35#include <string>
36#include <vector>
37
38#include <cassert>
39#include <fstream>
40#include <iostream>
41#include <numeric>
42#include <sstream>
43#include <stdexcept>
44
45using namespace souschef;
46
47namespace
48{
49
50struct DataChefRegistry final : public Registry<DataChefFactory>
51{
52};
53
54DataChefRegistry &data_chef_registry(const tflchef::TensorType &type)
55{
56 static DataChefRegistry s32;
57 static DataChefRegistry s64;
58 static DataChefRegistry fp32;
59 static DataChefRegistry u8;
60 static DataChefRegistry string;
61 static DataChefRegistry boolean;
62 static DataChefRegistry s16;
63 static DataChefRegistry fp16;
64 static DataChefRegistry s8;
65 static DataChefRegistry s4;
66
67 switch (type)
68 {
69 case tflchef::INT32:
70 return s32;
71 case tflchef::INT64:
72 return s64;
73 case tflchef::FLOAT32:
74 return fp32;
75 case tflchef::FLOAT16:
76 return fp16;
77 case tflchef::UINT8:
78 return u8;
79 case tflchef::STRING:
80 return string;
81 case tflchef::BOOL:
82 return boolean;
83 case tflchef::INT16:
84 return s16;
85 case tflchef::INT8:
86 return s8;
87 case tflchef::INT4:
88 return s4;
89 default:
90 break;
91 }
92
93 throw std::runtime_error{"Unknown tensor type"};
94}
95
96struct OpChefRegistry final : public Registry<OpChefFactory>
97{
98};
99
100OpChefRegistry &op_chef_registry(void)
101{
102 static OpChefRegistry registry;
103 return registry;
104}
105
107std::map<tflite::BuiltinOperator, int32_t>
108gather_builtincode_map(const ::tflchef::ModelRecipe &model_recipe)
109{
110 // Key and value of the map are BuiltinOperator and operator version
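// e.g. if one graph uses Conv2D with version 1 and another uses Conv2D with version 3,
// the resulting entry is {BuiltinOperator_CONV_2D: 3} (the highest version wins)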
111 std::map<tflite::BuiltinOperator, int32_t> builtin_map;
112
113 for (const auto &operation : model_recipe.operation())
114 {
115 if (operation.type() == "Custom")
116 continue;
117
118 auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
119 // Differing versions of the same operator are unified to the highest version among them
120 if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
121 builtin_map[op_chef->code()] < operation.version())
122 builtin_map[op_chef->code()] = operation.version();
123 }
124
125 // Add ops used in Graphs(subgraphs)
126 for (int g = 0; g < model_recipe.graph_size(); ++g)
127 {
128 const auto &graph = model_recipe.graph(g);
129 for (const auto &operation : graph.operation())
130 {
131 if (operation.type() == "Custom")
132 continue;
133
134 auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
135 // Differing versions of the same operator are unified to the highest version among them
136 if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
137 builtin_map[op_chef->code()] < operation.version())
138 builtin_map[op_chef->code()] = operation.version();
139 }
140 }
141
142 return builtin_map;
143}
144
146std::set<std::string> gather_customcode_set(const ::tflchef::ModelRecipe &model_recipe)
147{
148 std::set<std::string> customcode_set;
149 for (const auto &operation : model_recipe.operation())
150 {
151 if (operation.type() == "Custom")
152 {
153 assert(not operation.custom_code().empty());
154 customcode_set.insert(operation.custom_code());
155 }
156 }
157
158 // Add ops used in Graphs(subgraphs)
159 for (int g = 0; g < model_recipe.graph_size(); ++g)
160 {
161 const auto &graph = model_recipe.graph(g);
162 for (const auto &operation : graph.operation())
163 {
164 if (operation.type() == "Custom")
165 {
166 assert(not operation.custom_code().empty());
167 customcode_set.insert(operation.custom_code());
168 }
169 }
170 }
171
172 return customcode_set;
173}
174
175} // namespace
176
177namespace
178{
179
180class ModelChef
181{
182public:
183 ModelChef() = default;
184
185public:
186 void init(void);
187 void cook(const ::tflchef::ModelRecipe &model_recipe);
188
189private:
190 void prepare_initial_buffer(void);
191 void gather_operator_codes(const ::tflchef::ModelRecipe &model_recipe);
192 void gather_signature_defs(const ::tflchef::ModelRecipe &model_recipe);
193
194 template <typename T> void cook_operands(const T &graph);
195
196 template <typename T>
197 void cook_operations(const T &graph, std::map<std::string, int32_t> &symbol_table);
198
199 template <typename T>
200 void cook_graph(const T &graph, std::map<std::string, int32_t> &symbol_table);
201
202 bool finalize_ext_buffer(void);
203
204public:
205 const char *get_buffer_pointer(void) const;
206 size_t get_size(void) const;
207
208private:
209 std::unique_ptr<flatbuffers::FlatBufferBuilder> _flatbuffer_builder;
210
211 std::vector<flatbuffers::Offset<::tflite::SignatureDef>> _signdef_vec;
212 std::vector<flatbuffers::Offset<::tflite::Buffer>> _buffer_vec;
213 std::vector<flatbuffers::Offset<::tflite::OperatorCode>> _code_vec;
214 std::vector<flatbuffers::Offset<::tflite::SubGraph>> _subgraph_vec;
215 std::map<tflite::BuiltinOperator, int32_t> _builtin_code_map;
216 std::vector<std::string> _custom_code_vec;
217 // _symbol_tables stores the symbol_table of each subgraph;
218 // it is used to look up a tensor ID (index) by tensor name
219 std::vector<std::map<std::string, int32_t>> _symbol_tables;
220
221 // per-graph state; must be cleared after each graph is processed
222 // Operand-related
223 std::vector<flatbuffers::Offset<::tflite::Tensor>> _tensor_vec;
224 // Operation-related
225 std::vector<flatbuffers::Offset<::tflite::Operator>> _operator_vec;
226
227 std::string _graph_name;
228
229 // store Buffer data outside the FlatBuffer and use the (Buffer) offset/size fields instead
230 bool _ext_offset = false;
231 std::map<int32_t, std::vector<uint8_t>> _buffer_data_map;
232 std::string _ext_data;
233};
234
235void ModelChef::init(void)
236{
237 _flatbuffer_builder =
238 std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));
239}
240
241std::vector<flatbuffers::Offset<tflite::DimensionMetadata>>
242make_dim_metadata_vec(flatbuffers::FlatBufferBuilder *flatbuffer_builder, int32_t dims_count,
243 const std::vector<int> &traversal_order_vec,
244 const std::vector<sparsity::TfLiteDimensionType> &format_vec,
245 const std::vector<std::vector<int32_t>> &dim_metadata_src)
246{
247 // Build sparsity parameter.
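// dim_metadata_src holds two entries per dimension: entry 2*i carries the array segments
// and entry 2*i+1 the array indices of a sparse (CSR) dimension; for a dense dimension
// only dim_metadata_src[2*i][0] (the dense size) is used.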
248 std::vector<flatbuffers::Offset<tflite::DimensionMetadata>> dim_metadata_vec(dims_count);
249 for (int32_t i = 0; i < dims_count; i++)
250 {
251 const int32_t metadata_idx = 2 * i;
252 if (format_vec[traversal_order_vec[i]] == sparsity::kTfLiteDimSparseCSR)
253 {
254 auto array_segments =
255 tflite::CreateInt32Vector(*flatbuffer_builder,
256 flatbuffer_builder->CreateVector(dim_metadata_src[metadata_idx]))
257 .Union();
258 auto array_indices =
259 tflite::CreateInt32Vector(
260 *flatbuffer_builder, flatbuffer_builder->CreateVector(dim_metadata_src[metadata_idx + 1]))
261 .Union();
262 dim_metadata_vec[i] =
263 tflite::CreateDimensionMetadata(*flatbuffer_builder, tflite::DimensionType_SPARSE_CSR, 0,
264 tflite::SparseIndexVector_Int32Vector, array_segments,
265 tflite::SparseIndexVector_Int32Vector, array_indices);
266 }
267 else
268 {
269 dim_metadata_vec[i] = tflite::CreateDimensionMetadata(
270 *flatbuffer_builder, tflite::DimensionType_DENSE, dim_metadata_src[metadata_idx][0]);
271 }
272 }
273 return dim_metadata_vec;
274}
275
276template <typename T> void ModelChef::cook_operands(const T &graph)
277{
278 int32_t buffer_start = _buffer_vec.size();
279 int32_t buffer_index = 0;
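// Buffers for graph inputs and outputs are created up front (empty), so a tensor that
// appears in graph.input()/graph.output() can later be mapped to buffer_start plus its
// position in that list; buffers for constant (filler) tensors are created on demand below.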
280
281 // Create buffer(s) 1~n(I) for input(s)
282 const auto size_input = graph.input_size();
283 for (int ci = 0; ci < size_input; ++ci)
284 {
285 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
286 _buffer_vec.emplace_back(buffer_builder.Finish());
287 }
288 // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
289 const auto size_output = graph.output_size();
290 for (int co = 0; co < size_output; ++co)
291 {
292 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
293 _buffer_vec.emplace_back(buffer_builder.Finish());
294 }
295
296 auto input_names = as_dataset(graph.input()).vectorize();
297 auto output_names = as_dataset(graph.output()).vectorize();
298
299 for (const auto &operand : graph.operand())
300 {
301 assert(operand.has_name());
302 assert(operand.has_type());
303
304 flatbuffers::Offset<tflite::SparsityParameters> sparsity_index;
305
306 flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape;
307 std::vector<int32_t> dims;
308 if (operand.has_shape())
309 {
310 dims = as_dims(operand.shape());
311 shape = _flatbuffer_builder->CreateVector(dims);
312 }
313
314 auto name = _flatbuffer_builder->CreateString(operand.name());
315
316 buffer_index = 0;
317
318 // Create Buffer if filler is specified
319 if (operand.has_filler())
320 {
321 const auto &filler = operand.filler();
322
323 assert(filler.has_tag());
324
325 auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
326 auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);
327
328 assert(chef != nullptr);
329
330 // Create Data
331 int32_t count = (element_count(dims) > 0) ? element_count(dims) : filler.arg_size();
332 auto data_vec = chef->generate(count);
333
334 if (operand.has_make_sparse() && operand.make_sparse())
335 {
336 assert(not operand.has_sparsity());
337 assert(operand.has_shape());
338 assert(operand.type() != tflchef::TensorType::INT4);
339
340 const int32_t dims_count = dims.size();
341 std::vector<int> traversal_order_vec;
342 std::vector<sparsity::TfLiteDimensionType> format_vec;
343 for (int32_t o = 0; o < dims_count; ++o)
344 traversal_order_vec.push_back(o);
345 for (int32_t o = 0; o < dims_count - 1; ++o)
346 format_vec.push_back(sparsity::kTfLiteDimDense);
347 format_vec.push_back(sparsity::kTfLiteDimSparseCSR);
348
349 if (operand.type() == tflchef::FLOAT32)
350 {
351 ::sparsity::FormatConverter<float> converter(dims, traversal_order_vec, format_vec);
352 converter.DenseToSparse(reinterpret_cast<const float *>(data_vec.data()));
353 const auto &sparse_data = converter.GetData();
354
355 std::vector<uint8_t> sparse_uint8;
356 for (int c = 0; c < sparse_data.size(); ++c)
357 {
358 const float value = sparse_data.at(c);
359 const uint8_t *arr = reinterpret_cast<const uint8_t *>(&value);
360 for (uint32_t b = 0; b < sizeof(float); ++b)
361 {
362 sparse_uint8.emplace_back(arr[b]);
363 }
364 }
365 if (_ext_offset)
366 {
367 buffer_index = _buffer_vec.size();
368 _buffer_data_map[buffer_index] = sparse_uint8;
369
370 auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1);
371 _buffer_vec.emplace_back(buffer);
372 }
373 else
374 {
375 auto data = _flatbuffer_builder->CreateVector(sparse_uint8);
376 // Create Buffer
377 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
378 buffer_builder.add_data(data);
379 auto buffer = buffer_builder.Finish();
380
381 // Update Buffer Index & Vector
382 buffer_index = _buffer_vec.size();
383 _buffer_vec.emplace_back(buffer);
384 }
385
386 // save SparsityParameters
387 auto traversal_order = _flatbuffer_builder->CreateVector(traversal_order_vec);
388
389 // Create block map
390 std::vector<int> block_map_vec{};
391 auto block_map = _flatbuffer_builder->CreateVector(block_map_vec);
392
393 // Create dimension metadata
394 const auto &dim_metadata_src = converter.GetDimMetadata();
395 auto dim_metadata_vec =
396 make_dim_metadata_vec(_flatbuffer_builder.get(), dims_count, traversal_order_vec,
397 format_vec, dim_metadata_src);
398 auto dim_metadata = _flatbuffer_builder->CreateVector(dim_metadata_vec);
399 sparsity_index = tflite::CreateSparsityParameters(*_flatbuffer_builder, traversal_order,
400 block_map, dim_metadata);
401 }
402 else if (operand.type() == tflchef::FLOAT16)
403 {
404 ::sparsity::FormatConverter<uint16_t> converter(dims, traversal_order_vec, format_vec);
405 converter.DenseToSparse(reinterpret_cast<const uint16_t *>(data_vec.data()));
406 const auto &sparse_data = converter.GetData();
407
408 std::vector<uint8_t> sparse_uint8;
409 for (int c = 0; c < sparse_data.size(); ++c)
410 {
411 const uint16_t value = sparse_data.at(c);
412 const uint8_t *arr = reinterpret_cast<const uint8_t *>(&value);
413 for (uint32_t b = 0; b < sizeof(uint16_t); ++b)
414 {
415 sparse_uint8.emplace_back(arr[b]);
416 }
417 }
418 if (_ext_offset)
419 {
420 buffer_index = _buffer_vec.size();
421 _buffer_data_map[buffer_index] = sparse_uint8;
422
423 auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1);
424 _buffer_vec.emplace_back(buffer);
425 }
426 else
427 {
428 auto data = _flatbuffer_builder->CreateVector(sparse_uint8);
429
430 // Create Buffer
431 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
432 buffer_builder.add_data(data);
433 auto buffer = buffer_builder.Finish();
434
435 // Update Buffer Index & Vector
436 buffer_index = _buffer_vec.size();
437 _buffer_vec.emplace_back(buffer);
438 }
439
440 // save SparsityParameters
441 auto traversal_order = _flatbuffer_builder->CreateVector(traversal_order_vec);
442
443 // Create block map
444 std::vector<int> block_map_vec{};
445 auto block_map = _flatbuffer_builder->CreateVector(block_map_vec);
446
447 // Create dimension metadata
448 const auto &dim_metadata_src = converter.GetDimMetadata();
449 auto dim_metadata_vec =
450 make_dim_metadata_vec(_flatbuffer_builder.get(), dims_count, traversal_order_vec,
451 format_vec, dim_metadata_src);
452 auto dim_metadata = _flatbuffer_builder->CreateVector(dim_metadata_vec);
453 sparsity_index = tflite::CreateSparsityParameters(*_flatbuffer_builder, traversal_order,
454 block_map, dim_metadata);
455 }
456 else
457 {
458 throw std::runtime_error{"NYI: unsupported operand type"};
459 }
460 }
461 else
462 {
463 // pack for INT4 and replace data_vec
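// Packing example: data_vec = {0x01, 0x02, 0x03} (one 4-bit value per byte, count == 3)
// becomes data_packed = {0x21, 0x03} -- element 0 in the low nibble and element 1 in the
// high nibble of byte 0, element 2 alone in the low nibble of byte 1.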
464 if (operand.type() == tflchef::TensorType::INT4)
465 {
466 uint32_t packed = (count + 1) / 2;
467 std::vector<uint8_t> data_packed(packed);
468 for (uint32_t idx = 0; idx < packed; ++idx)
469 {
470 uint32_t sidx = idx * 2;
471 data_packed[idx] = data_vec[sidx++] & 0x0f;
472 if (sidx < count)
473 data_packed[idx] |= data_vec[sidx] << 4;
474 }
475 data_vec = data_packed;
476 }
477
478 if (_ext_offset)
479 {
480 buffer_index = _buffer_vec.size();
481 _buffer_data_map[buffer_index] = data_vec;
482
483 auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1);
484 _buffer_vec.emplace_back(buffer);
485 }
486 else
487 {
488 auto data = _flatbuffer_builder->CreateVector(data_vec);
489
490 // Create Buffer
491 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
492 buffer_builder.add_data(data);
493 auto buffer = buffer_builder.Finish();
494
495 // Update Buffer Index & Vector
496 buffer_index = _buffer_vec.size();
497 _buffer_vec.emplace_back(buffer);
498 }
499 }
500 }
501 else
502 {
503 // if this is input or output, assign to that buffer_index
504 int idx = 0;
505 for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
506 {
507 if (*it == operand.name())
508 {
509 buffer_index = buffer_start + idx;
510 break;
511 }
512 }
513 if (buffer_index == 0)
514 {
515 idx = 0;
516 for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
517 {
518 if (*it == operand.name())
519 {
520 buffer_index = buffer_start + size_input + idx;
521 break;
522 }
523 }
524 }
525 if (buffer_index == 0)
526 {
527 // we couldn't find the buffer; create an empty buffer for this tensor
528 buffer_index = _buffer_vec.size();
529
530 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
531 _buffer_vec.emplace_back(buffer_builder.Finish());
532 }
533 }
534 assert(buffer_index != 0);
535
536 flatbuffers::Offset<tflite::QuantizationParameters> quant_index;
537
538 // Create QuantizationParameters if quant is specified
539 if (operand.has_quant())
540 {
541 const auto &quant = operand.quant();
542
543 // Create each parameters
544 // NOTE if some parameters are not given, those will be set to default value
545 std::vector<float> quant_max_vec(quant.max_size());
546 std::vector<float> quant_min_vec(quant.min_size());
547 std::vector<float> quant_scale_vec(quant.scale_size());
548 std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());
549
550 for (uint32_t i = 0; i < quant.max_size(); ++i)
551 quant_max_vec.at(i) = quant.max(i);
552 for (uint32_t i = 0; i < quant.min_size(); ++i)
553 quant_min_vec.at(i) = quant.min(i);
554 for (uint32_t i = 0; i < quant.scale_size(); ++i)
555 quant_scale_vec.at(i) = quant.scale(i);
556 for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
557 quant_zero_point_vec.at(i) = quant.zero_point(i);
558
559 auto quant_max = _flatbuffer_builder->CreateVector(quant_max_vec);
560 auto quant_min = _flatbuffer_builder->CreateVector(quant_min_vec);
561 auto quant_scale = _flatbuffer_builder->CreateVector(quant_scale_vec);
562 auto quant_zero_point = _flatbuffer_builder->CreateVector(quant_zero_point_vec);
563
564 // Create QuantizationParameters
565 tflite::QuantizationParametersBuilder quant_builder{*_flatbuffer_builder};
566 quant_builder.add_max(quant_max);
567 quant_builder.add_min(quant_min);
568 quant_builder.add_scale(quant_scale);
569 quant_builder.add_zero_point(quant_zero_point);
570 quant_builder.add_quantized_dimension(quant.quantized_dimension());
571
572 // Update QuantizationParameters Index
573 quant_index = quant_builder.Finish();
574 }
575
576 if (operand.has_sparsity())
577 {
578 const auto &sparsity = operand.sparsity();
579
580 // Create traversal order
581 std::vector<int> traversal_order_vec{sparsity.traversal_order().dim().begin(),
582 sparsity.traversal_order().dim().end()};
583 auto traversal_order = _flatbuffer_builder->CreateVector(traversal_order_vec);
584
585 // Create block map
586 std::vector<int> block_map_vec{sparsity.block_map().dim().begin(),
587 sparsity.block_map().dim().end()};
588 auto block_map = _flatbuffer_builder->CreateVector(block_map_vec);
589
590 // Create dimension metadata
591 std::vector<flatbuffers::Offset<tflite::DimensionMetadata>> dim_metadata_vec;
592 auto recipe_dim_metadata = sparsity.dim_metadata();
593 for (const auto &dm : recipe_dim_metadata)
594 {
595 // Create array segments
596 auto tflite_array_segments =
597 as_tflite_sparse_index_vec(*_flatbuffer_builder, dm.array_segments());
598
599 // Create array indices
600 auto tflite_array_indices =
601 as_tflite_sparse_index_vec(*_flatbuffer_builder, dm.array_indices());
602
603 auto tflite_dim_metadata_builder = tflite::DimensionMetadataBuilder{*_flatbuffer_builder};
604 tflite_dim_metadata_builder.add_format(as_tflite_dimensiontype(dm.format()));
605 tflite_dim_metadata_builder.add_dense_size(dm.dense_size());
606 tflite_dim_metadata_builder.add_array_segments(tflite_array_segments);
607 tflite_dim_metadata_builder.add_array_segments_type(
608 as_tflite_sparse_idx_vec_type(dm.array_segments().type()));
609 tflite_dim_metadata_builder.add_array_indices(tflite_array_indices);
610 tflite_dim_metadata_builder.add_array_indices_type(
611 as_tflite_sparse_idx_vec_type(dm.array_indices().type()));
612 auto tflite_dim_metadata = tflite_dim_metadata_builder.Finish();
613 dim_metadata_vec.emplace_back(tflite_dim_metadata);
614 }
615 auto dim_metadata = _flatbuffer_builder->CreateVector(dim_metadata_vec);
616
617 sparsity_index = tflite::CreateSparsityParameters(*_flatbuffer_builder, traversal_order,
618 block_map, dim_metadata);
619 }
620
621 flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature;
622 if (operand.has_shape_signature())
623 {
624 auto signature = as_dims(operand.shape_signature());
625 shape_signature = _flatbuffer_builder->CreateVector(signature);
626 }
627
628 // Create Tensor
629 tflite::TensorBuilder tensor_builder{*_flatbuffer_builder};
630
631 tensor_builder.add_shape(shape);
632 tensor_builder.add_type(as_tflite_tensortype(operand.type()));
633 tensor_builder.add_buffer(buffer_index);
634 tensor_builder.add_name(name);
635 tensor_builder.add_is_variable(operand.is_variable());
636 if (operand.has_quant())
637 tensor_builder.add_quantization(quant_index);
638 tensor_builder.add_sparsity(sparsity_index);
639 if (operand.has_shape_signature())
640 tensor_builder.add_shape_signature(shape_signature);
641
642 // Append!
643 _tensor_vec.emplace_back(tensor_builder.Finish());
644 }
645}
646
647template <typename T>
648void ModelChef::cook_operations(const T &graph, std::map<std::string, int32_t> &symbol_table)
649{
650 auto lookup = [&](const std::string &name) {
651 if (symbol_table.find(name) != symbol_table.end())
652 return symbol_table.at(name);
653 else if (name == "")
654 return -1; // -1 in TFLite means that optional input tensor is empty.
655 else
656 {
657 std::string msg = "tflchef : input not found in " + _graph_name + " graph";
658 throw std::runtime_error(msg.c_str());
659 }
660 };
661
662 // Create Operator
663 for (const auto &operation : graph.operation())
664 {
665 assert(operation.has_type());
666
667 std::string op_type = operation.type();
668 if (not operation.custom_code().empty())
669 op_type = operation.custom_code();
670
671 auto op_chef = op_chef_registry().lookup(op_type).create(&operation);
672
673 // Create 'inputs'
674 std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
675 auto inputs = _flatbuffer_builder->CreateVector(input_vec);
676
677 // Create 'outputs'
678 std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
679 auto outputs = _flatbuffer_builder->CreateVector(output_vec);
680
681 // Create Option
682 auto options = op_chef->value(*_flatbuffer_builder);
683
684 // Create Custom option
685 auto circle_custom_options = op_chef->custom_value(*_flatbuffer_builder);
686
687 // Create Operator
688 tflite::OperatorBuilder op_builder{*_flatbuffer_builder};
689
690 // Note that opcode_index is an index into the operator_codes vector.
691 // operator_codes consists of builtin codes followed by custom codes, inserted sequentially.
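// e.g. with _builtin_code_map = {ADD, CONV_2D} and _custom_code_vec = {"AwesomeOp"} (an
// illustrative custom op name), ADD maps to opcode_index 0, CONV_2D to 1 and "AwesomeOp"
// to 2: builtin codes come first, custom codes are appended after them.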
692 uint32_t opcode_index = 0;
693 auto op_it = _builtin_code_map.find(op_chef->code());
694 // builtin operator
695 if (op_it != _builtin_code_map.end())
696 {
697 opcode_index = std::distance(_builtin_code_map.begin(), op_it);
698 }
699 // custom operator
700 else
701 {
702 assert(not operation.custom_code().empty());
703 const auto &custom_code = operation.custom_code();
704 auto op_it = std::find(_custom_code_vec.begin(), _custom_code_vec.end(), custom_code);
705 assert(op_it != _custom_code_vec.end());
706 opcode_index = _builtin_code_map.size();
707 opcode_index += std::distance(_custom_code_vec.begin(), op_it);
708 }
709
710 op_builder.add_opcode_index(opcode_index);
711 op_builder.add_inputs(inputs);
712 op_builder.add_outputs(outputs);
713 op_builder.add_builtin_options_type(op_chef->type());
714 op_builder.add_builtin_options(options);
715 op_builder.add_custom_options(circle_custom_options);
716 op_builder.add_custom_options_format(tflite::CustomOptionsFormat_FLEXBUFFERS);
717 // Append Operator
718 _operator_vec.emplace_back(op_builder.Finish());
719 }
720}
721
722template <typename T>
723void ModelChef::cook_graph(const T &graph, std::map<std::string, int32_t> &symbol_table)
724{
725 LOGGER(l);
726
727 assert(symbol_table.empty()); // FIX_CALLER_UNLESS
728 assert(_tensor_vec.empty()); // FIX_CALLER_UNLESS
729 assert(_operator_vec.empty()); // FIX_CALLER_UNLESS
730
731 // override the default graph name when the recipe specifies one
732 if (graph.has_name())
733 _graph_name = graph.name();
734
735 auto lookup = [&](const std::string &name) {
736 if (symbol_table.find(name) != symbol_table.end())
737 return symbol_table.at(name);
738 else if (name == "")
739 return -1; // -1 in TFLite means that optional input tensor is empty.
740 else
741 {
742 std::string msg = "tflchef : input not found in " + _graph_name + " graph";
743 throw std::runtime_error(msg.c_str());
744 }
745 };
746
747 cook_operands(graph);
748
749 for (const auto &operand : graph.operand())
750 {
751 // Update Tensor Name -> Tensor Index Map
752 int32_t tensor_index = symbol_table.size();
753 const auto &tensor_name = operand.name();
754
755 INFO(l) << "Symbol [" << tensor_name << "] = Tensor " << tensor_index << std::endl;
756
757 symbol_table[tensor_name] = tensor_index;
758 }
759
760 cook_operations(graph, symbol_table);
761
762 // Create network input/output vector
763 std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
764 std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();
765
766 // Create "SubGraph" arguments
767 auto tensors = _flatbuffer_builder->CreateVector(_tensor_vec);
768 auto inputs = _flatbuffer_builder->CreateVector(input_vec);
769 auto outputs = _flatbuffer_builder->CreateVector(output_vec);
770 auto operators = _flatbuffer_builder->CreateVector(_operator_vec);
771 auto name = _flatbuffer_builder->CreateString(_graph_name);
772
773 tflite::SubGraphBuilder subgraph_builder{*_flatbuffer_builder};
774
775 subgraph_builder.add_tensors(tensors);
776 subgraph_builder.add_inputs(inputs);
777 subgraph_builder.add_outputs(outputs);
778 subgraph_builder.add_operators(operators);
779 subgraph_builder.add_name(name);
780
781 _subgraph_vec.emplace_back(subgraph_builder.Finish());
782}
783
784void ModelChef::gather_operator_codes(const ::tflchef::ModelRecipe &model_recipe)
785{
786 // Create OperatorCode with Builtin Operator
787 _builtin_code_map = gather_builtincode_map(model_recipe);
788 for (auto const &opcode : _builtin_code_map)
789 {
790 tflite::OperatorCodeBuilder code_builder{*_flatbuffer_builder};
791 // 127 is BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES
792 // This is the way to handle deprecated builtin code
793 // See
794 // https://github.com/tensorflow/tensorflow/blob/a0afe8f9218be5eb9ed5dffc2dff652996da8c28/tensorflow/lite/schema/schema.fbs#L1061-L1077
795 if (opcode.first < 127)
796 {
797 code_builder.add_deprecated_builtin_code(opcode.first);
798 }
799 else
800 {
801 code_builder.add_deprecated_builtin_code(
802 ::tflite::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES);
803 }
804 code_builder.add_version(opcode.second);
805 code_builder.add_builtin_code(opcode.first);
806 auto code = code_builder.Finish();
807 // Update OperatorCode vector
808 _code_vec.emplace_back(code);
809 }
810
811 // Create OperatorCode with Custom Operator
812 {
813 std::set<std::string> custom_code_set = gather_customcode_set(model_recipe);
814 std::vector<std::string> custom_code_vec{custom_code_set.begin(), custom_code_set.end()};
815 _custom_code_vec = custom_code_vec;
816 }
817
818 for (const auto &opcode : _custom_code_vec)
819 {
820 auto custom_code = _flatbuffer_builder->CreateString(opcode);
821 tflite::OperatorCodeBuilder code_builder{*_flatbuffer_builder};
822 code_builder.add_deprecated_builtin_code(tflite::BuiltinOperator_CUSTOM);
823 code_builder.add_custom_code(custom_code);
824 code_builder.add_builtin_code(tflite::BuiltinOperator_CUSTOM);
825 auto code = code_builder.Finish();
826 // Update OperatorCode vector
827 _code_vec.emplace_back(code);
828 }
829}
830
831void ModelChef::prepare_initial_buffer(void)
832{
833 // Create an Empty Buffer
834 //
835 // Buffer 0 SHOULD be an empty buffer in TensorFlow Lite model file
836 // (Please refer to the comment for Tensor.buffer field in schema)
837 tflite::BufferBuilder buffer_builder{*_flatbuffer_builder};
838 _buffer_vec.emplace_back(buffer_builder.Finish());
839}
840
841void ModelChef::gather_signature_defs(const ::tflchef::ModelRecipe &model_recipe)
842{
843 for (int s = 0; s < model_recipe.signature_def_size(); ++s)
844 {
845 // load from recipe
846 const auto &rec_signature_def = model_recipe.signature_def(s);
847
848 std::vector<flatbuffers::Offset<::tflite::TensorMap>> tensormap_inputs;
849 std::vector<flatbuffers::Offset<::tflite::TensorMap>> tensormap_outputs;
850
851 // which subgraph index to cook
852 auto subgraph_index = 0;
853 if (rec_signature_def.has_subgraph_index())
854 {
855 subgraph_index = rec_signature_def.subgraph_index();
856 }
857 assert(subgraph_index < _symbol_tables.size());
858 auto &symbol_table = _symbol_tables[subgraph_index];
859
860 // cook for inputs
861 for (int si = 0; si < rec_signature_def.inputs_size(); ++si)
862 {
863 // recipe for input TensorMap
864 const auto &rec_tm_input = rec_signature_def.inputs(si);
865 auto name = _flatbuffer_builder->CreateString(rec_tm_input.name());
866 uint32_t tensor_index = 0;
867 // either tensor or tensor_index should exist
868 assert(rec_tm_input.has_tensor() || rec_tm_input.has_tensor_index());
869 if (rec_tm_input.has_tensor())
870 {
871 // we can get tensor_index from symbol_table
872 const auto &tensor = rec_tm_input.tensor();
873 tensor_index = symbol_table[tensor];
874 }
875 else
876 {
877 // or we can use tensor_index itself
878 tensor_index = rec_tm_input.tensor_index();
879 }
880
881 ::tflite::TensorMapBuilder tensormap_builder{*_flatbuffer_builder};
882 tensormap_builder.add_name(name);
883 tensormap_builder.add_tensor_index(tensor_index);
884 tensormap_inputs.push_back(tensormap_builder.Finish());
885 }
886 // cook for outputs, same as inputs
887 for (int so = 0; so < rec_signature_def.outputs_size(); ++so)
888 {
889 const auto &rec_tm_output = rec_signature_def.outputs(so);
890 auto name = _flatbuffer_builder->CreateString(rec_tm_output.name());
891 uint32_t tensor_index = 0;
892 assert(rec_tm_output.has_tensor() || rec_tm_output.has_tensor_index());
893 if (rec_tm_output.has_tensor())
894 {
895 const auto &tensor = rec_tm_output.tensor();
896 tensor_index = symbol_table[tensor];
897 }
898 else
899 {
900 tensor_index = rec_tm_output.tensor_index();
901 }
902
903 ::tflite::TensorMapBuilder tensormap_builder{*_flatbuffer_builder};
904 tensormap_builder.add_name(name);
905 tensormap_builder.add_tensor_index(tensor_index);
906 tensormap_outputs.push_back(tensormap_builder.Finish());
907 }
908
909 auto inputs = _flatbuffer_builder->CreateVector(tensormap_inputs);
910 auto outputs = _flatbuffer_builder->CreateVector(tensormap_outputs);
911 auto signature_key = _flatbuffer_builder->CreateString(rec_signature_def.signature_key());
912 // TODO add validation for signature_key
913
914 ::tflite::SignatureDefBuilder signature_def_builder{*_flatbuffer_builder};
915 signature_def_builder.add_inputs(inputs);
916 signature_def_builder.add_outputs(outputs);
917 signature_def_builder.add_signature_key(signature_key);
918 signature_def_builder.add_subgraph_index(rec_signature_def.subgraph_index());
919
920 _signdef_vec.emplace_back(signature_def_builder.Finish());
921 }
922}
923
924bool ModelChef::finalize_ext_buffer(void)
925{
926 // NOTE appending to the std::string below may reallocate it and invalidate pointers into it.
927 // std::string::reserve() is called up front to prevent this.
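// Resulting layout built below:
// [FlatBuffer model][pad to 16][buffer data][pad to 16][buffer data]...[pad to 16]
// Each tflite::Buffer entry recorded in _buffer_data_map is patched in place so that its
// offset/size fields point at the corresponding data appended after the FlatBuffer.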
928
929 auto align16 = [](size_t &v) {
930 while (v % 16 != 0)
931 v++;
932 };
933
934 // get total memory for flatbuffer + all buffer_data
935 size_t result_size = _flatbuffer_builder->GetSize();
936 align16(result_size);
937 for (auto &it : _buffer_data_map)
938 {
939 std::vector<uint8_t> &buffer_data = it.second;
940 result_size += buffer_data.size();
941 align16(result_size);
942 }
943 align16(result_size);
944 result_size += 16; // additional for safety
945
946 std::string result;
947 auto *buff_ptr = reinterpret_cast<const char *>(_flatbuffer_builder->GetBufferPointer());
948
949 auto padalign16 = [](std::string &str) {
950 while (str.size() % 16 != 0)
951 str += '\0';
952 };
953
954 result.reserve(result_size);
955 result.append(buff_ptr, _flatbuffer_builder->GetSize());
956
957 auto mutable_model = tflite::GetMutableModel(result.data());
958 auto mutable_buffers = mutable_model->mutable_buffers();
959 bool ret = true;
960
961 padalign16(result);
962 for (auto &it : _buffer_data_map)
963 {
964 int32_t buffer_index = it.first;
965 std::vector<uint8_t> &buffer_data = it.second;
966 uint64_t offset = result.size();
967 uint64_t size = buffer_data.size();
968
969 tflite::Buffer *mutable_buffer = mutable_buffers->GetMutableObject(buffer_index);
970 ret &= mutable_buffer->mutate_offset(offset);
971 ret &= mutable_buffer->mutate_size(size);
972
973 result.append(buffer_data.begin(), buffer_data.end());
974 padalign16(result);
975 }
976 padalign16(result);
977
978 // use final result
979 _ext_data = result;
980
981 return ret;
982}
983
984void ModelChef::cook(const ::tflchef::ModelRecipe &model_recipe)
985{
986 // use Custom/Buffer offset
987 _ext_offset = model_recipe.has_ext_offset() ? model_recipe.ext_offset() : false;
988
989 prepare_initial_buffer();
990
991 gather_operator_codes(model_recipe);
992
993 //
994 // Create Main graph
995 //
996
997 _graph_name = "main";
998 // Tensor Name -> Tensor ID mapping (per Graph)
999 std::map<std::string, int32_t> symbol_table;
1000 cook_graph<::tflchef::ModelRecipe>(model_recipe, symbol_table);
1001 _symbol_tables.push_back(symbol_table);
1002
1003 //
1004 // Create subgraphs if exist
1005 //
1006 for (int g = 0; g < model_recipe.graph_size(); ++g)
1007 {
1008 const auto &graph = model_recipe.graph(g);
1009
1010 std::ostringstream stringStream;
1011 stringStream << "sub_" << (g + 1);
1012
1013 _graph_name = stringStream.str();
1014
1015 symbol_table.clear();
1016 _tensor_vec.clear();
1017 _operator_vec.clear();
1018 cook_graph<::tflchef::Graph>(graph, symbol_table);
1019 _symbol_tables.push_back(symbol_table);
1020 }
1021
1022 gather_signature_defs(model_recipe);
1023
1024 // Create "Model" arguments
1025 auto buffers = _flatbuffer_builder->CreateVector(_buffer_vec);
1026 auto signdefs = _flatbuffer_builder->CreateVector(_signdef_vec);
1027 auto operator_codes = _flatbuffer_builder->CreateVector(_code_vec);
1028 auto subgraphs = _flatbuffer_builder->CreateVector(_subgraph_vec);
1029 auto description = _flatbuffer_builder->CreateString("Generated by tflchef");
1030
1031 // Create "Model"
1032 tflite::ModelBuilder model_builder{*_flatbuffer_builder};
1033
1034 model_builder.add_version(3);
1035 model_builder.add_operator_codes(operator_codes);
1036 model_builder.add_signature_defs(signdefs);
1037 model_builder.add_subgraphs(subgraphs);
1038 model_builder.add_description(description);
1039 model_builder.add_buffers(buffers);
1040
1041 auto model = model_builder.Finish();
1042
1043 // Finalize
1044 ::tflite::FinishModelBuffer(*_flatbuffer_builder, model);
1045
1046 if (_ext_offset)
1047 finalize_ext_buffer();
1048}
1049
1050const char *ModelChef::get_buffer_pointer(void) const
1051{
1052 if (_ext_offset)
1053 return _ext_data.data();
1054 return reinterpret_cast<const char *>(_flatbuffer_builder->GetBufferPointer());
1055}
1056
1057size_t ModelChef::get_size(void) const
1058{
1059 if (_ext_offset)
1060 return _ext_data.size();
1061 return _flatbuffer_builder->GetSize();
1062}
1063
1064} // namespace
1065
1066namespace
1067{
1068
1069class GeneratedModelImpl final : public tflchef::GeneratedModel::Impl
1070{
1071public:
1072 GeneratedModelImpl()
1073 {
1074 // DO NOTHING
1075 }
1076
1077public:
1078 const char *base(void) const override { return _mc.get_buffer_pointer(); }
1079
1080 size_t size(void) const override { return _mc.get_size(); }
1081
1082public:
1083 ModelChef &model_chef(void) { return _mc; }
1084
1085private:
1086 ModelChef _mc;
1087};
1088
1089} // namespace
1090
1091namespace tflchef
1092{
1093
1097GeneratedModel cook(const ::tflchef::ModelRecipe &model_recipe)
1098{
1099// Initialize Op Chef Registry
1100#define OP_CHEF(NAME, FACTORY_CLASS) \
1101 op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
1102#include "OpChef.def"
1103#undef OP_CHEF
1104
1105// Initialize Data Chef Registry
1106#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
1107 data_chef_registry(::tflchef::TYPE) \
1108 .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
1109#include "DataChef.def"
1110#undef DATA_CHEF
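// Each DATA_CHEF(TYPE, NAME, FACTORY_CLASS) entry in DataChef.def expands to one registration,
// e.g. (illustrative entry) DATA_CHEF(FLOAT32, constant, ConstantDataChefFactory<float>) becomes
// data_chef_registry(::tflchef::FLOAT32).add("constant", std::unique_ptr<...>(new ...));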
1111
1112 std::unique_ptr<GeneratedModelImpl> gen_model(new GeneratedModelImpl());
1113
1114 ModelChef &mc = gen_model->model_chef();
1115
1116 mc.init();
1117 mc.cook(model_recipe);
1118
1119 // Return "GeneratedModel"
1120 return GeneratedModel{std::move(gen_model)};
1121}
1122
1123} // namespace tflchef
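For context, a minimal sketch of how this translation unit is typically driven: parse a ModelRecipe from protobuf text format, cook it, and write the resulting bytes to a .tflite file. This is illustrative only; it assumes the GeneratedModel::base()/size() accessors declared in tflchef/ModelChef.h, uses protobuf TextFormat for parsing, and the argv-based file names are placeholders.

#include "tflchef/ModelChef.h"

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include <fstream>

int main(int argc, char **argv)
{
  // Parse a ModelRecipe written in protobuf text format (e.g. "recipe.recipe")
  ::tflchef::ModelRecipe recipe;
  std::ifstream is{argv[1]};
  google::protobuf::io::IstreamInputStream iis{&is};
  if (!google::protobuf::TextFormat::Parse(&iis, &recipe))
    return 1;

  // Cook the recipe into an in-memory TensorFlow Lite model
  auto model = tflchef::cook(recipe);

  // Write the serialized model (e.g. "model.tflite")
  std::ofstream os{argv[2], std::ios::binary};
  os.write(model.base(), model.size());
  return 0;
}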