ONE - On-device Neural Engine
ModelChef.cpp
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ModelChef.h"
#include <souschef/RangedArguments.h>
#include <souschef/Registry.h>

#include "Convert.h"

#include <souschef/DataChefs.h>

#include "OpChef.h"
#include "OpChefs.h"

#include <souschef/Dataset.h>
#include <souschef/Dims.h>

#include "Log.h"

#include <iterator>
#include <map>
#include <set>
#include <string>
#include <vector>

#include <cassert>
#include <fstream>
#include <iostream>
#include <numeric>
#include <sstream>
#include <stdexcept>
using namespace souschef;

namespace
{

class GeneratedModelImpl final : public circlechef::GeneratedModel::Impl
{
public:
  GeneratedModelImpl(std::unique_ptr<flatbuffers::FlatBufferBuilder> &&builder)
    : _builder{std::move(builder)}
  {
    // DO NOTHING
  }

public:
  const char *base(void) const override
  {
    // Return the base address of generated flatbuffer model
    return reinterpret_cast<const char *>(_builder->GetBufferPointer());
  }

public:
  size_t size(void) const override
  {
    // Return the size of generated flatbuffer model
    return _builder->GetSize();
  }

private:
  std::unique_ptr<flatbuffers::FlatBufferBuilder> _builder;
};

} // namespace

namespace
{

struct DataChefRegistry final : public Registry<DataChefFactory>
{
};

DataChefRegistry &data_chef_registry(const circlechef::TensorType &type)
{
  static DataChefRegistry s32;
  static DataChefRegistry s64;
  static DataChefRegistry fp32;
  static DataChefRegistry u8;
  static DataChefRegistry u4;
  static DataChefRegistry string;
  static DataChefRegistry boolean;
  static DataChefRegistry s16;
  static DataChefRegistry s4;

  switch (type)
  {
    case circlechef::INT32:
      return s32;
    case circlechef::INT64:
      return s64;
    case circlechef::FLOAT32:
      return fp32;
    case circlechef::UINT8:
      return u8;
    case circlechef::UINT4:
      return u4;
    case circlechef::STRING:
      return string;
    case circlechef::BOOL:
      return boolean;
    case circlechef::INT16:
      return s16;
    case circlechef::INT4:
      return s4;
    default:
      break;
  }

  throw std::runtime_error{"Unknown tensor type"};
}

struct OpChefRegistry final : public Registry<OpChefFactory>
{
};

OpChefRegistry &op_chef_registry(void)
{
  static OpChefRegistry registry;
  return registry;
}
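/// @brief Collect the builtin operator codes used in the model recipe, each mapped to the
///        highest operator version requested for that code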
std::map<circle::BuiltinOperator, int32_t>
gather_builtincode_map(const ::circlechef::ModelRecipe &model_recipe)
{
  // Key and value of the map are BuiltinOperator and operator version
  std::map<circle::BuiltinOperator, int32_t> builtin_map;

  for (const auto &operation : model_recipe.operation())
  {
    if (operation.type() == "Custom")
      continue;

    auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
    // Differing operation versions are unified to the highest version among them
    if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
        builtin_map[op_chef->code()] < operation.version())
      builtin_map[op_chef->code()] = operation.version();
  }

  // Add ops used in Graphs(subgraphs)
  for (int g = 0; g < model_recipe.graph_size(); ++g)
  {
    const auto &graph = model_recipe.graph(g);
    for (const auto &operation : graph.operation())
    {
      if (operation.type() == "Custom")
        continue;

      auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
      // Differing operation versions are unified to the highest version among them
      if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
          builtin_map[op_chef->code()] < operation.version())
        builtin_map[op_chef->code()] = operation.version();
    }
  }

  return builtin_map;
}

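/// @brief Collect the custom_code strings of all "Custom" operations in the model recipe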
std::set<std::string> gather_customcode_set(const ::circlechef::ModelRecipe &model_recipe)
{
  std::set<std::string> customcode_set;
  for (const auto &operation : model_recipe.operation())
  {
    if (operation.type() == "Custom")
    {
      assert(not operation.custom_code().empty());
      customcode_set.insert(operation.custom_code());
    }
  }

  // Add ops used in Graphs(subgraphs)
  for (int g = 0; g < model_recipe.graph_size(); ++g)
  {
    const auto &graph = model_recipe.graph(g);
    for (const auto &operation : graph.operation())
    {
      if (operation.type() == "Custom")
      {
        assert(not operation.custom_code().empty());
        customcode_set.insert(operation.custom_code());
      }
    }
  }

  return customcode_set;
}

} // namespace

namespace
{

struct CookParams
{
  std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec;
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec;
  std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec;
  std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder;
  std::map<circle::BuiltinOperator, int32_t> &builtin_code_map;
  std::string noname;
};

template <typename T> void cook_graph(const T &graph, CookParams &cp)
{
  LOGGER(l);

  std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec = cp.buffer_vec;
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec = cp.code_vec;
  std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec = cp.subgraph_vec;
  std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder = cp.flatbuffer_builder;
  std::map<circle::BuiltinOperator, int32_t> &builtin_code_map = cp.builtin_code_map;

  // Operand-related
  std::vector<flatbuffers::Offset<::circle::Tensor>> tensor_vec;

  // Operation-related
  std::vector<flatbuffers::Offset<::circle::Operator>> operator_vec;

  // default name for graph
  std::string graph_name = cp.noname;
  if (graph.has_name())
    graph_name = graph.name();

  // Tensor Name -> Tensor ID mapping (per Graph)
  std::map<std::string, int32_t> symbol_table;

  auto lookup = [&symbol_table, &graph_name](const std::string &name) {
    if (symbol_table.find(name) != symbol_table.end())
      return symbol_table.at(name);
    else if (name == "")
      return -1; // -1 in circle means that optional input tensor is empty.
    else
    {
      std::string msg = "circlechef : input not found in " + graph_name + " graph";
      throw std::runtime_error(msg.c_str());
    }
  };

  int32_t buffer_start = buffer_vec.size();
  int32_t buffer_index = 0;

  // Create buffer(s) 1~n(I) for input(s)
  const auto size_input = graph.input_size();
  for (int ci = 0; ci < size_input; ++ci)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }
  // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
  const auto size_output = graph.output_size();
  for (int co = 0; co < size_output; ++co)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }

  auto input_names = as_dataset(graph.input()).vectorize();
  auto output_names = as_dataset(graph.output()).vectorize();

  for (const auto &operand : graph.operand())
  {
    assert(operand.has_name());

    assert(operand.has_type());

    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape;
    std::vector<int32_t> dims;
    if (operand.has_shape())
    {
      dims = as_dims(operand.shape());
      shape = flatbuffer_builder->CreateVector(dims);
    }

    auto name = flatbuffer_builder->CreateString(operand.name());

    buffer_index = 0;

    // Create Buffer if filler is specified
    if (operand.has_filler())
    {
      const auto &filler = operand.filler();

      assert(filler.has_tag());

      auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
      auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);

      assert(chef != nullptr);

      // Create Data
      int32_t count = (element_count(dims) > 0) ? element_count(dims) : filler.arg_size();
      auto data_vec = chef->generate(count);
      // pack for INT4 and replace data_vec
      if (operand.type() == circlechef::TensorType::INT4)
      {
        uint32_t packed = (count + 1) / 2;
        std::vector<uint8_t> data_packed(packed);
        for (uint32_t idx = 0; idx < packed; ++idx)
        {
          uint32_t sidx = idx * 2;
          data_packed[idx] = data_vec[sidx++] & 0x0f;
          if (sidx < count)
            data_packed[idx] |= data_vec[sidx] << 4;
        }
        data_vec = data_packed;
      }
      // pack for UINT4 and replace data_vec
      else if (operand.type() == circlechef::TensorType::UINT4)
      {
        uint32_t packed = (count + 1) / 2;
        std::vector<uint8_t> data_packed(packed);
        for (uint32_t idx = 0; idx < packed; ++idx)
        {
          uint32_t sidx = idx * 2;
          data_packed[idx] = data_vec[sidx++] & 0x0f;
          if (sidx < count)
            data_packed[idx] |= data_vec[sidx] << 4;
        }
        data_vec = data_packed;
      }
      auto data = flatbuffer_builder->CreateVector(data_vec);

      // Create Buffer
      circle::BufferBuilder buffer_builder{*flatbuffer_builder};
      buffer_builder.add_data(data);
      auto buffer = buffer_builder.Finish();

      // Update Buffer Index & Vector
      buffer_index = buffer_vec.size();
      buffer_vec.emplace_back(buffer);
    }
    else
    {
      // if this is input or output, assign to that buffer_index
      int idx = 0;
      for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
      {
        if (*it == operand.name())
        {
          buffer_index = buffer_start + idx;
          break;
        }
      }
      if (buffer_index == 0)
      {
        idx = 0;
        for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
        {
          if (*it == operand.name())
          {
            buffer_index = buffer_start + size_input + idx;
            break;
          }
        }
      }
      if (buffer_index == 0)
      {
        // we couldn't find the buffer; create an empty buffer for this tensor
        buffer_index = buffer_vec.size();

        circle::BufferBuilder buffer_builder{*flatbuffer_builder};
        buffer_vec.emplace_back(buffer_builder.Finish());
      }
    }
    assert(buffer_index != 0);

    flatbuffers::Offset<circle::QuantizationParameters> quant_index;

    // Create QuantizationParameters if quant is specified
    if (operand.has_quant())
    {
      const auto &quant = operand.quant();

      // Create each parameter
      // NOTE If some parameters are not given, they are set to default values
      std::vector<float> quant_max_vec(quant.max_size());
      std::vector<float> quant_min_vec(quant.min_size());
      std::vector<float> quant_scale_vec(quant.scale_size());
      std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());

      for (uint32_t i = 0; i < quant.max_size(); ++i)
        quant_max_vec.at(i) = quant.max(i);
      for (uint32_t i = 0; i < quant.min_size(); ++i)
        quant_min_vec.at(i) = quant.min(i);
      for (uint32_t i = 0; i < quant.scale_size(); ++i)
        quant_scale_vec.at(i) = quant.scale(i);
      for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
        quant_zero_point_vec.at(i) = quant.zero_point(i);

      auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
      auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
      auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
      auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);

      // Create QuantizationParameters
      circle::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
      quant_builder.add_max(quant_max);
      quant_builder.add_min(quant_min);
      quant_builder.add_scale(quant_scale);
      quant_builder.add_zero_point(quant_zero_point);
      quant_builder.add_quantized_dimension(quant.quantized_dimension());

      // Update QuantizationParameters Index
      quant_index = quant_builder.Finish();
    }

    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature;
    if (operand.has_shape_signature())
    {
      auto signature = as_dims(operand.shape_signature());
      shape_signature = flatbuffer_builder->CreateVector(signature);
    }

    // Create Tensor
    circle::TensorBuilder tensor_builder{*flatbuffer_builder};

    tensor_builder.add_shape(shape);
    tensor_builder.add_type(as_circle_tensortype(operand.type()));
    tensor_builder.add_buffer(buffer_index);
    tensor_builder.add_name(name);
    if (operand.has_quant())
      tensor_builder.add_quantization(quant_index);
    if (operand.has_shape_signature())
      tensor_builder.add_shape_signature(shape_signature);

    // Append!
    tensor_vec.emplace_back(tensor_builder.Finish());

    // Update Tensor Name -> Tensor Index Map
    int32_t tensor_index = symbol_table.size();
    const auto &tensor_name = operand.name();

    INFO(l) << "Symbol [" << tensor_name << "] = Tensor " << tensor_index << std::endl;

    symbol_table[tensor_name] = tensor_index;
  }

  // Create Operator
  for (const auto &operation : graph.operation())
  {
    assert(operation.has_type());

    std::string op_type = operation.type();
    if (not operation.custom_code().empty())
      op_type = operation.custom_code();

    auto op_chef = op_chef_registry().lookup(op_type).create(&operation);

    // Create 'inputs'
    std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
    auto inputs = flatbuffer_builder->CreateVector(input_vec);

    // Create 'outputs'
    std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
    auto outputs = flatbuffer_builder->CreateVector(output_vec);

    // Create Option
    auto options = op_chef->value(*flatbuffer_builder);

    // Create Custom option
    auto circle_custom_options = op_chef->custom_value(*flatbuffer_builder);

    // Create Operator
    circle::OperatorBuilder op_builder{*flatbuffer_builder};

    // Get the operator code index from builtin_code_map, assuming that the order of
    // builtin_code_map is the same as that of code_vec
    auto op_it = builtin_code_map.find(op_chef->code());
    assert(op_it != builtin_code_map.end());
    uint32_t opcode_index = std::distance(builtin_code_map.begin(), op_it);

    op_builder.add_opcode_index(opcode_index);
    op_builder.add_inputs(inputs);
    op_builder.add_outputs(outputs);
    op_builder.add_builtin_options_type(op_chef->type());
    op_builder.add_builtin_options(options);
    op_builder.add_custom_options(circle_custom_options);
    op_builder.add_custom_options_format(circle::CustomOptionsFormat_FLEXBUFFERS);
    // Append Operator
    operator_vec.emplace_back(op_builder.Finish());
  }

  // Create network input/output vector
  std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
  std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();

  // Create "SubGraph" arguments
  auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
  auto inputs = flatbuffer_builder->CreateVector(input_vec);
  auto outputs = flatbuffer_builder->CreateVector(output_vec);
  auto operators = flatbuffer_builder->CreateVector(operator_vec);
  auto name = flatbuffer_builder->CreateString(graph_name);

  circle::SubGraphBuilder subgraph_builder{*flatbuffer_builder};

  subgraph_builder.add_tensors(tensors);
  subgraph_builder.add_inputs(inputs);
  subgraph_builder.add_outputs(outputs);
  subgraph_builder.add_operators(operators);
  subgraph_builder.add_name(name);

  subgraph_vec.emplace_back(subgraph_builder.Finish());
}

} // namespace

namespace circlechef
{

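/**
 * @brief Generate a (in-memory) circle model from the given model recipe
 */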
GeneratedModel cook(const ::circlechef::ModelRecipe &model_recipe)
{
// Initialize Op Chef Registry
#define OP_CHEF(NAME, FACTORY_CLASS) \
  op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
#include "OpChef.def"
#undef OP_CHEF

// Initialize Data Chef Registry
#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
  data_chef_registry(::circlechef::TYPE)     \
    .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
#include "DataChef.def"
#undef DATA_CHEF

  //
  // Create FlatBufferBuilder
  //
  auto flatbuffer_builder =
    std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));

  // Operand-related
  std::vector<flatbuffers::Offset<::circle::Buffer>> buffer_vec;

  // Operation-related
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> code_vec;

  // Graphs-related
  std::vector<flatbuffers::Offset<::circle::SubGraph>> subgraph_vec;

  // Create OperatorCode with Builtin Operator
  std::map<circle::BuiltinOperator, int32_t> builtin_code_map =
    gather_builtincode_map(model_recipe);
  for (auto const &opcode : builtin_code_map)
  {
    circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
    int8_t dep_code = 127; // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES
    if (opcode.first < circle::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
      dep_code = static_cast<int8_t>(opcode.first);
    code_builder.add_deprecated_builtin_code(dep_code);
    code_builder.add_builtin_code(opcode.first);
    code_builder.add_version(opcode.second);
    auto code = code_builder.Finish();
    // Update OperatorCode vector
    code_vec.emplace_back(code);
  }

  // Create OperatorCode with Custom Operator
  std::set<std::string> custom_code_set = gather_customcode_set(model_recipe);
  if (custom_code_set.size() &&
      builtin_code_map.find(circle::BuiltinOperator_CUSTOM) == builtin_code_map.end())
    builtin_code_map[circle::BuiltinOperator_CUSTOM] = 1;

  for (auto opcode : custom_code_set)
  {
    auto custom_code = flatbuffer_builder->CreateString(opcode);
    circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
    code_builder.add_builtin_code(circle::BuiltinOperator_CUSTOM);
    code_builder.add_custom_code(custom_code);
    auto code = code_builder.Finish();
    // Update OperatorCode vector
    code_vec.emplace_back(code);
  }

  // Create an Empty Buffer
  //
  // Buffer 0 SHOULD be an empty buffer in a circle model file (same rule as TensorFlow Lite;
  // please refer to the comment for the Tensor.buffer field in the schema)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }

  //
  // Create Main graph
  //
  CookParams cp{buffer_vec, code_vec, subgraph_vec, flatbuffer_builder, builtin_code_map, "main"};

  cook_graph<::circlechef::ModelRecipe>(model_recipe, cp);

  //
  // Create subgraphs if any exist
  //
  for (int g = 0; g < model_recipe.graph_size(); ++g)
  {
    const auto &graph = model_recipe.graph(g);

    std::ostringstream stringStream;
    stringStream << "sub_" << (g + 1);

    CookParams cp{buffer_vec,         code_vec,         subgraph_vec,
                  flatbuffer_builder, builtin_code_map, stringStream.str()};

    cook_graph<::circlechef::Graph>(graph, cp);
  }

  // Create "Model" arguments
  auto buffers = flatbuffer_builder->CreateVector(buffer_vec);
  auto operator_codes = flatbuffer_builder->CreateVector(code_vec);
  auto subgraphs = flatbuffer_builder->CreateVector(subgraph_vec);
  auto description = flatbuffer_builder->CreateString("Generated by circlechef");

  // Create "Model"
  circle::ModelBuilder model_builder{*flatbuffer_builder};

  model_builder.add_version(3);
  model_builder.add_operator_codes(operator_codes);
  model_builder.add_subgraphs(subgraphs);
  model_builder.add_description(description);
  model_builder.add_buffers(buffers);

  auto model = model_builder.Finish();

  // Finalize
  ::circle::FinishModelBuffer(*flatbuffer_builder, model);

  // Return "GeneratedModel"
  return GeneratedModel{
    std::unique_ptr<GeneratedModelImpl>(new GeneratedModelImpl(std::move(flatbuffer_builder)))};
}

} // namespace circlechef
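Usage note (not part of the original source): a minimal sketch of a hypothetical caller, assuming a populated ::circlechef::ModelRecipe named "recipe" and an output path "model.circle". The GeneratedModel returned by cook() exposes the serialized flatbuffer through base() and size().

// Hypothetical caller; requires "ModelChef.h", the recipe protobuf header, and <fstream>.
::circlechef::ModelRecipe recipe;
// ... populate "recipe" here, e.g. by parsing a textual recipe ...

auto model = circlechef::cook(recipe);

std::ofstream ofs{"model.circle", std::ios::binary};
ofs.write(model.base(), model.size());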