ONE - On-device Neural Engine
Loading...
Searching...
No Matches
ModelChef.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
19#include <souschef/Registry.h>
20
21#include "Convert.h"
22
23#include <souschef/DataChefs.h>
24
25#include "OpChef.h"
26#include "OpChefs.h"
27
28#include <souschef/Dataset.h>
29#include <souschef/Dims.h>
30
31#include "Log.h"
32
33#include <iterator>
34#include <map>
35#include <string>
36#include <vector>
37
38#include <cassert>
39#include <fstream>
40#include <iostream>
41#include <numeric>
42#include <sstream>
43#include <stdexcept>
44
45using namespace souschef;
46
47namespace
48{
49
50class GeneratedModelImpl final : public circlechef::GeneratedModel::Impl
51{
52public:
53 GeneratedModelImpl(std::unique_ptr<flatbuffers::FlatBufferBuilder> &&builder)
54 : _builder{std::move(builder)}
55 {
56 // DO NOTHING
57 }
58
59public:
60 const char *base(void) const override
61 {
62 // Return the base address of generated flatbuffer model
63 return reinterpret_cast<const char *>(_builder->GetBufferPointer());
64 }
65
66public:
67 size_t size(void) const override
68 {
69 // Return the size of generated flatbuffer model
70 return _builder->GetSize();
71 }
72
73private:
74 std::unique_ptr<flatbuffers::FlatBufferBuilder> _builder;
75};
76
77} // namespace
78
79namespace
80{
81
// Registry of DataChef factories (fillers); a separate instance exists for
// each tensor type — see data_chef_registry() below.
struct DataChefRegistry final : public Registry<DataChefFactory>
{
};
85
86DataChefRegistry &data_chef_registry(const circlechef::TensorType &type)
87{
88 static DataChefRegistry s32;
89 static DataChefRegistry s64;
90 static DataChefRegistry fp32;
91 static DataChefRegistry u8;
92 static DataChefRegistry u4;
93 static DataChefRegistry string;
94 static DataChefRegistry boolean;
95 static DataChefRegistry s16;
96 static DataChefRegistry s4;
97
98 switch (type)
99 {
100 case circlechef::INT32:
101 return s32;
102 case circlechef::INT64:
103 return s64;
104 case circlechef::FLOAT32:
105 return fp32;
106 case circlechef::UINT8:
107 return u8;
108 case circlechef::UINT4:
109 return u4;
110 case circlechef::STRING:
111 return string;
112 case circlechef::BOOL:
113 return boolean;
114 case circlechef::INT16:
115 return s16;
116 case circlechef::INT4:
117 return s4;
118 default:
119 break;
120 }
121
122 throw std::runtime_error{"Unknown tensor type"};
123}
124
// Registry of OpChef factories; a single process-wide instance is returned
// by op_chef_registry() below.
struct OpChefRegistry final : public Registry<OpChefFactory>
{
};
128
129OpChefRegistry &op_chef_registry(void)
130{
131 static OpChefRegistry registry;
132 return registry;
133}
134
136std::map<circle::BuiltinOperator, int32_t>
137gather_builtincode_map(const ::circlechef::ModelRecipe &model_recipe)
138{
139 // Key and value of the map are BuiltinOperator and operator version
140 std::map<circle::BuiltinOperator, int32_t> builtin_map;
141
142 for (const auto &operation : model_recipe.operation())
143 {
144 if (operation.type() == "Custom")
145 continue;
146
147 auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
148 // Various operation version is unified as the highest version among them
149 if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
150 builtin_map[op_chef->code()] < operation.version())
151 builtin_map[op_chef->code()] = operation.version();
152 }
153
154 // Add ops used in Graphs(subgraphs)
155 for (int g = 0; g < model_recipe.graph_size(); ++g)
156 {
157 const auto &graph = model_recipe.graph(g);
158 for (const auto &operation : graph.operation())
159 {
160 if (operation.type() == "Custom")
161 continue;
162
163 auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
164 // Various operation version is unified as the highest version among them
165 if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
166 builtin_map[op_chef->code()] < operation.version())
167 builtin_map[op_chef->code()] = operation.version();
168 }
169 }
170
171 return builtin_map;
172}
173
175std::set<std::string> gather_customcode_set(const ::circlechef::ModelRecipe &model_recipe)
176{
177 std::set<std::string> customcode_set;
178 for (const auto &operation : model_recipe.operation())
179 {
180 if (operation.type() == "Custom")
181 {
182 assert(not operation.custom_code().empty());
183 customcode_set.insert(operation.custom_code());
184 }
185 }
186
187 // Add ops used in Graphs(subgraphs)
188 for (int g = 0; g < model_recipe.graph_size(); ++g)
189 {
190 const auto &graph = model_recipe.graph(g);
191 for (const auto &operation : graph.operation())
192 {
193 if (operation.type() == "Custom")
194 {
195 assert(not operation.custom_code().empty());
196 customcode_set.insert(operation.custom_code());
197 }
198 }
199 }
200
201 return customcode_set;
202}
203
204} // namespace
205
206namespace
207{
208
/**
 * @brief State shared between cook() and cook_graph()
 *
 * The reference members alias vectors owned by cook(); each cook_graph()
 * invocation appends its buffers/tensors/subgraph into them so a single
 * flatbuffer model accumulates all graphs.
 */
struct CookParams
{
  std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec;       // model-wide Buffer table
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec;   // model-wide OperatorCode table
  std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec;   // one entry appended per cooked graph
  std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder;  // builder all offsets refer to
  std::map<circle::BuiltinOperator, int32_t> &builtin_code_map;         // opcode -> version (order defines opcode_index)
  std::string noname; // fallback graph name used when the recipe graph has no name
};
218
/**
 * @brief Build one circle SubGraph from a recipe graph
 *
 * Creates buffers, tensors and operators for @p graph and appends the
 * finished SubGraph (plus its buffers) into the vectors referenced by @p cp.
 *
 * @tparam T  either ::circlechef::ModelRecipe (main graph) or ::circlechef::Graph
 *            (subgraph) — both expose the same accessors used here
 * @param graph recipe graph to cook
 * @param cp    shared output vectors / builder (see CookParams)
 */
template <typename T> void cook_graph(const T &graph, CookParams &cp)
{
  LOGGER(l);

  std::vector<flatbuffers::Offset<::circle::Buffer>> &buffer_vec = cp.buffer_vec;
  // NOTE(review): code_vec is bound here but never used inside cook_graph —
  // operator codes are emitted by cook(); confirm whether the alias is vestigial.
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> &code_vec = cp.code_vec;
  std::vector<flatbuffers::Offset<::circle::SubGraph>> &subgraph_vec = cp.subgraph_vec;
  std::unique_ptr<flatbuffers::FlatBufferBuilder> &flatbuffer_builder = cp.flatbuffer_builder;
  std::map<circle::BuiltinOperator, int32_t> &builtin_code_map = cp.builtin_code_map;

  // Operand-related
  std::vector<flatbuffers::Offset<::circle::Tensor>> tensor_vec;

  // Operation-related
  std::vector<flatbuffers::Offset<::circle::Operator>> operator_vec;

  // default name for graph
  std::string graph_name = cp.noname;
  if (graph.has_name())
    graph_name = graph.name();

  // Tensor Name -> Tensor ID mapping (per Graph)
  std::map<std::string, int32_t> symbol_table;

  // Resolve an operand name to its tensor index; "" is the optional-input
  // sentinel and maps to -1, an unknown name is a recipe error.
  auto lookup = [&symbol_table, &graph_name](const std::string &name) {
    if (symbol_table.find(name) != symbol_table.end())
      return symbol_table.at(name);
    else if (name == "")
      return -1; // -1 in circle means that optional input tensor is empty.
    else
    {
      std::string msg = "circlechef : input not found in " + graph_name + " graph";
      throw std::runtime_error(msg.c_str());
    }
  };

  // Buffers for this graph's inputs/outputs start at the current end of the
  // model-wide buffer table.
  int32_t buffer_start = buffer_vec.size();
  int32_t buffer_index = 0;

  // Create buffer(s) 1~n(I) for input(s)
  const auto size_input = graph.input_size();
  for (int ci = 0; ci < size_input; ++ci)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }
  // Create buffer(s) n(I)+1~n(I)+n(O) for output(s)
  const auto size_output = graph.output_size();
  for (int co = 0; co < size_output; ++co)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }

  auto input_names = as_dataset(graph.input()).vectorize();
  auto output_names = as_dataset(graph.output()).vectorize();

  // --- Cook every operand into a circle::Tensor ---
  for (const auto &operand : graph.operand())
  {
    assert(operand.has_name());

    assert(operand.has_type());

    // NOTE(review): the declaration of 'shape' (a flatbuffers offset for the
    // shape vector) sits on a line elided from this view — confirm against
    // the full file before editing this region.
    std::vector<int32_t> dims;
    if (operand.has_shape())
    {
      dims = as_dims(operand.shape());
      shape = flatbuffer_builder->CreateVector(dims);
    }

    auto name = flatbuffer_builder->CreateString(operand.name());

    buffer_index = 0;

    // Create Buffer if filler is specified
    if (operand.has_filler())
    {
      const auto &filler = operand.filler();

      assert(filler.has_tag());

      auto args = ranged_arguments(filler.arg().begin(), filler.arg().end());
      auto chef = data_chef_registry(operand.type()).lookup(filler.tag()).create(args);

      assert(chef != nullptr);

      // Create Data
      // Element count falls back to the number of filler arguments when the
      // shape yields no elements.
      int32_t count = (element_count(dims) > 0) ? element_count(dims) : filler.arg_size();
      auto data_vec = chef->generate(count);
      // pack for INT4 and replace data_vec
      if (operand.type() == circlechef::TensorType::INT4)
      {
        // Two 4-bit values per byte, low nibble first; for an odd count the
        // final high nibble stays zero.
        uint32_t packed = (count + 1) / 2;
        std::vector<uint8_t> data_packed(packed);
        for (uint32_t idx = 0; idx < packed; ++idx)
        {
          uint32_t sidx = idx * 2;
          data_packed[idx] = data_vec[sidx++] & 0x0f;
          if (sidx < count)
            data_packed[idx] |= data_vec[sidx] << 4;
        }
        data_vec = data_packed;
      }
      // pack for UINT4 and replace data_vec
      else if (operand.type() == circlechef::TensorType::UINT4)
      {
        // Same nibble-packing scheme as INT4 above.
        uint32_t packed = (count + 1) / 2;
        std::vector<uint8_t> data_packed(packed);
        for (uint32_t idx = 0; idx < packed; ++idx)
        {
          uint32_t sidx = idx * 2;
          data_packed[idx] = data_vec[sidx++] & 0x0f;
          if (sidx < count)
            data_packed[idx] |= data_vec[sidx] << 4;
        }
        data_vec = data_packed;
      }
      auto data = flatbuffer_builder->CreateVector(data_vec);

      // Create Buffer
      circle::BufferBuilder buffer_builder{*flatbuffer_builder};
      buffer_builder.add_data(data);
      auto buffer = buffer_builder.Finish();

      // Update Buffer Index & Vector
      buffer_index = buffer_vec.size();
      buffer_vec.emplace_back(buffer);
    }
    else
    {
      // if this is input or output, assign to that buffer_index
      int idx = 0;
      for (auto it = input_names.begin(); it != input_names.end(); ++it, ++idx)
      {
        if (*it == operand.name())
        {
          buffer_index = buffer_start + idx;
          break;
        }
      }
      if (buffer_index == 0)
      {
        idx = 0;
        for (auto it = output_names.begin(); it != output_names.end(); ++it, ++idx)
        {
          if (*it == operand.name())
          {
            buffer_index = buffer_start + size_input + idx;
            break;
          }
        }
      }
      if (buffer_index == 0)
      {
        // we couldn't find the buffer; create an empty buffer for this tensor
        buffer_index = buffer_vec.size();

        circle::BufferBuilder buffer_builder{*flatbuffer_builder};
        buffer_vec.emplace_back(buffer_builder.Finish());
      }
    }
    // Buffer 0 is reserved for the model-wide empty buffer, so every tensor
    // cooked here must have a non-zero index.
    assert(buffer_index != 0);

    // NOTE(review): the declaration of 'quant_index' (offset of the tensor's
    // QuantizationParameters) is on a line elided from this view — confirm.

    // Create QuantizationParameters if quant is specified
    if (operand.has_quant())
    {
      const auto &quant = operand.quant();

      // Create each parameters
      // NOTE if some parameters are not given, those will be set to default value
      std::vector<float> quant_max_vec(quant.max_size());
      std::vector<float> quant_min_vec(quant.min_size());
      std::vector<float> quant_scale_vec(quant.scale_size());
      std::vector<int64_t> quant_zero_point_vec(quant.zero_point_size());

      for (uint32_t i = 0; i < quant.max_size(); ++i)
        quant_max_vec.at(i) = quant.max(i);
      for (uint32_t i = 0; i < quant.min_size(); ++i)
        quant_min_vec.at(i) = quant.min(i);
      for (uint32_t i = 0; i < quant.scale_size(); ++i)
        quant_scale_vec.at(i) = quant.scale(i);
      for (uint32_t i = 0; i < quant.zero_point_size(); ++i)
        quant_zero_point_vec.at(i) = quant.zero_point(i);

      // Child vectors must be created before the table builder is opened
      // (flatbuffers forbids nested construction).
      auto quant_max = flatbuffer_builder->CreateVector(quant_max_vec);
      auto quant_min = flatbuffer_builder->CreateVector(quant_min_vec);
      auto quant_scale = flatbuffer_builder->CreateVector(quant_scale_vec);
      auto quant_zero_point = flatbuffer_builder->CreateVector(quant_zero_point_vec);

      // Create QuantizationParameters
      circle::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
      quant_builder.add_max(quant_max);
      quant_builder.add_min(quant_min);
      quant_builder.add_scale(quant_scale);
      quant_builder.add_zero_point(quant_zero_point);
      quant_builder.add_quantized_dimension(quant.quantized_dimension());

      // Update QuantizationParameters Index
      quant_index = quant_builder.Finish();
    }

    // Create MXQuantization if mx quant is specified
    if (operand.has_mx_quant())
    {
      // Affine (quant) and MX quantization are mutually exclusive per operand.
      if (operand.has_quant())
        throw std::runtime_error("Affine quantization can not exist with MX quantization.");

      const auto &quant = operand.mx_quant();

      int32_t axis = quant.axis();

      // Create MXQuantization
      circle::MXQuantizationBuilder mx_quant_builder{*flatbuffer_builder};
      mx_quant_builder.add_axis(axis);
      auto mx_quant_index = mx_quant_builder.Finish();

      // Create QuantizationParameters
      circle::QuantizationParametersBuilder quant_builder{*flatbuffer_builder};
      quant_builder.add_details_type(circle::QuantizationDetails_MXQuantization);
      quant_builder.add_details(mx_quant_index.Union());

      // Update QuantizationParameters Index
      quant_index = quant_builder.Finish();
    }

    // NOTE(review): the declaration of 'shape_signature' is on a line elided
    // from this view — confirm against the full file.
    if (operand.has_shape_signature())
    {
      auto signature = as_dims(operand.shape_signature());
      shape_signature = flatbuffer_builder->CreateVector(signature);
    }

    // Create Tensor
    circle::TensorBuilder tensor_builder{*flatbuffer_builder};

    tensor_builder.add_shape(shape);
    tensor_builder.add_type(as_circle_tensortype(operand.type()));
    tensor_builder.add_buffer(buffer_index);
    tensor_builder.add_name(name);
    if (operand.has_quant() or operand.has_mx_quant())
      tensor_builder.add_quantization(quant_index);
    if (operand.has_shape_signature())
      tensor_builder.add_shape_signature(shape_signature);

    // Append!
    tensor_vec.emplace_back(tensor_builder.Finish());

    // Update Tensor Name -> Tensor Index Map
    int32_t tensor_index = symbol_table.size();
    const auto &tensor_name = operand.name();

    INFO(l) << "Symbol [" << tensor_name << "] = Tensor " << tensor_index << std::endl;

    symbol_table[tensor_name] = tensor_index;
  }

  // --- Cook every operation into a circle::Operator ---
  // Create Operator
  for (const auto &operation : graph.operation())
  {
    assert(operation.has_type());

    // Custom operations are looked up by their custom_code instead of type.
    std::string op_type = operation.type();
    if (not operation.custom_code().empty())
      op_type = operation.custom_code();

    auto op_chef = op_chef_registry().lookup(op_type).create(&operation);

    // Create 'inputs'
    std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
    auto inputs = flatbuffer_builder->CreateVector(input_vec);

    // Create 'outputs'
    std::vector<int32_t> output_vec = as_dataset(operation.output()).map(lookup).vectorize();
    auto outputs = flatbuffer_builder->CreateVector(output_vec);

    // Create Option
    auto options = op_chef->value(*flatbuffer_builder);

    // Create Custom option
    auto circle_custom_options = op_chef->custom_value(*flatbuffer_builder);

    // Create Operator
    circle::OperatorBuilder op_builder{*flatbuffer_builder};

    // Get operator code index from builtin_code_map with assumption, order of
    // builtin_code_map is same as that of code_vec
    auto op_it = builtin_code_map.find(op_chef->code());
    assert(op_it != builtin_code_map.end());
    uint32_t opcode_index = std::distance(builtin_code_map.begin(), op_it);

    op_builder.add_opcode_index(opcode_index);
    op_builder.add_inputs(inputs);
    op_builder.add_outputs(outputs);
    op_builder.add_builtin_options_type(op_chef->type());
    op_builder.add_builtin_options(options);
    op_builder.add_custom_options(circle_custom_options);
    op_builder.add_custom_options_format(circle::CustomOptionsFormat_FLEXBUFFERS);
    // Append Operator
    operator_vec.emplace_back(op_builder.Finish());
  }

  // Create network input/output vector
  std::vector<int32_t> input_vec = as_dataset(graph.input()).map(lookup).vectorize();
  std::vector<int32_t> output_vec = as_dataset(graph.output()).map(lookup).vectorize();

  // Create "SubGraph" arguments
  auto tensors = flatbuffer_builder->CreateVector(tensor_vec);
  auto inputs = flatbuffer_builder->CreateVector(input_vec);
  auto outputs = flatbuffer_builder->CreateVector(output_vec);
  auto operators = flatbuffer_builder->CreateVector(operator_vec);
  auto name = flatbuffer_builder->CreateString(graph_name);

  circle::SubGraphBuilder subgraph_builder{*flatbuffer_builder};

  subgraph_builder.add_tensors(tensors);
  subgraph_builder.add_inputs(inputs);
  subgraph_builder.add_outputs(outputs);
  subgraph_builder.add_operators(operators);
  subgraph_builder.add_name(name);

  subgraph_vec.emplace_back(subgraph_builder.Finish());
}
544
545} // namespace
546
547namespace circlechef
548{
549
/**
 * @brief Generate a circle flatbuffer model from a ModelRecipe
 *
 * Registers all Op/Data chefs via the X-macro .def files, gathers the
 * operator-code tables, cooks the main graph and every subgraph into one
 * FlatBufferBuilder, and wraps the finished buffer in a GeneratedModel.
 *
 * @param model_recipe recipe describing graphs, operands and operations
 * @return GeneratedModel owning the serialized circle model
 */
GeneratedModel cook(const ::circlechef::ModelRecipe &model_recipe)
{
// Initialize Op Chef Registry
#define OP_CHEF(NAME, FACTORY_CLASS) \
  op_chef_registry().add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
#include "OpChef.def"
#undef OP_CHEF

// Initialize Data Chef Registry
#define DATA_CHEF(TYPE, NAME, FACTORY_CLASS) \
  data_chef_registry(::circlechef::TYPE) \
    .add(#NAME, std::unique_ptr<FACTORY_CLASS>(new FACTORY_CLASS()));
#include "DataChef.def"
#undef DATA_CHEF

  //
  // Create FlatBufferBuilder
  //
  auto flatbuffer_builder =
    std::unique_ptr<flatbuffers::FlatBufferBuilder>(new flatbuffers::FlatBufferBuilder(1024));

  // Operand-related
  std::vector<flatbuffers::Offset<::circle::Buffer>> buffer_vec;

  // Operation-related
  std::vector<flatbuffers::Offset<::circle::OperatorCode>> code_vec;

  // Graphs-related
  std::vector<flatbuffers::Offset<::circle::SubGraph>> subgraph_vec;

  // Create OperatorCode with Builtin Operator
  // NOTE cook_graph() derives opcode_index from the iteration order of this
  // map, so code_vec must be filled in the same order.
  std::map<circle::BuiltinOperator, int32_t> builtin_code_map =
    gather_builtincode_map(model_recipe);
  for (auto const &opcode : builtin_code_map)
  {
    circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
    // Opcodes beyond the legacy int8 range keep the placeholder value 127 in
    // the deprecated field for schema backward compatibility.
    int8_t dep_code = 127; // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES
    if (opcode.first < circle::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
      dep_code = static_cast<int8_t>(opcode.first);
    code_builder.add_deprecated_builtin_code(dep_code);
    code_builder.add_builtin_code(opcode.first);
    code_builder.add_version(opcode.second);
    auto code = code_builder.Finish();
    // Update OperatorCode vector
    code_vec.emplace_back(code);
  }

  // Create OperatorCode with Custom Operator
  // If any custom op exists, ensure BuiltinOperator_CUSTOM has an entry in
  // the builtin map (version 1) so opcode lookup succeeds.
  std::set<std::string> custom_code_set = gather_customcode_set(model_recipe);
  if (custom_code_set.size() &&
      builtin_code_map.find(circle::BuiltinOperator_CUSTOM) == builtin_code_map.end())
    builtin_code_map[circle::BuiltinOperator_CUSTOM] = 1;

  for (auto opcode : custom_code_set)
  {
    auto custom_code = flatbuffer_builder->CreateString(opcode);
    circle::OperatorCodeBuilder code_builder{*flatbuffer_builder};
    code_builder.add_builtin_code(circle::BuiltinOperator_CUSTOM);
    code_builder.add_custom_code(custom_code);
    auto code = code_builder.Finish();
    // Update OperatorCode vector
    code_vec.emplace_back(code);
  }

  // Create an Empty Buffer
  //
  // Buffer 0 SHOULD be an empty buffer in TensorFlow Lite model file
  // (Please refer to the comment for Tensor.buffer field in schema)
  {
    circle::BufferBuilder buffer_builder{*flatbuffer_builder};
    buffer_vec.emplace_back(buffer_builder.Finish());
  }

  //
  // Create Main graph
  //
  CookParams cp{buffer_vec, code_vec, subgraph_vec, flatbuffer_builder, builtin_code_map, "main"};

  cook_graph<::circlechef::ModelRecipe>(model_recipe, cp);

  //
  // Create subgraphs if exist
  //
  for (int g = 0; g < model_recipe.graph_size(); ++g)
  {
    const auto &graph = model_recipe.graph(g);

    // Unnamed subgraphs fall back to "sub_1", "sub_2", ...
    std::ostringstream stringStream;
    stringStream << "sub_" << (g + 1);

    CookParams cp{buffer_vec, code_vec, subgraph_vec,
                  flatbuffer_builder, builtin_code_map, stringStream.str()};

    cook_graph<::circlechef::Graph>(graph, cp);
  }

  // Create "Model" arguments
  // (child vectors/strings must exist before ModelBuilder is opened)
  auto buffers = flatbuffer_builder->CreateVector(buffer_vec);
  auto operator_codes = flatbuffer_builder->CreateVector(code_vec);
  auto subgraphs = flatbuffer_builder->CreateVector(subgraph_vec);
  auto description = flatbuffer_builder->CreateString("Generated by circlechef");

  // Create "Model"
  circle::ModelBuilder model_builder{*flatbuffer_builder};

  model_builder.add_version(3);
  model_builder.add_operator_codes(operator_codes);
  model_builder.add_subgraphs(subgraphs);
  model_builder.add_description(description);
  model_builder.add_buffers(buffers);

  auto model = model_builder.Finish();

  // Finalize
  ::circle::FinishModelBuffer(*flatbuffer_builder, model);

  // Return "GenerateModel"
  return GeneratedModel{
    std::unique_ptr<GeneratedModelImpl>(new GeneratedModelImpl(std::move(flatbuffer_builder)))};
}
673
674} // namespace circlechef
OpBuilder op_builder(coco::Module *m)
Definition IRBuilder.h:144
circle::TensorType as_circle_tensortype(const circlechef::TensorType &value)
Definition Convert.cpp:53
#define LOGGER(name)
Definition Log.h:65
#define INFO(name)
Definition Log.h:68
Helper class to hold data needed in creation of a FlatBuffer. To serialize data, you typically call o...
GeneratedModel cook(const ModelRecipe &model_recipe)
args
Definition infer.py:21
std::vector< int > dims(const std::string &src)
Definition Utils.h:35
flatbuffers::Offset< flatbuffers::Vector< uint8_t > > circle_custom_options(flatbuffers::FlatBufferBuilder &fb, const luci::CircleNode *node)
const char * tensor_name(const circle::Tensor *tensor)
name
Definition setup.py:158
This file provides string <-> number cast helpers.
Definition Arguments.h:24
Dims< int32_t > as_dims(const SHAPETYPE &shape)
Definition Dims.h:30
Dataset< T > as_dataset(const ::google::protobuf::RepeatedPtrField< T > &field)
Definition Dataset.h:72
RangedArguments< InputIt > ranged_arguments(InputIt beg, InputIt end)
int32_t element_count(const Dims< int32_t > &dims)
Definition Dims.h:42
virtual const char * base(void) const =0
virtual size_t size(void) const =0