ONE - On-device Neural Engine
RecipeChef.cpp
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tflchef/RecipeChef.h>
#include <mio_tflite2121/Helper.h> // assumed: provides mio::tflite::tensor_name used below

#include "Convert.h"
#include "TFliteImport.h"
#include "TFliteOpChef.h"
#include "TFliteOpChefs.h"
#include "TFliteOpRegistry.h"

#include <fstream>
#include <sstream>

namespace tflchef
{

void set_inputs(TFliteImport *import, tflchef::Operation *operation, const tflite::Operator *op)
{
  auto tensors = import->tensors();
  const std::vector<int32_t> &inputs = as_index_vector(op->inputs());

  for (auto input : inputs)
  {
    // TFLite uses index -1 for an omitted optional input; keep it as an empty name
    if (input == -1)
    {
      operation->add_input("");
    }
    else
    {
      auto tensor = tensors->Get(input);
      std::string name = mio::tflite::tensor_name(tensor);
      operation->add_input(name);
    }
  }
}

void set_outputs(TFliteImport *import, tflchef::Operation *operation, const tflite::Operator *op)
{
  auto tensors = import->tensors();
  const std::vector<int32_t> &outputs = as_index_vector(op->outputs());

  for (auto output : outputs)
  {
    auto tensor = tensors->Get(output);
    std::string name = mio::tflite::tensor_name(tensor);
    operation->add_output(name);
  }
}

/**
 * @brief Create ModelRecipe from tflite::Model
 */
std::unique_ptr<ModelRecipe> generate_recipe(const tflite::Model *model)
{
  std::unique_ptr<ModelRecipe> model_recipe{new ModelRecipe()};

  TFliteImport tflite_import(model);

  auto const num_subgraph = tflite_import.num_subgraph();

  // Context handed to each op chef's build(); it carries the current tflite
  // operator and the recipe operation being built (declaration restored here;
  // type name assumed from the op chef interface)
  RecipeChefContext ctx;

  for (uint32_t n = 0; n < num_subgraph; ++n)
  {
    tflite_import.select_sub_graph(n);

    auto tensors = tflite_import.tensors();
    auto buffers = tflite_import.buffers();
    auto operators = tflite_import.operators();

    // Subgraph 0 is the main graph and lives directly in the model recipe;
    // every other subgraph gets its own Graph node
    tflchef::Graph *graph = nullptr;
    if (n != 0)
      graph = model_recipe->add_graph();

    // First pass: let each op chef record operand filler options
    // before any operands are emitted
    for (uint32_t i = 0; i < operators->size(); ++i)
    {
      const auto *op = operators->Get(i);
      tflite::BuiltinOperator builtincode = tflite_import.builtin_code(op);

      if (const auto *graph_builder = TFliteOpRegistry::get().lookup(builtincode))
      {
        graph_builder->filler(op, &tflite_import, model_recipe.get());
      }
      else
      {
        std::string opcodename = tflite_import.opcode_name(op);
        throw std::runtime_error{"Not supported: " + opcodename};
      }
    }

    // Add all operands (tensors)
    for (uint32_t i = 0; i < tensors->size(); ++i)
    {
      auto tensor = tensors->Get(i);

      // check that the buffer index is valid
      if (tensor->buffer() >= buffers->size())
        throw std::runtime_error{"file load failed"};

      ::tflchef::Operand *operand;
      if (graph != nullptr)
        operand = graph->add_operand();
      else
        operand = model_recipe->add_operand();
      operand->set_name(mio::tflite::tensor_name(tensor));
      operand->set_type(as_tflchef_type(tensor->type()));
      operand->set_is_variable(tensor->is_variable());

      if (tensor->shape())
      {
        std::vector<int32_t> dims = as_index_vector(tensor->shape());
        ::tflchef::TensorShape *shape = operand->mutable_shape();
        for (auto dim : dims)
        {
          shape->add_dim(dim);
        }
      }

      // Filler for weights, bias, and so on
      std::vector<int32_t> expvalues;
      std::vector<float> expfvalues;
      if (tflite_import.get_tensor_filler(i))
      {
        tflchef::TensorFiller *filler = operand->mutable_filler();
        // Note: it is OK to use random weights for functionality validation
        filler->set_tag("gaussian");
        filler->add_arg("0.0"); // average
        filler->add_arg("0.1"); // standard deviation
      }
      else if (tflite_import.get_tensor_filler(i, expvalues))
      {
        tflchef::TensorFiller *filler = operand->mutable_filler();
        filler->set_tag("explicit");
        for (auto value : expvalues)
        {
          std::ostringstream ss;
          ss << value;
          filler->add_arg(ss.str());
        }
      }
      else if (tflite_import.get_tensor_filler(i, expfvalues))
      {
        tflchef::TensorFiller *filler = operand->mutable_filler();
        filler->set_tag("explicit");
        for (auto value : expfvalues)
        {
          std::ostringstream ss;
          ss << value;
          filler->add_arg(ss.str());
        }
      }

      auto quant = tensor->quantization();
      if (quant != nullptr)
      {
        // Note: Calling 'operand->mutable_quant()' creates an empty 'quant' node
        // in the recipe file. We want this only when valid parameters exist.
        if (quant->min() != nullptr && quant->min()->size() > 0)
        {
          tflchef::TensorQuantization *chef_quant = operand->mutable_quant();
          for (uint32_t idx = 0; idx < quant->min()->size(); ++idx)
            chef_quant->add_min(quant->min()->Get(idx));
        }
        if (quant->max() != nullptr && quant->max()->size() > 0)
        {
          tflchef::TensorQuantization *chef_quant = operand->mutable_quant();
          for (uint32_t idx = 0; idx < quant->max()->size(); ++idx)
            chef_quant->add_max(quant->max()->Get(idx));
        }
        if (quant->scale() != nullptr && quant->scale()->size() > 0)
        {
          tflchef::TensorQuantization *chef_quant = operand->mutable_quant();
          for (uint32_t idx = 0; idx < quant->scale()->size(); ++idx)
            chef_quant->add_scale(quant->scale()->Get(idx));
        }
        if (quant->zero_point() != nullptr && quant->zero_point()->size() > 0)
        {
          tflchef::TensorQuantization *chef_quant = operand->mutable_quant();
          for (uint32_t idx = 0; idx < quant->zero_point()->size(); ++idx)
            chef_quant->add_zero_point(quant->zero_point()->Get(idx));
        }
        tflchef::TensorQuantization *chef_quant = operand->mutable_quant();
        chef_quant->set_quantized_dimension(quant->quantized_dimension());
      }

      auto sparsity = tensor->sparsity();
      if (sparsity != nullptr)
      {
        tflchef::TensorSparsity *chef_sparsity = operand->mutable_sparsity();
        // traversal_order
        auto chef_traversal_order = chef_sparsity->mutable_traversal_order();
        for (const auto &to : *(sparsity->traversal_order()))
        {
          chef_traversal_order->add_dim(to);
        }
        // block_map
        auto chef_block_map = chef_sparsity->mutable_block_map();
        for (const auto &bm : *(sparsity->block_map()))
        {
          chef_block_map->add_dim(bm);
        }
        // dim_metadata
        for (const auto &dm : *(sparsity->dim_metadata()))
        {
          auto chef_dm = chef_sparsity->add_dim_metadata();
          // format
          chef_dm->set_format(as_tflchef_sparse_dim_type(dm->format()));
          // dense_size
          chef_dm->set_dense_size(dm->dense_size());
          // array_segments
          auto chef_array_segments = chef_dm->mutable_array_segments();
          switch (dm->array_segments_type())
          {
            case tflite::SparseIndexVector_NONE:
              // DO NOTHING
              break;
            case tflite::SparseIndexVector_Int32Vector:
              for (const auto &as : *(dm->array_segments_as_Int32Vector()->values()))
              {
                chef_array_segments->add_dim(as);
              }
              break;
            case tflite::SparseIndexVector_Uint16Vector:
              for (const auto &as : *(dm->array_segments_as_Uint16Vector()->values()))
              {
                chef_array_segments->add_dim(as);
              }
              break;
            case tflite::SparseIndexVector_Uint8Vector:
              for (const auto &as : *(dm->array_segments_as_Uint8Vector()->values()))
              {
                chef_array_segments->add_dim(as);
              }
              break;
            default:
              throw std::runtime_error("unsupported sparse index vector type");
          }
          // array_indices
          auto chef_array_indices = chef_dm->mutable_array_indices();
          switch (dm->array_indices_type())
          {
            case tflite::SparseIndexVector_NONE:
              // DO NOTHING
              break;
            case tflite::SparseIndexVector_Int32Vector:
              for (const auto &ai : *(dm->array_indices_as_Int32Vector()->values()))
              {
                chef_array_indices->add_dim(ai);
              }
              break;
            case tflite::SparseIndexVector_Uint16Vector:
              for (const auto &ai : *(dm->array_indices_as_Uint16Vector()->values()))
              {
                chef_array_indices->add_dim(ai);
              }
              break;
            case tflite::SparseIndexVector_Uint8Vector:
              for (const auto &ai : *(dm->array_indices_as_Uint8Vector()->values()))
              {
                chef_array_indices->add_dim(ai);
              }
              break;
            default:
              throw std::runtime_error("unsupported sparse index vector type");
          }
        }
      }

      auto shape_signature = tensor->shape_signature();
      if (shape_signature != nullptr)
      {
        tflchef::ShapeSignature *chef_shape_signature = operand->mutable_shape_signature();
        for (uint32_t j = 0; j < shape_signature->size(); ++j)
        {
          chef_shape_signature->add_dim(shape_signature->Get(j));
        }
      }
    }

    // Second pass: add all operators; each op chef translates the
    // operator-specific options into the recipe operation
    for (uint32_t i = 0; i < operators->size(); ++i)
    {
      const auto *op = operators->Get(i);
      tflite::BuiltinOperator builtincode = tflite_import.builtin_code(op);

      if (const auto *graph_builder = TFliteOpRegistry::get().lookup(builtincode))
      {
        tflchef::Operation *operation =
          graph ? graph->add_operation() : model_recipe->add_operation();
        ctx.tflop = op;
        ctx.chefop = operation;
        graph_builder->build(&ctx);

        // Common for all operators: inputs, outputs
        set_inputs(&tflite_import, operation, op);
        set_outputs(&tflite_import, operation, op);
      }
      else
      {
        std::string opcodename = tflite_import.opcode_name(op);
        throw std::runtime_error{"Not supported: " + opcodename};
      }
    }

    // Network inputs/outputs
    const std::vector<int32_t> &inputs = tflite_import.inputs();
    const std::vector<int32_t> &outputs = tflite_import.outputs();

    for (const auto input : inputs)
    {
      auto tensor = tensors->Get(input);
      std::string name = mio::tflite::tensor_name(tensor);

      if (graph != nullptr)
        graph->add_input(name);
      else
        model_recipe->add_input(name);
    }
    for (const auto output : outputs)
    {
      auto tensor = tensors->Get(output);
      std::string name = mio::tflite::tensor_name(tensor);

      if (graph != nullptr)
        graph->add_output(name);
      else
        model_recipe->add_output(name);
    }
  }

  return model_recipe;
}

bool write_recipe(const std::string &filename, std::unique_ptr<ModelRecipe> &recipe)
{
  std::fstream fo(filename, std::ios::binary | std::ios::out);

  if (!fo.is_open())
  {
    throw std::runtime_error{"file store failed"};
  }

  // Note: SerializeToString() or SerializeToOstream() writes in binary mode;
  // DebugString() and Utf8DebugString() print as human-readable text
  fo << recipe->Utf8DebugString();

  fo.close();

  return true;
}

} // namespace tflchef
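
For orientation, here is a minimal sketch of how the two entry points above fit together, modeled on tflchef's reverse driver. The file handling and header set are illustrative assumptions, not part of this file; tflite::GetModel is the flatbuffers-generated root accessor for the TFLite schema.

// Sketch only: read a .tflite flatbuffer, convert it to a recipe, store it as text.
// Paths and error handling are illustrative; tflite::Model is assumed to come in
// through <tflchef/RecipeChef.h>.
#include <tflchef/RecipeChef.h>

#include <fstream>
#include <iterator>
#include <stdexcept>
#include <vector>

int main(int argc, char **argv)
{
  if (argc != 3)
    return 1; // usage: reverse <model.tflite> <out.recipe>

  // Load the whole flatbuffer into memory
  std::ifstream fi(argv[1], std::ios::binary);
  if (!fi.is_open())
    throw std::runtime_error{"file load failed"};
  std::vector<char> model_data((std::istreambuf_iterator<char>(fi)),
                               std::istreambuf_iterator<char>());

  // Root accessor generated by flatbuffers from the TFLite schema
  const tflite::Model *model = tflite::GetModel(model_data.data());

  auto recipe = tflchef::generate_recipe(model);
  return tflchef::write_recipe(argv[2], recipe) ? 0 : 1;
}

Because write_recipe stores Utf8DebugString(), the output is protobuf text format. A recipe for a single ReLU would look roughly like this (names and shapes illustrative):

operand {
  name: "ifm"
  type: FLOAT32
  shape { dim: 1 dim: 3 dim: 3 dim: 2 }
}
operand {
  name: "ofm"
  type: FLOAT32
  shape { dim: 1 dim: 3 dim: 3 dim: 2 }
}
operation {
  type: "ReLU"
  input: "ifm"
  output: "ofm"
}
input: "ifm"
output: "ofm"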