ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Frontend.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "Frontend.h"
18#include "Context.h"
19#include "Convert.h"
20#include "TensorBags.h"
22
25
26#include <iostream>
27
28using namespace nncc::core::ADT;
29
30namespace tflimport
31{
32
37 const IndexVector &inputs)
38{
39 for (uint32_t n = 0; n < inputs.size(); ++n)
40 {
41 auto const tensor_id = inputs.at(n);
42
43 auto const tensor_name = ctx.name(tensor_id);
44 auto const tensor_shape = ctx.shape(tensor_id);
45 auto const tensor_bag = bags.bag(tensor_id);
46
47 auto input = m->entity()->input()->create(tensor_shape);
48
49 input->name(tensor_name);
50 input->bag(tensor_bag);
51 input->reorder<tensor::LexicalLayout>();
52
53 m->input()->insert(input);
54 }
55}
56
61 const IndexVector &outputs)
62{
63 for (uint32_t n = 0; n < outputs.size(); ++n)
64 {
65 auto const tensor_id = outputs.at(n);
66
67 auto const tensor_name = ctx.name(tensor_id);
68 auto const tensor_shape = ctx.shape(tensor_id);
69 auto const tensor_bag = bags.bag(tensor_id);
70
71 auto output = m->entity()->output()->create(tensor_shape);
72
73 output->name(tensor_name);
74 output->bag(tensor_bag);
75 output->reorder<tensor::LexicalLayout>();
76
77 m->output()->insert(output);
78 }
79}
80
85{
86 auto d = ctx->d();
87
88 // for each bag, check if bag is not allocated but tflite tensor has values
89 for (auto &iter : ctx->bags())
90 {
91 auto tfl_tensor_id = iter.first;
92 auto bag = iter.second;
93
94 auto tfl_buffer = ctx->buffer().tensor_buffer<float>(ctx->graph(), tfl_tensor_id);
95
96 // TODO remove this line when support int32 is ready
97 if (ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_INT32)
98 {
99 std::cout << "*** INT32 COPYING IS NOT SUPPORTED ***" << std::endl;
100 continue;
101 }
102
103 assert(ctx->tensor().type(tfl_tensor_id) == tflite::TensorType::TensorType_FLOAT32);
104
105 auto span = d->f32()->weight(bag); // TODO support other type
106
107 if (!(span.data() == nullptr && span.size() == 0)) // already allocated
108 continue;
109
110 if (tfl_buffer.ptr == nullptr || tfl_buffer.len == 0) // no data to copy
111 continue;
112
113 d->f32()->allocate(bag);
114
115 auto ifm_span = d->f32()->weight(bag);
116 for (uint32_t idx = 0; idx < tfl_buffer.len; ++idx)
117 {
118 ifm_span[idx] = tfl_buffer.ptr[idx];
119 }
120 }
121}
122
123} // namespace tflimport
124
125Frontend::Frontend(std::unique_ptr<RawModel> &&raw) : _raw{std::move(raw)}
126{
127 // DO NOTHING
128}
129
131{
132 auto model = _raw->model();
133
134 assert(model->version() == 3);
135 assert(model->subgraphs()->size() == 1);
136
137 auto graph = model->subgraphs()->Get(0);
138
139 auto m = coco::Module::create();
140 auto d = coco::Data::create();
141
142 tflimport::TensorContext tensor_context;
143 tflimport::TensorBags tensor_bags;
144
145 tensor_context.prepare(graph);
146 tensor_bags.prepare(graph, m);
147
148 auto inputs = tflimport::as_index_vector(graph->inputs());
149 auto outputs = tflimport::as_index_vector(graph->outputs());
150
151 tflimport::set_module_inputs(m.get(), tensor_context, tensor_bags, inputs);
152 tflimport::set_module_outputs(m.get(), tensor_context, tensor_bags, outputs);
153
154 auto blk = m->entity()->block()->create();
155 m->block()->append(blk);
156
157 auto opcodes = model->operator_codes();
158
159 tflimport::TflBufferContext buffer_context(model);
160 tflimport::TflOpCodeContext opcode_context(opcodes);
161
162 auto operators = graph->operators();
163
164 tflimport::GraphBuilderContext opbuilder_context(m.get(), d.get(), blk, tensor_bags,
165 tensor_context, buffer_context, graph);
166
167 for (int i = 0; i < operators->Length(); ++i)
168 {
169 const auto *op = operators->Get(i);
170 tflite::BuiltinOperator builtincode = opcode_context.builtin_code(op);
171
172 if (const auto *graph_builder = tflimport::GraphBuilderRegistry::get().lookup(builtincode))
173 {
174 if (!graph_builder->validate(op))
175 {
176 throw std::runtime_error{"Invalid operator"};
177 }
178
179 graph_builder->build(op, &opbuilder_context);
180 }
181 else
182 {
183 std::string opcodename = opcode_context.opcode_name(op);
184 throw std::runtime_error{"Not supported: " + opcodename};
185 }
186
187 // copying unfilled tensor value
188 copy_tensors(&opbuilder_context);
189 }
190
191 // Create "Bundle"
192 enco::Bundle bundle;
193
194 bundle.module(std::move(m));
195 bundle.data(std::move(d));
196
197 return std::move(bundle);
198}
enco::Bundle load(void) const override
Definition Frontend.cpp:40
Top-level element of coco IR which represents a neural network.
Definition Module.h:34
static std::unique_ptr< Module > create(void)
Definition Module.cpp:127
coco::Data * data(void) const
Definition Bundle.h:38
coco::Module * module(void) const
Definition Bundle.h:34
Class to store context to build IR from tflite.
Definition Context.h:133
TensorContext & tensor()
Definition Context.h:152
TflBufferContext & buffer()
Definition Context.h:154
const tflite::SubGraph * graph()
Definition Context.h:155
static GraphBuilderRegistry & get()
Pre-creates coco::Bags for each operand (tensor)
Definition TensorBags.h:38
coco::Bag * bag(int32_t tensor_id)
Definition TensorBags.h:52
void prepare(const tflite::SubGraph *graph, std::unique_ptr< coco::Module > &m)
Definition TensorBags.h:40
Extracts and holds operand (tensor) information such as name, shape, and type.
Definition Context.h:39
void prepare(const tflite::SubGraph *graph)
Definition Context.cpp:35
const std::string & name(uint32_t tensor_id)
Definition Context.h:43
const tflite::TensorType & type(uint32_t tensor_id)
Definition Context.h:45
const tensor::Shape & shape(uint32_t tensor_id)
Definition Context.h:44
Class to read and provide buffer information of tflite.
Definition Context.h:87
TflBuffer< T > tensor_buffer(const tflite::SubGraph *graph, uint32_t tensor_idx) const
Definition Context.h:101
Class that holds operator codes and related methods.
Definition Context.h:57
void set_module_outputs(coco::Module *m, TensorContext &ctx, TensorBags &bags, const IndexVector &outputs)
Set module output operands and its information.
Definition Frontend.cpp:60
std::vector< int32_t > IndexVector
Definition Convert.h:29
void set_module_inputs(coco::Module *m, TensorContext &ctx, TensorBags &bags, const IndexVector &inputs)
Set module input operands and its information.
Definition Frontend.cpp:36
void copy_tensors(GraphBuilderContext *ctx)
Copy values of tfl tensors into coco::Data if the data was not copied.
Definition Frontend.cpp:84
IndexVector as_index_vector(const flatbuffers::Vector< int32_t > *array)
Converts flatbuffers::Vector to IndexVector.
Definition Convert.cpp:28
static std::unique_ptr< Data > create(void)
Definition Data.cpp:202