39 for (uint32_t n = 0; n < inputs.size(); ++n)
41 auto const tensor_id = inputs.at(n);
43 auto const tensor_name = ctx.
name(tensor_id);
44 auto const tensor_shape = ctx.
shape(tensor_id);
45 auto const tensor_bag = bags.
bag(tensor_id);
47 auto input =
m->entity()->input()->create(tensor_shape);
49 input->name(tensor_name);
50 input->bag(tensor_bag);
53 m->input()->insert(input);
63 for (uint32_t n = 0; n < outputs.size(); ++n)
65 auto const tensor_id = outputs.at(n);
67 auto const tensor_name = ctx.
name(tensor_id);
68 auto const tensor_shape = ctx.
shape(tensor_id);
69 auto const tensor_bag = bags.
bag(tensor_id);
71 auto output =
m->entity()->output()->create(tensor_shape);
73 output->name(tensor_name);
74 output->bag(tensor_bag);
77 m->output()->insert(output);
89 for (
auto &iter : ctx->
bags())
91 auto tfl_tensor_id = iter.first;
92 auto bag = iter.second;
97 if (ctx->
tensor().
type(tfl_tensor_id) == tflite::TensorType::TensorType_INT32)
99 std::cout <<
"*** INT32 COPYING IS NOT SUPPORTED ***" << std::endl;
103 assert(ctx->
tensor().
type(tfl_tensor_id) == tflite::TensorType::TensorType_FLOAT32);
105 auto span = d->f32()->weight(bag);
107 if (!(span.data() ==
nullptr && span.size() == 0))
110 if (tfl_buffer.ptr ==
nullptr || tfl_buffer.len == 0)
113 d->f32()->allocate(bag);
115 auto ifm_span = d->f32()->weight(bag);
116 for (uint32_t idx = 0; idx < tfl_buffer.len; ++idx)
118 ifm_span[idx] = tfl_buffer.ptr[idx];
132 auto model = _raw->model();
134 assert(model->version() == 3);
135 assert(model->subgraphs()->size() == 1);
137 auto graph = model->subgraphs()->Get(0);
154 auto blk =
m->entity()->block()->create();
155 m->block()->append(blk);
157 auto opcodes = model->operator_codes();
162 auto operators = graph->operators();
165 tensor_context, buffer_context, graph);
167 for (
int i = 0; i < operators->Length(); ++i)
169 const auto *op = operators->Get(i);
170 tflite::BuiltinOperator builtincode = opcode_context.builtin_code(op);
174 if (!graph_builder->validate(op))
176 throw std::runtime_error{
"Invalid operator"};
179 graph_builder->build(op, &opbuilder_context);
183 std::string opcodename = opcode_context.opcode_name(op);
184 throw std::runtime_error{
"Not supported: " + opcodename};
195 bundle.
data(std::move(d));
197 return std::move(bundle);
enco::Bundle load(void) const override
Top-level element of coco IR which represents a neural network.
static std::unique_ptr< Module > create(void)
coco::Data * data(void) const
coco::Module * module(void) const
Class to store context to build IR from tflite.
TflBufferContext & buffer()
const tflite::SubGraph * graph()
static GraphBuilderRegistry & get()
Pre-creates a coco::Bag for each operand (tensor)
coco::Bag * bag(int32_t tensor_id)
void prepare(const tflite::SubGraph *graph, std::unique_ptr< coco::Module > &m)
Extracts and holds operand (tensor) information such as name, shape, and type.
void prepare(const tflite::SubGraph *graph)
const std::string & name(uint32_t tensor_id)
const tflite::TensorType & type(uint32_t tensor_id)
const tensor::Shape & shape(uint32_t tensor_id)
Class to read and provide buffer information of tflite.
TflBuffer< T > tensor_buffer(const tflite::SubGraph *graph, uint32_t tensor_idx) const
Class that holds operator codes and related methods.
void set_module_outputs(coco::Module *m, TensorContext &ctx, TensorBags &bags, const IndexVector &outputs)
Sets module output operands and their information.
std::vector< int32_t > IndexVector
void set_module_inputs(coco::Module *m, TensorContext &ctx, TensorBags &bags, const IndexVector &inputs)
Sets module input operands and their information.
void copy_tensors(GraphBuilderContext *ctx)
Copies the values of tflite tensors into coco::Data for any data that has not been copied yet.
IndexVector as_index_vector(const flatbuffers::Vector< int32_t > *array)
Converts flatbuffers::Vector to IndexVector.
static std::unique_ptr< Data > create(void)