ONE - On-device Neural Engine
Context.h
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CONTEXT_H__
#define __CONTEXT_H__

#include "Convert.h"
#include "TensorBags.h"

#include <coco/IR/Data.h>
#include <coco/IR/Module.h>

#include <schema_generated.h>

// Standard headers used directly in this file
#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

using namespace nncc::core::ADT;

namespace tflimport
{

/**
 * @brief Extracts and holds operand(tensor) information such as name, shape, and type
 */
class TensorContext
{
public:
  void prepare(const tflite::SubGraph *graph);

  const std::string &name(uint32_t tensor_id) { return _name_ctx[tensor_id]; }
  const tensor::Shape &shape(uint32_t tensor_id) { return _shape_ctx[tensor_id]; }
  const tflite::TensorType &type(uint32_t tensor_id) { return _type_ctx[tensor_id]; }

private:
  std::map<uint32_t, std::string> _name_ctx;
  std::map<uint32_t, tensor::Shape> _shape_ctx;
  std::map<uint32_t, tflite::TensorType> _type_ctx;
};
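
/*
 * Usage sketch (illustrative, not part of the original interface): prepare()
 * walks the subgraph once and caches per-tensor metadata, so later lookups by
 * tensor id are simple map reads. `subgraph` and `ifm_id` below are assumed to
 * come from the parsed tflite::Model.
 *
 *   TensorContext tensor_context;
 *   tensor_context.prepare(subgraph);
 *
 *   const tensor::Shape &ifm_shape = tensor_context.shape(ifm_id);
 *   const std::string &ifm_name = tensor_context.name(ifm_id);
 */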

/**
 * @brief Class that holds operator codes and related methods
 */
class TflOpCodeContext
{
public:
  /**
   * @brief Returns BuiltinOperator value of the operator
   */
  tflite::BuiltinOperator builtin_code(const tflite::Operator *op) const;

  /**
   * @brief Returns human readable name of the operator code of the operator
   */
  std::string opcode_name(const tflite::Operator *op) const;

public:
  static bool is_valid(const tflite::OperatorCode *opcode);
  static bool is_custom(const tflite::OperatorCode *opcode);

private:
  std::vector<const tflite::OperatorCode *> _opcodes;
};
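
/*
 * Usage sketch (illustrative): given an operator from the subgraph, the
 * context resolves its builtin code or a printable name. `op_ctx` (a prepared
 * TflOpCodeContext) and `op` are assumed to be set up by the importer.
 *
 *   // op: const tflite::Operator *, taken from subgraph->operators()
 *   tflite::BuiltinOperator code = op_ctx.builtin_code(op);
 *   std::string name = op_ctx.opcode_name(op); // human readable, e.g. for diagnostics
 */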

/**
 * @brief Class to read and provide buffer information of tflite
 */
class TflBufferContext
{
public:
  template <typename T> struct TflBuffer
  {
    TflBuffer(const T *p, size_t s) : ptr{p}, len{s} {};
    const T *ptr;
    size_t len;
  };

public:
  explicit TflBufferContext(const tflite::Model *tfl_model);

public:
  template <typename T>
  TflBuffer<T> tensor_buffer(const tflite::SubGraph *graph, uint32_t tensor_idx) const
  {
    TflBufferContext::TflBuffer<T> res{nullptr, 0};
    const auto *tensor = graph->tensors()->Get(tensor_idx);
    uint32_t tfl_buf_id = tensor->buffer();

    assert(_buffer_ctx.size() > tfl_buf_id);

    const tflite::Buffer *tfl_buffer = _buffer_ctx.at(tfl_buf_id);

    // A buffer entry may carry no data (e.g. for non-constant tensors); in that
    // case the empty {nullptr, 0} result is returned as-is.
    if (auto *array = tfl_buffer->data())
    {
      if (size_t size = array->size())
      {
        assert(size % sizeof(T) == 0);

        res.len = size / sizeof(T);
        res.ptr = reinterpret_cast<const T *>(array->data());
      }
    }

    return res;
  }

private:
  std::map<uint32_t /* Buffer ID */, const tflite::Buffer *> _buffer_ctx;
};
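
/*
 * Usage sketch (illustrative): reading the constant weights of a tensor as
 * floats. `buffer_context`, `subgraph`, and `ker_idx` are assumed to be
 * provided by the importer; a null `ptr` means the tensor has no constant
 * data attached.
 *
 *   auto ker = buffer_context.tensor_buffer<float>(subgraph, ker_idx);
 *   if (ker.ptr != nullptr)
 *   {
 *     for (size_t n = 0; n < ker.len; ++n)
 *     {
 *       float value = ker.ptr[n];
 *       // ... copy into the corresponding coco::Data span, etc.
 *     }
 *   }
 */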

/**
 * @brief Class to store context to build IR from tflite
 */
class GraphBuilderContext
{
public:
  GraphBuilderContext(coco::Module *m, coco::Data *d, coco::Block *block,
                      TensorBags &tensor_bags, TensorContext &tensor_context,
                      TflBufferContext &buffer_context, const tflite::SubGraph *graph)
    : _m(m), _d(d), _block(block), _tensor_bags(tensor_bags), _tensor_context(tensor_context),
      _buffer_context(buffer_context), _graph(graph)
  {
    // DO NOTHING
  }

  GraphBuilderContext(const GraphBuilderContext &) = delete;
  GraphBuilderContext(GraphBuilderContext &&) = delete;

public:
  coco::Module *m() { return _m; }
  coco::Data *d() { return _d; }
  coco::Block *block() { return _block; }
  TensorContext &tensor() { return _tensor_context; }
  TensorBags &bags() { return _tensor_bags; }
  TflBufferContext &buffer() { return _buffer_context; }
  const tflite::SubGraph *graph() { return _graph; }

private:
  coco::Module *_m;
  coco::Data *_d;
  coco::Block *_block;
  TensorContext &_tensor_context;
  TensorBags &_tensor_bags;
  TflBufferContext &_buffer_context;
  const tflite::SubGraph *_graph;
};
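
/*
 * Usage sketch (illustrative): an operator builder receives a single
 * GraphBuilderContext and pulls whatever it needs from it. The function name
 * `build_some_op` and `ifm_idx` are assumptions for demonstration only.
 *
 *   void build_some_op(const tflite::Operator *op, GraphBuilderContext *ctx)
 *   {
 *     coco::Block *blk = ctx->block();
 *     const tflite::SubGraph *graph = ctx->graph();
 *     const tensor::Shape &ifm_shape = ctx->tensor().shape(ifm_idx);
 *     // ... create coco instructions via ctx->m() and append them to blk ...
 *   }
 */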

} // namespace tflimport

#endif // __CONTEXT_H__