ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Runner.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17// Code here refers https://github.com/Neargye/hello_tf_c_api
18//
19// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
20// SPDX-License-Identifier: MIT
21// Copyright (c) 2018 - 2019 Daniil Goncharov <neargye@gmail.com>.
22//
23// Permission is hereby granted, free of charge, to any person obtaining a copy
24// of this software and associated documentation files (the "Software"), to deal
25// in the Software without restriction, including without limitation the rights
26// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
27// copies of the Software, and to permit persons to whom the Software is
28// furnished to do so, subject to the following conditions:
29//
30// The above copyright notice and this permission notice shall be included in all
31// copies or substantial portions of the Software.
32//
33// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
36// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
37// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
38// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39// SOFTWARE.
40
42
45
#include <tensorflow/c/c_api.h>

#include <algorithm> // std::min()
#include <cassert>
#include <cstdio>   // std::fopen(), std::fread(), std::ftell(), std::fclose()
#include <cstdlib>  // std::malloc(), std::free()
#include <cstring>  // std::memcpy()
#include <stdexcept>
#include <vector>
52
53namespace nnkit
54{
55namespace support
56{
57namespace tf
58{
59
62
63namespace
64{
65TF_Tensor *create_tensor(const TF_DataType data_type, const std::int64_t *dims,
66 const std::size_t num_dims, const void *data, const std::size_t len)
67{
68 if (dims == nullptr || data == nullptr)
69 return nullptr;
70
71 TF_Tensor *tensor = TF_AllocateTensor(data_type, dims, static_cast<int>(num_dims), len);
72 if (tensor == nullptr)
73 return nullptr;
74
75 void *tensor_data = TF_TensorData(tensor);
76 if (tensor_data == nullptr)
77 {
78 TF_DeleteTensor(tensor);
79 return nullptr;
80 }
81
82 std::memcpy(tensor_data, data, std::min(len, TF_TensorByteSize(tensor)));
83
84 return tensor;
85}
86
// Deallocator hooked into TF_Buffer::data_deallocator; releases the buffer
// obtained from std::malloc() in build_TFBuffer(). Second parameter (length)
// is required by the TF callback signature but unused here.
void deallocate_buffer(void *data, size_t)
{
  assert(data);
  std::free(data);
}
92
93TF_Buffer *build_TFBuffer(const char *file)
94{
95 const auto f = std::fopen(file, "rb");
96
97 if (f == nullptr)
98 throw std::runtime_error(std::string("cannot open ") + file);
99
100 std::fseek(f, 0, SEEK_END); // to get file size
101 const auto fsize = ftell(f);
102
103 std::fseek(f, 0, SEEK_SET);
104
105 if (fsize < 1)
106 {
107 std::fclose(f);
108 throw std::runtime_error(std::string("file read error: ") + file);
109 }
110
111 const auto data = std::malloc(fsize);
112 std::fread(data, fsize, 1, f);
113 std::fclose(f);
114
115 TF_Buffer *buf = TF_NewBuffer();
116 buf->data = data;
117 buf->length = fsize;
118 buf->data_deallocator = deallocate_buffer;
119
120 return buf;
121}
122
123} // namespace
124
125Runner::Runner(const char *pb_path)
126{
127 // initialize member variables
128 _sess = nullptr;
129 _graph = TF_NewGraph();
130 _status = TF_NewStatus();
131
132 // import graph from file
133 TF_Buffer *buffer = build_TFBuffer(pb_path);
134 if (buffer == nullptr)
135 throw std::runtime_error("Can't read buffer from file");
136
137 TF_ImportGraphDefOptions *opts = TF_NewImportGraphDefOptions();
138
139 TF_GraphImportGraphDef(_graph, buffer, opts, _status);
140
141 TF_DeleteImportGraphDefOptions(opts);
142 TF_DeleteBuffer(buffer);
143
144 if (TF_GetCode(_status) != TF_OK) // TODO Consider wrapper to prevent memory leak
145 throw std::runtime_error("Can't import GraphDef");
146}
147
149{
150 if (_graph)
151 TF_DeleteGraph(_graph);
152
153 if (_sess)
154 {
155 TF_CloseSession(_sess, _status);
156 TF_DeleteSession(_sess, _status);
157 }
158
159 for (auto tensor : _input_tensors)
160 TF_DeleteTensor(tensor);
161
162 for (auto tensor : _output_tensors)
163 TF_DeleteTensor(tensor);
164
165 TF_DeleteStatus(_status);
166}
167
168bool Runner::getTensorShapeFromGraphDef(const std::unique_ptr<ParsedTensor> &tensor,
169 angkor::TensorShape &shape)
170{
171 assert(!tensor->hasShape());
172 TF_Output tensor_op = {TF_GraphOperationByName(_graph, tensor->nodeName().c_str()),
173 tensor->tensorIndex()};
174
175 if (tensor_op.oper == nullptr)
176 return false;
177
178 int dim_size = TF_GraphGetTensorNumDims(_graph, tensor_op, _status);
179 if (dim_size == -1)
180 return false;
181 int64_t dims[dim_size];
182
183 TF_GraphGetTensorShape(_graph, tensor_op, dims, dim_size, _status);
184
185 shape.resize(dim_size);
186 for (int d = 0; d < dim_size; d++)
187 {
188 if (dims[d] == -1)
189 return false;
190 shape.dim(d) = dims[d];
191 }
192 return true;
193}
194
195bool Runner::getTensorDtypeFromGraphDef(const std::unique_ptr<ParsedTensor> &tensor,
196 Runner::DataType &dtype)
197{
198 TF_Output tensor_op = {TF_GraphOperationByName(_graph, tensor->nodeName().c_str()),
199 tensor->tensorIndex()};
200
201 if (tensor_op.oper == nullptr)
202 return false;
203
204 TF_DataType tf_dtype = TF_OperationOutputType(tensor_op);
205
206 switch (tf_dtype)
207 {
208 case TF_DataType::TF_FLOAT:
209 dtype = DataType::FLOAT;
210 break;
211 case TF_DataType::TF_UINT8:
212 dtype = DataType::U8;
213 break;
214 case TF_DataType::TF_UINT16:
215 dtype = DataType::U16;
216 break;
217 case TF_DataType::TF_UINT32:
218 dtype = DataType::U32;
219 break;
220 case TF_DataType::TF_UINT64:
221 dtype = DataType::U64;
222 break;
223 case TF_DataType::TF_INT8:
224 dtype = DataType::S8;
225 break;
226 case TF_DataType::TF_INT16:
227 dtype = DataType::S16;
228 break;
229 case TF_DataType::TF_INT32:
230 dtype = DataType::S32;
231 break;
232 case TF_DataType::TF_INT64:
233 dtype = DataType::S64;
234 break;
235 default:
236 dtype = DataType::Unknown;
237 return false;
238 }
239 return true;
240}
241
242void Runner::prepareInputs(const std::vector<std::unique_ptr<ParsedTensor>> &inputs,
243 TensorDataMap &data_map)
244{
245 assert(_graph);
246
247 for (const auto &tensor : inputs)
248 {
249 TF_Output input_op = {TF_GraphOperationByName(_graph, tensor->nodeName().c_str()),
250 tensor->tensorIndex()};
251
252 if (input_op.oper == nullptr)
253 throw std::runtime_error("Can't init input_op : " + tensor->name());
254
255 std::vector<int64_t> shape;
256 for (int r = 0; r < tensor->shape().rank(); r++)
257 shape.emplace_back(tensor->shape().dim(r));
258
259 int size = 0;
260 if (tensor->isFloatTensor())
261 size = sizeof(float);
262 else
263 throw std::runtime_error("Not supported tensor type");
264
265 TF_Tensor *input_tensor =
266 create_tensor(TF_FLOAT, shape.data(), shape.size(), data_map.data(tensor.get()),
267 num_elements(tensor->shape()) * size);
268
269 _input_ops.emplace_back(input_op);
270 _input_tensors.emplace_back(input_tensor);
271 }
272}
273
274void Runner::prepareOutputs(const std::vector<std::unique_ptr<ParsedTensor>> &outputs)
275{
276 assert(_graph);
277
278 for (const auto &tensor : outputs)
279 {
280 TF_Output output_op = {TF_GraphOperationByName(_graph, tensor->nodeName().c_str()),
281 tensor->tensorIndex()};
282
283 if (output_op.oper == nullptr)
284 throw std::runtime_error("Can't init output_op : " + tensor->name());
285
286 _output_ops.emplace_back(output_op);
287 }
288
289 _output_tensors.resize(_output_ops.size());
290}
291
293{
294 assert(_graph);
295 assert(_output_ops.size() > 0);
296
297 TF_SessionOptions *options = TF_NewSessionOptions();
298 _sess = TF_NewSession(_graph, options, _status);
299 TF_DeleteSessionOptions(options);
300
301 if (TF_GetCode(_status) != TF_OK)
302 throw std::runtime_error(TF_Message(_status));
303
304 TF_SessionRun(_sess,
305 nullptr, // Run options.
306 _input_ops.data(), _input_tensors.data(), _input_ops.size(), _output_ops.data(),
307 _output_tensors.data(), _output_ops.size(), nullptr,
308 0, // Target operations, number of targets.
309 nullptr, // Run metadata.
310 _status // Output status.
311 );
312
313 if (TF_GetCode(_status) != TF_OK)
314 throw std::runtime_error(TF_Message(_status));
315
316 TF_CloseSession(_sess, _status);
317 TF_DeleteSession(_sess, _status);
318 _sess = nullptr;
319}
320
321} // namespace tf
322} // namespace support
323} // namespace nnkit
#define TF_FLOAT
Definition Compat.h:25
uint32_t & dim(uint32_t axis)
Definition Shape.cpp:42
Shape & resize(uint32_t size)
Definition Shape.cpp:36
void prepareInputs(const std::vector< std::unique_ptr< ParsedTensor > > &inputs, TensorDataMap &data_map)
Definition Runner.cpp:242
void prepareOutputs(const std::vector< std::unique_ptr< ParsedTensor > > &outputs)
Definition Runner.cpp:274
bool getTensorShapeFromGraphDef(const std::unique_ptr< ParsedTensor > &tensor, angkor::TensorShape &shape)
Get tensor shape from GraphDef for input tensor only.
Definition Runner.cpp:168
bool getTensorDtypeFromGraphDef(const std::unique_ptr< ParsedTensor > &tensor, Runner::DataType &dtype)
Get tensor data type from GraphDef.
Definition Runner.cpp:195
Runner(const char *pb_path)
Definition Runner.cpp:125
Class to map parsed tensor and memory for tensor values. For parsed tensor, this memory is used to fi...
uint8_t * data(const ParsedTensor *parsed_tensor)
Class to store tensor information parsed from test.info file under moco/test/tf.
uint64_t num_elements(const Shape &)
Definition Shape.cpp:51
DataType
Supported Data Types.
int32_t size[5]
Definition Slice.cpp:35