ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Interpreter.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef LUCI_INTERPRETER_INTERPRETER_H
18#define LUCI_INTERPRETER_INTERPRETER_H
19
20#include "luci_interpreter/core/Tensor.h"
21
22#ifdef USE_STATIC_ALLOC
25#else
27#endif // USE_STATIC_ALLOC
28
29#include "loader/ModuleLoader.h"
30#include <memory>
31
32namespace luci_interpreter
33{
34
#ifdef ENABLE_TRAINING
namespace training
{
// Forward declaration only: the full definition lives in the training
// component. Interpreter grants this class friend access (see the friend
// declaration inside Interpreter), so training code can reach the
// interpreter's private runtime state without widening the public API.
class TrainingOnertMicro;
} // namespace training

#endif // ENABLE_TRAINING
42
43class Interpreter
44{
45public:
46 // Construct default interpreter with dynamic allocations and with input allocations
47 explicit Interpreter(const char *model_data_raw, bool dealloc_input);
48
49#ifdef USE_STATIC_ALLOC
50 // Construct interpreter with configurations
51 explicit Interpreter(const char *model_data_raw, const InterpreterConfigure &configuration);
52#endif // USE_STATIC_ALLOC
53
55
56 void allocateAndWriteInputTensor(int32_t input_tensor_index, const uint8_t *data,
57 size_t data_size);
58 uint8_t *allocateInputTensor(int32_t input_tensor_index);
59
60 uint8_t *readOutputTensor(int32_t output_tensor_index);
61
62 int32_t getInputDataSizeByIndex(int32_t input_tensor_index);
63 int32_t getOutputDataSizeByIndex(int32_t output_tensor_index);
64
65 void interpret();
66
67#ifdef ENABLE_TRAINING
68 friend class training::TrainingOnertMicro;
69#endif // ENABLE_TRAINING
70
71private:
72 // _default_memory_manager should be before _runtime_module due to
73 // the order of deletion in the destructor
74 MemoryManager _memory_manager{};
75 RuntimeModule _runtime_module{};
76};
77
78} // namespace luci_interpreter
79
80#endif // LUCI_INTERPRETER_INTERPRETER_H
void allocateAndWriteInputTensor(int32_t input_tensor_index, const uint8_t *data, size_t data_size)
int32_t getInputDataSizeByIndex(int32_t input_tensor_index)
uint8_t * allocateInputTensor(int32_t input_tensor_index)
Interpreter(const luci::Module *module)
void readOutputTensor(const luci::CircleOutput *output_node, void *data, size_t data_size)
int32_t getOutputDataSizeByIndex(int32_t output_tensor_index)
const T * data(const std::vector< T, Alloc > &v)