ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Interpreter.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "luci_interpreter/Interpreter.h"

#include "loader/ModuleLoader.h"

#include <algorithm>
#include <stdexcept>
23
24namespace luci_interpreter
25{
26
27namespace
28{
29
30class EventNotifierImpl final : public EventNotifier
31{
32public:
33 EventNotifierImpl(const RuntimeToIR &runtime_to_ir,
34 const std::vector<ExecutionObserver *> &observers)
35 : _runtime_to_ir(runtime_to_ir), _observers(observers)
36 {
37 }
38
39 void postTensorWrite(const Tensor *tensor) override
40 {
41 assert(tensor != nullptr);
42 for (const auto &observer : _observers)
43 {
44 observer->postTensorWrite(_runtime_to_ir.tensor_to_node.at(tensor), tensor);
45 }
46 }
47
48 void preOperatorExecute(const Kernel *kernel) override
49 {
50 assert(kernel != nullptr);
51 for (const auto &observer : _observers)
52 {
53 observer->preOperatorExecute(_runtime_to_ir.kernel_to_node.at(kernel));
54 }
55 }
56
57 void postOperatorExecute(const Kernel *kernel) override
58 {
59 assert(kernel != nullptr);
60 for (const auto &observer : _observers)
61 {
62 observer->postOperatorExecute(_runtime_to_ir.kernel_to_node.at(kernel));
63 }
64 }
65
66private:
67 const RuntimeToIR &_runtime_to_ir;
68 const std::vector<ExecutionObserver *> &_observers;
69};
70
71} // namespace
72
74{
75 _runtime_to_ir = std::make_unique<RuntimeToIR>();
76 _event_notifier = std::make_unique<EventNotifierImpl>(*_runtime_to_ir, _observers);
77 _runtime_module = std::make_unique<RuntimeModule>(_event_notifier.get());
78
79 _default_memory_manager = std::make_unique<SimpleMemoryManager>();
80
81 ModuleLoader loader(module, _runtime_module.get(), *_runtime_to_ir, _node_to_tensor,
82 _default_memory_manager.get());
83 loader.load();
84}
85
88{
89 assert(memory_manager && "Use Interpreter::Interpreter(module) constructor instead");
90
91 _runtime_to_ir = std::make_unique<RuntimeToIR>();
92 _event_notifier = std::make_unique<EventNotifierImpl>(*_runtime_to_ir, _observers);
93 _runtime_module = std::make_unique<RuntimeModule>(_event_notifier.get());
94
95 ModuleLoader loader(module, _runtime_module.get(), *_runtime_to_ir, _node_to_tensor,
96 memory_manager);
97 loader.load();
98}
99
100Interpreter::~Interpreter() = default;
101
103 size_t data_size)
104{
105 Tensor *tensor = _runtime_module->getInputTensors()[input_node->index()];
106 if (tensor == nullptr)
107 {
108 const std::string &name = input_node->name();
109 throw std::runtime_error("Cannot find tensor for input node named \"" + name + "\".");
110 }
111 if (data != nullptr)
112 tensor->writeData(data, data_size);
113}
114
116 size_t data_size)
117{
118 Tensor *tensor = _runtime_module->getOutputTensors()[output_node->index()];
119 if (tensor == nullptr)
120 {
121 const std::string &name = output_node->name();
122 throw std::runtime_error("Cannot find tensor for output node named \"" + name + "\".");
123 }
124 if (data != nullptr)
125 tensor->readData(data, data_size);
126}
127
129{
130 Tensor *tensor = _runtime_module->getOutputTensors()[output_node->index()];
131 if (tensor == nullptr)
132 {
133 const std::string &name = output_node->name();
134 throw std::runtime_error("Cannot find tensor size for output node named \"" + name + "\".");
135 }
136
137 size_t tensor_size = luci_interpreter::getDataTypeSize(tensor->element_type());
138 tensor_size *= tensor->shape().num_elements();
139 return tensor_size;
140}
141
142void Interpreter::interpret() { _runtime_module->execute(); }
143
145{
146 if (std::find(_observers.cbegin(), _observers.cend(), observer) != _observers.cend())
147 throw std::runtime_error("Observer is already attached.");
148 _observers.push_back(observer);
149}
150
152
154
156
158
159} // namespace luci_interpreter
CircleNode used for Input of the Graph.
Definition CircleInput.h:36
void index(const loco::GraphInputIndex &index)
CircleNode for Output of the Graph.
void index(const loco::GraphOutputIndex &index)
Collection of 'loco::Graph's.
Definition Module.h:33
virtual void postOperatorExecute(const luci::CircleNode *node)
virtual void preOperatorExecute(const luci::CircleNode *node)
virtual void postTensorWrite(const luci::CircleNode *node, const Tensor *tensor)
void attachObserver(ExecutionObserver *observer)
size_t getOutputTensorSize(const luci::CircleOutput *output_node)
void writeInputTensor(const luci::CircleInput *input_node, const void *data, size_t data_size)
Interpreter(const luci::Module *module)
void readOutputTensor(const luci::CircleOutput *output_node, void *data, size_t data_size)
const T * data(const std::vector< T, Alloc > &v)
size_t getDataTypeSize(DataType data_type)
Definition DataType.h:33
CircleOutput * output_node(loco::Graph *g, const loco::GraphOutputIndex &index)
Find a CircleOutput node with a given output index.
CircleInput * input_node(loco::Graph *g, const loco::GraphInputIndex &index)
Find a CircleInput node with a given input index.
NodeName name(void) const