ONE - On-device Neural Engine
Loading...
Searching...
No Matches
nnkit::support::onnx::Runner Class Reference

#include <Runner.h>

Public Member Functions

 Runner (const std::string &path)
 
 ~Runner (void)
 
void prepareInputs (void)
 
void prepareOutputs (void)
 
TensorSet & inputs (void)
 
TensorSet & outputs (void)
 
void run (void)
 
 Runner (const Runner &)=delete
 
Runner & operator= (const Runner &)=delete
 

Detailed Description

Definition at line 34 of file Runner.h.

Constructor & Destructor Documentation

◆ Runner() [1/2]

nnkit::support::onnx::Runner::Runner ( const std::string &  path)

Definition at line 30 of file Runner.cpp.

30 : _allocator(std::make_unique<Allocator>())
31{
32 Status status;
33
34 status = OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "nnkit", &_env);
35 assert(!status.isError());
36
37 status = OrtCreateSession(_env, path.c_str(), nullptr, &_session);
38 assert(!status.isError());
39}

References nnkit::support::onnx::Status::isError().

◆ ~Runner()

nnkit::support::onnx::Runner::~Runner ( void  )

Definition at line 41 of file Runner.cpp.

42{
43 if (_session)
44 {
45 OrtReleaseSession(_session);
46 }
47
48 if (_env)
49 {
50 OrtReleaseEnv(_env);
51 }
52}

◆ Runner() [2/2]

nnkit::support::onnx::Runner::Runner ( const Runner & )
delete

Member Function Documentation

◆ inputs()

TensorSet & nnkit::support::onnx::Runner::inputs ( void  )
inline

◆ operator=()

Runner & nnkit::support::onnx::Runner::operator= ( const Runner & )
delete

◆ outputs()

TensorSet & nnkit::support::onnx::Runner::outputs ( void  )
inline

◆ prepareInputs()

void nnkit::support::onnx::Runner::prepareInputs ( void  )

Definition at line 54 of file Runner.cpp.

55{
56 Status status;
57
58 assert(_inputs == nullptr);
59
60 size_t num_input_nodes;
61 status = OrtSessionGetInputCount(_session, &num_input_nodes);
62 status.throwOnError();
63
64 _inputs = std::make_unique<TensorSet>(_allocator.get(), num_input_nodes);
65
66 for (size_t i = 0; i < num_input_nodes; ++i)
67 {
68 char *input_name;
69 status = OrtSessionGetInputName(_session, i, _allocator.get(), &input_name);
70 status.throwOnError();
71
72 assert(input_name != nullptr);
73
74 std::string name{input_name};
75 _allocator->Free(input_name);
76
77 OrtTypeInfo *typeinfo;
78 status = OrtSessionGetInputTypeInfo(_session, i, &typeinfo);
79 status.throwOnError();
80
81 const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
82 ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
83
84 uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
85 std::vector<size_t> dims(num_dims);
86 OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims);
87
88 // NOTE To run OnnxRuntime, the total size of input tensor must be fixed.
89 // In the present code, the unknown shape that is -1 is arbitrarily changed to 1.
90 //
91 // TODO Add user argument related to unknown shape
92 //
93 for (uint32_t j = 0; j < num_dims; ++j)
94 {
95 if (dims[j] == -1)
96 {
97 dims[j] = 1;
98 }
99 }
100 OrtReleaseTypeInfo(typeinfo);
101
102 _inputs->set(i, name, type, dims);
103 }
104}
type — Definition at line 18 of file infer.py.

References nnkit::support::onnx::Status::throwOnError().

Referenced by nnkit::support::onnx::Backend::prepare().

◆ prepareOutputs()

void nnkit::support::onnx::Runner::prepareOutputs ( void  )

Definition at line 106 of file Runner.cpp.

107{
108 Status status;
109
110 assert(_outputs == nullptr);
111
112 size_t num_output_nodes;
113 status = OrtSessionGetOutputCount(_session, &num_output_nodes);
114 status.throwOnError();
115
116 _outputs = std::make_unique<TensorSet>(_allocator.get(), num_output_nodes);
117
118 for (size_t i = 0; i < num_output_nodes; ++i)
119 {
120 char *output_name;
121 status = OrtSessionGetOutputName(_session, i, _allocator.get(), &output_name);
122 status.throwOnError();
123
124 assert(output_name != nullptr);
125
126 std::string name{output_name};
127 _allocator->Free(output_name);
128
129 OrtTypeInfo *typeinfo;
130 status = OrtSessionGetOutputTypeInfo(_session, i, &typeinfo);
131 status.throwOnError();
132
133 const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
134 ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
135
136 uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
137 std::vector<size_t> dims(num_dims);
138 OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims);
139
140 // NOTE To run OnnxRuntime, the total size of output tensor must be fixed.
141 // In the present code, the unknown shape that is -1 is arbitrarily changed to 1.
142 //
143 // TODO Add user argument related to unknown shape
144 //
145 for (uint32_t j = 0; j < num_dims; ++j)
146 {
147 if (dims[j] == -1)
148 {
149 dims[j] = 1;
150 }
151 }
152 OrtReleaseTypeInfo(typeinfo);
153
154 _outputs->set(i, name, type, dims);
155 }
156}

References nnkit::support::onnx::Status::throwOnError().

Referenced by nnkit::support::onnx::Backend::prepare().

◆ run()

void nnkit::support::onnx::Runner::run ( void  )

Definition at line 158 of file Runner.cpp.

159{
160 Status status;
161
162 auto pinput_names = _inputs->names();
163 std::vector<const char *> input_names(pinput_names.size());
164 for (size_t i = 0; i < pinput_names.size(); ++i)
165 {
166 input_names[i] = pinput_names[i].c_str();
167 }
168
169 auto poutput_names = _outputs->names();
170 std::vector<const char *> output_names(poutput_names.size());
171 for (size_t i = 0; i < poutput_names.size(); ++i)
172 {
173 output_names[i] = poutput_names[i].c_str();
174 }
175
176 status = OrtRun(_session, NULL, input_names.data(), _inputs->tensors().data(), _inputs->size(),
177 output_names.data(), _outputs->size(), _outputs->mutable_tensors().data());
178 status.throwOnError();
179}

References nnkit::support::onnx::Status::throwOnError().

Referenced by package.infer.session::inference(), and nnkit::support::onnx::Backend::run().


The documentation for this class was generated from the following files: