ONE - On-device Neural Engine
nnfw_session.cc
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnfw_session.h"

#include "compiler/CompilerFactory.h"
#include "exporter/CircleExporter.h"
#include "exporter/train/CheckpointExporter.h"
#include "ir/OpCode.h"
#include "json/json.h"
#include "loader/CircleLoader.h"
#include "loader/ModelLoader.h"
#include "loader/TFLiteLoader.h"
#include "loader/train/CheckpointLoader.h"
#include "loader/train/TraininfoLoader.h"
#include "util/ConfigSource.h"
#include "util/Exceptions.h"
#include "util/logging.h"
#include "util/TracingCtx.h"

#include <misc/string_helpers.h>

#include <cassert>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

/*
 * The API does not accept string arguments longer than the limits below.
 */
#define MAX_BACKEND_NAME_LENGTH 32
#define MAX_OP_NAME_LENGTH 64
#define MAX_PATH_LENGTH 1024
#define MAX_TENSOR_NAME_LENGTH 64
namespace
{

// Returns true if str contains a null terminator within the first `length` bytes
bool null_terminating(const char *str, uint32_t length)
{
  for (uint32_t i = 0; i < length; i++)
  {
    if (*(str + i) == '\0')
    {
      return true;
    }
  }
  return false;
}

onert::ir::Layout convertLayout(NNFW_LAYOUT layout)
{
  if (layout == NNFW_LAYOUT_CHANNELS_LAST)
  {
    return onert::ir::Layout::NHWC;
  }
  else if (layout == NNFW_LAYOUT_CHANNELS_FIRST)
  {
    return onert::ir::Layout::NCHW;
  }
  return onert::ir::Layout::UNKNOWN;
}

NNFW_STATUS getTensorIndexImpl(const onert::ir::IGraph &graph, const char *tensorname,
                               uint32_t *index, bool is_input)
{
  if (!tensorname || !index)
    return NNFW_STATUS_UNEXPECTED_NULL;

  if (!null_terminating(tensorname, MAX_TENSOR_NAME_LENGTH))
  {
    std::cerr << "Tensor name is too long" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  auto ind_found = is_input ? graph.getInputIndex(tensorname) : graph.getOutputIndex(tensorname);

  if (ind_found.undefined())
  {
    // Not found
    return NNFW_STATUS_ERROR;
  }
  else
  {
    *index = ind_found.value();
    return NNFW_STATUS_NO_ERROR;
  }
}

std::string trim(const std::string &value)
{
  std::string whitespace = " \t";
  auto begin = value.find_first_not_of(whitespace);
  if (begin == std::string::npos)
    return ""; // no content

  auto end = value.find_last_not_of(whitespace);
  auto range = end - begin + 1;
  return value.substr(begin, range);
}

bool loadConfigure(const std::string cfgfile, onert::util::CfgKeyValues &keyValues)
{
  std::ifstream ifs(cfgfile);
  if (ifs.is_open())
  {
    std::string line;
    while (std::getline(ifs, line))
    {
      auto cmtpos = line.find('#');
      if (cmtpos != std::string::npos)
      {
        line = line.substr(0, cmtpos);
      }
      std::istringstream isline(line);
      std::string key;
      if (std::getline(isline, key, '='))
      {
        std::string value;
        if (std::getline(isline, value))
        {
          key = trim(key);
          keyValues[key] = trim(value);
        }
      }
    }
    ifs.close();
    return true;
  }
  return false;
}
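
/*
 * Note: a minimal sketch of the config format parsed above (key names are
 * illustrative, not prescribed by this file). Each line is a KEY=VALUE pair,
 * '#' starts a comment, and both key and value are trimmed:
 *
 *   # sample config
 *   BACKENDS=cpu;acl_neon   # stored as keyValues["BACKENDS"] == "cpu;acl_neon"
 *   GRAPH_DOT_DUMP=1
 */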

NNFW_TYPE datatype_to_nnfw_dtype(onert::ir::DataType dt)
{
  using onert::ir::DataType;
  switch (dt)
  {
    case DataType::FLOAT32:
      return NNFW_TYPE_TENSOR_FLOAT32;
    case DataType::INT32:
      return NNFW_TYPE_TENSOR_INT32;
    case DataType::QUANT_UINT8_ASYMM:
      return NNFW_TYPE_TENSOR_QUANT8_ASYMM;
    case DataType::BOOL8:
      return NNFW_TYPE_TENSOR_BOOL;
    case DataType::UINT8:
      return NNFW_TYPE_TENSOR_UINT8;
    case DataType::INT64:
      return NNFW_TYPE_TENSOR_INT64;
    case DataType::QUANT_INT8_ASYMM:
      return NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED;
    case DataType::QUANT_INT16_SYMM:
      return NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED;
    case DataType::UINT32:
    case DataType::QUANT_INT8_SYMM:
    default:
      throw std::runtime_error("Error: Model has type that runtime API does not support.");
  }
}

void fillTensorInfo(nnfw_tensorinfo *ti, const onert::ir::Shape &shape,
                    const onert::ir::DataType &dtype)
{
  ti->rank = shape.rank();
  for (int j = 0; j < ti->rank; ++j)
  {
    ti->dims[j] = shape.dim(j);
  }
  ti->dtype = datatype_to_nnfw_dtype(dtype);
}

std::unique_ptr<onert::ir::Model> loadModel(const std::string filename,
                                            const std::string model_type)
{
  try
  {
    if (model_type == "tflite")
      return onert::loader::loadTFLiteModel(filename.c_str());
    if (model_type == "circle")
      return onert::loader::loadCircleModel(filename.c_str());

    return onert::loader::loadModel(filename, model_type);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Failed to load model: " << e.what() << '\n';
  }

  return std::unique_ptr<onert::ir::Model>(nullptr);
}

std::unique_ptr<onert::ir::train::TrainingInfo>
loadTrainingInfo(const std::shared_ptr<onert::ir::Model> &model)
{
  const auto tinfo_name = onert::loader::TRAININFO_METADATA_NAME;
  if (model->exists_metadata(tinfo_name))
  {
    const auto buffer = model->extract_metadata(tinfo_name);
    return onert::loader::loadTrainingInfo(buffer->base(), buffer->size());
  }
  return std::make_unique<onert::ir::train::TrainingInfo>();
}

uint64_t getBufSize(const nnfw_tensorinfo *info)
{
  static int elmsize[] = {
    sizeof(float),   /* NNFW_TYPE_TENSOR_FLOAT32 = 0 */
    sizeof(int),     /* NNFW_TYPE_TENSOR_INT32 = 1 */
    sizeof(uint8_t), /* NNFW_TYPE_TENSOR_QUANT8_ASYMM = 2 */
    sizeof(bool),    /* NNFW_TYPE_TENSOR_BOOL = 3 */
    sizeof(uint8_t), /* NNFW_TYPE_TENSOR_UINT8 = 4 */
    sizeof(int64_t), /* NNFW_TYPE_TENSOR_INT64 = 5 */
    sizeof(int8_t),  /* NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED = 6 */
    sizeof(int16_t), /* NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED = 7 */
  };

  uint64_t n = 1;
  for (int32_t i = 0; i < info->rank; ++i)
  {
    assert(info->dims[i] >= 0);
    n *= info->dims[i];
  }
  return elmsize[info->dtype] * n;
}
} // namespace
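
/*
 * Worked example for getBufSize() (values are illustrative): a
 * NNFW_TYPE_TENSOR_FLOAT32 tensor of rank 3 with dims {1, 224, 3} has
 * 1 * 224 * 3 = 672 elements, so the buffer size is
 * sizeof(float) * 672 = 2688 bytes.
 */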

nnfw_session::nnfw_session()
  : _nnpkg{nullptr}, _coptions{onert::compiler::CompilerOptions::fromGlobalConfig()},
    _compiler_artifact{nullptr}, _execution{nullptr}, _kernel_registry{nullptr},
    _train_info{nullptr}, _quant_manager{std::make_unique<onert::odc::QuantizeManager>()},
    _codegen_manager{std::make_unique<onert::odc::CodegenManager>()}, _model_path{}
{
  // DO NOTHING
}

NNFW_STATUS nnfw_session::create(nnfw_session **session)
{
  if (session == nullptr)
    return NNFW_STATUS_UNEXPECTED_NULL;
  try
  {
    auto new_session = std::unique_ptr<nnfw_session>(new nnfw_session());
    new_session->_kernel_registry = std::make_shared<onert::api::CustomKernelRegistry>();
    *session = new_session.release();
  }
  catch (const std::bad_alloc &e)
  {
    std::cerr << "Error during session creation" << std::endl;
    *session = nullptr; // Set nullptr on error to keep the old behavior
    return NNFW_STATUS_OUT_OF_MEMORY;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during session initialization : " << e.what() << std::endl;
    *session = nullptr; // Set nullptr on error to keep the old behavior
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

nnfw_session::~nnfw_session() = default;

NNFW_STATUS nnfw_session::load_circle_from_buffer(uint8_t *buffer, size_t size)
{
  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (!buffer)
    return NNFW_STATUS_UNEXPECTED_NULL;

  if (size == 0)
    return NNFW_STATUS_ERROR;

  try
  {
    auto model = onert::loader::loadCircleModel(buffer, size);
    // TODO: Update _model_path if necessary
    _nnpkg = std::make_shared<onert::ir::NNPkg>(std::move(model));
    _train_info = loadTrainingInfo(_nnpkg->primary_model());
    _state = State::MODEL_LOADED;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model loading : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::load_model_from_path(const char *path)
{
  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (!path)
  {
    std::cerr << "Path is null." << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!null_terminating(path, MAX_PATH_LENGTH))
  {
    std::cerr << "Path is too long" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    std::filesystem::path filename{path};
    if (!std::filesystem::is_directory(filename) && filename.has_extension())
    {
      std::string model_type = filename.extension().string().substr(1); // excluding the dot
      return loadModelFile(filename, model_type);
    }

    const auto &package_dir = filename;

    // TODO : add support for zipped package file load
    if (!std::filesystem::is_directory(package_dir))
    {
      std::cerr << "invalid path: " << package_dir << std::endl;
      return NNFW_STATUS_ERROR;
    }

    const auto manifest_file_name = package_dir / "metadata/MANIFEST";
    std::ifstream mfs(manifest_file_name);

    // extract the filename of the first(index 0) model
    // e.g. In MANIFEST file, { "models" : [ "firstmodel.tflite", "2nd.tflite" ] }
    Json::Value root;
    mfs >> root;
    const Json::Value &models = root["models"];
    const Json::Value &model_types = root["model-types"];
    const Json::Value &configs = root["configs"];

    if (!configs.empty() && !configs[0].empty())
    {
      const auto filepath = package_dir / "metadata" / configs[0].asString();

      onert::util::CfgKeyValues keyValues;
      if (loadConfigure(filepath.string(), keyValues))
      {
        onert::util::setConfigKeyValues(keyValues);
      }
    }
    _nnpkg = std::make_shared<onert::ir::NNPkg>();
    auto num_models = models.size();
    if (num_models == 0 || (num_models - 1) > onert::ir::ModelIndex::max())
    {
      std::cerr << "Invalid model size - " << std::to_string(num_models) << std::endl;
      return NNFW_STATUS_ERROR;
    }

    // Backend mapping to operator index is not supported for multiple models yet
    // TODO Support this
    if (num_models > 1 && _coptions->manual_scheduler_options.index_to_backend.size() != 0)
    {
      std::cerr << "Cannot set backend to operator index for multiple models" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    for (uint16_t i = 0; i < num_models; ++i)
    {
      const auto model_file_path = package_dir / models[i].asString();
      const auto model_type = model_types[i].asString();
      auto model = loadModel(model_file_path.string(), model_type);
      if (model == nullptr)
        return NNFW_STATUS_ERROR;
      _model_path = model_file_path; // TODO Support multiple models
      model->bindKernelBuilder(_kernel_registry->getBuilder());
      _nnpkg->push(onert::ir::ModelIndex{i}, std::move(model));
    }

    _train_info = loadTrainingInfo(_nnpkg->primary_model());

    auto toIODesc = [](std::string str) {
      auto indices = nnfw::misc::split(str, ':');
      if (indices.size() != 3)
      {
        std::cerr << "IODesc should be 3-tuple." << std::endl;
        return onert::ir::IODesc{};
      }
      auto model_idx = static_cast<uint32_t>(std::stoi(indices.at(0)));
      auto subgraph_idx = static_cast<uint32_t>(std::stoi(indices.at(1)));
      auto operand_idx = static_cast<uint32_t>(std::stoi(indices.at(2)));
      return onert::ir::IODesc{model_idx, subgraph_idx, operand_idx};
    };
    // read pkg-inputs and pkg-outputs
    const Json::Value &pkg_inputs = root["pkg-inputs"];
    for (uint32_t i = 0; i < pkg_inputs.size(); ++i)
      _nnpkg->addInput(toIODesc(pkg_inputs[i].asString()));
    const Json::Value &pkg_outputs = root["pkg-outputs"];
    for (uint32_t i = 0; i < pkg_outputs.size(); ++i)
      _nnpkg->addOutput(toIODesc(pkg_outputs[i].asString()));
    // read model-connect
    const Json::Value &fromtos = root["model-connect"];
    for (uint32_t i = 0; i < fromtos.size(); ++i)
    {
      const Json::Value &tos = fromtos[i]["to"];
      for (uint32_t j = 0; j < tos.size(); ++j)
        _nnpkg->addEdge(toIODesc(fromtos[i]["from"].asString()), toIODesc(tos[j].asString()));
    }

    _nnpkg->verify();
    _state = State::MODEL_LOADED;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model loading : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}
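
/*
 * A hypothetical two-model MANIFEST that the loader above accepts (file names
 * and indexes are illustrative):
 *
 *   {
 *     "models" : [ "first.tflite", "second.circle" ],
 *     "model-types" : [ "tflite", "circle" ],
 *     "pkg-inputs" : [ "0:0:0" ],
 *     "pkg-outputs" : [ "1:0:0" ],
 *     "model-connect" : [ { "from" : "0:0:0", "to" : [ "1:0:0" ] } ]
 *   }
 *
 * Each IODesc string is "model:subgraph:operand", matching toIODesc() above.
 */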

NNFW_STATUS nnfw_session::prepare()
{
  // NOTE. If users want to run prepare() more than one time, this could be removed.
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during model prepare : ";
    if (isStateInitialized())
    {
      std::cerr << "prepare should be run once";
    }
    else
    {
      std::cerr << "invalid state";
    }
    std::cerr << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    auto compiler = onert::compiler::CompilerFactory::get().create(_nnpkg, _coptions.get());
    _nnpkg.reset();
    _compiler_artifact = compiler->compile();
    _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model prepare : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::PREPARED;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::run()
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::run : "
              << "run should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    _execution->execute();
  }
  catch (const onert::InsufficientBufferSizeException &e)
  {
    // Currently insufficient buffer always means output buffer.
    std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
    return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::FINISHED_RUN;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::run_async()
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::run_async : "
              << "run_async should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _execution->startExecute();

  _state = State::RUNNING;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::await()
{
  if (!isStateRunning())
  {
    std::cerr << "Error during nnfw_session::run_await : "
              << "run_await should be run after run_async" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _execution->waitFinish();

  _state = State::FINISHED_RUN;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_input(uint32_t index, NNFW_TYPE type, const void *buffer,
                                    size_t length)
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_input : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (!buffer && length != 0)
  {
    std::cerr
      << "Error during nnfw_session::set_input : given buffer is NULL but the length is not 0"
      << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Allow float input internal quantization only
    if (type == NNFW_TYPE_TENSOR_FLOAT32)
      _execution->setInputType(onert::ir::IOIndex(index),
                               onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
    _execution->setInput(onert::ir::IOIndex(index), buffer, length);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_input : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_output : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (!buffer && length != 0)
  {
    std::cerr
      << "Error during nnfw_session::set_output : given buffer is NULL but the length is not 0"
      << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Allow float output internal dequantization only
    if (type == NNFW_TYPE_TENSOR_FLOAT32)
      _execution->setOutputType(onert::ir::IOIndex(index),
                                onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
    _execution->setOutput(onert::ir::IOIndex(index), buffer, length);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_output : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}
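
/*
 * Typical inference sequence over this class, sketched via its C API wrappers
 * (the model path and buffer sizes are assumptions, not taken from this file):
 *
 * @code
 *   nnfw_session *session = nullptr;
 *   nnfw_create_session(&session);
 *   nnfw_load_model_from_file(session, "model.nnpackage");
 *   nnfw_prepare(session);
 *   nnfw_set_input(session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, input_bytes);
 *   nnfw_set_output(session, 0, NNFW_TYPE_TENSOR_FLOAT32, output, output_bytes);
 *   nnfw_run(session);
 *   nnfw_close_session(session);
 * @endcode
 */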

NNFW_STATUS nnfw_session::input_size(uint32_t *number)
{
  if (isStateInitialized()) // Model is not loaded
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (number == nullptr)
    {
      std::cerr << "Error during nnfw_session::input_size, number is null pointer." << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }
    *number = getInputSize();
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::input_size : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::output_size(uint32_t *number)
{
  if (isStateInitialized()) // Model is not loaded
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (number == nullptr)
    {
      std::cerr << "Error during nnfw_session::output_size, number is null pointer." << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }
    *number = getOutputSize();
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::output_size : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_input_layout(uint32_t index, NNFW_LAYOUT layout)
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_input_layout : "
              << "set_input_layout should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
        layout != NNFW_LAYOUT_CHANNELS_LAST)
    {
      std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _execution->setInputLayout(onert::ir::IOIndex(index), convertLayout(layout));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_input_layout : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_output_layout(uint32_t index, NNFW_LAYOUT layout)
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_output_layout : "
              << "set_output_layout should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
        layout != NNFW_LAYOUT_CHANNELS_LAST)
    {
      std::cerr << "Error during nnfw_session::set_output_layout, not supported layout"
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _execution->setOutputLayout(onert::ir::IOIndex(index), convertLayout(layout));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_output_layout : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_input_tensorinfo(uint32_t index, const nnfw_tensorinfo *ti)
{
  // sanity check
  {
    if (isStateInitialized())
    {
      std::cerr << "Error during set_input_tensorinfo : should be run after load_model"
                << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    if (ti == nullptr)
    {
      std::cerr << "Error during nnfw_session::set_input_tensorinfo : tensorinfo is null"
                << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }

    if (ti->rank <= 0 || ti->rank > NNFW_MAX_RANK)
    {
      std::cerr << "unsupported rank: " << ti->rank << std::endl;
      return NNFW_STATUS_ERROR;
    }

    for (int32_t i = 0; i < ti->rank; ++i)
    {
      if (ti->dims[i] <= 0)
      {
        std::cerr << "dim must be positive integer but was " << ti->dims[i] << std::endl;
        return NNFW_STATUS_ERROR;
      }
    }
  }

  onert::ir::Shape new_shape(ti->rank);
  for (int32_t i = 0; i < ti->rank; i++)
    new_shape.dim(i) = ti->dims[i];

  if (!isStatePreparedOrFinishedRun())
  {
    // when called before nnfw_session::prepare(), the applied input shape
    // propagates through compilation and execution
    _nnpkg->changeInputShape(index, new_shape);
  }
  else // when called after nnfw_session::prepare()
    _execution->changeInputShape(onert::ir::IOIndex(index), new_shape);

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
{
  if (isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (ti == nullptr)
    {
      std::cerr << "Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer."
                << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }

    if (index >= getInputSize())
    {
      std::cerr << "Error during nnfw_session::input_tensorinfo, index is out of range."
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    if (isStateModelLoaded())
    {
      auto info = _nnpkg->inputInfo(index);
      fillTensorInfo(ti, info.shape(), info.typeInfo().type());
    }
    else
    {
      auto io_index = onert::ir::IOIndex{index};
      auto shape = _execution->getInputShape(io_index);
      auto dtype = _compiler_artifact->_executors->inputInfo(io_index).typeInfo().type();
      fillTensorInfo(ti, shape, dtype);
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::input_tensorinfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::output_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
{
  if (isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (ti == nullptr)
  {
    std::cerr << "Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer."
              << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  try
  {
    if (index >= getOutputSize())
    {
      std::cerr << "Error during nnfw_session::output_tensorinfo, index is out of range."
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    if (isStateModelLoaded())
    {
      auto info = _nnpkg->outputInfo(index);
      fillTensorInfo(ti, info.shape(), info.typeInfo().type());
    }
    else
    {
      auto io_index = onert::ir::IOIndex{index};
      auto shape = _execution->getOutputShape(io_index);
      auto dtype = _compiler_artifact->_executors->outputInfo(io_index).typeInfo().type();
      fillTensorInfo(ti, shape, dtype);
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::output_tensorinfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::register_custom_operation(const std::string &id,
                                                    nnfw_custom_eval eval_func)
{
  _kernel_registry->registerKernel(id, eval_func);
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_available_backends(const char *backends)
{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (!backends)
      return NNFW_STATUS_UNEXPECTED_NULL;
    if (null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false)
      return NNFW_STATUS_ERROR;

    using namespace onert::util;

    _coptions->backend_list = nnfw::misc::split(std::string{backends}, ';');
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_available_backends : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_workspace(const char *dir)
{
  // TODO Check dir read & write permission

  if (!dir)
    return NNFW_STATUS_UNEXPECTED_NULL;

  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  _coptions->workspace_dir = std::string(dir);

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::deprecated(const char *msg)
{
  std::cerr << msg << std::endl;
  return NNFW_STATUS_DEPRECATED_API;
}

NNFW_STATUS nnfw_session::set_config(const char *key, const char *value)
{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  if (!key || !value)
    return NNFW_STATUS_UNEXPECTED_NULL;

  using namespace onert::util;

  const std::string skey = key;

  if (skey == config::GRAPH_DOT_DUMP)
  {
    _coptions->graph_dump_level = toInt(value);
  }
  else if (skey == config::EXECUTOR)
  {
    _coptions->executor = value;
  }
  else if (skey == config::OP_BACKEND_ALLOPS)
  {
    _coptions->manual_scheduler_options.backend_for_all = value;
  }
  else if (skey == config::USE_SCHEDULER)
  {
    _coptions->he_scheduler = toBool(value);
  }
  else if (skey == config::PROFILING_MODE)
  {
    _coptions->he_profiling_mode = toBool(value);
  }
  else
  {
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

const onert::ir::IGraph *nnfw_session::primary_subgraph()
{
  if (_nnpkg != nullptr)
  {
    assert(_execution == nullptr);
    return _nnpkg->primary_model()->primary_subgraph().get();
  }
  else
  {
    assert(_execution != nullptr);
    // We assumed the graph will not change after compilation, but shape could change
    return &_execution->primary_subgraph();
  }
}

uint32_t nnfw_session::getInputSize()
{
  if (isStateInitialized())
    throw std::runtime_error{"Model is not loaded yet"};

  if (isStateModelLoaded())
    return _nnpkg->inputSize();

  // Session is prepared (general inference)
  return _compiler_artifact->_executors->inputSize();
}

uint32_t nnfw_session::getOutputSize()
{
  if (isStateInitialized())
    throw std::runtime_error{"Model is not loaded yet"};

  if (isStateModelLoaded())
    return _nnpkg->outputSize();

  // Session is prepared (general inference)
  return _compiler_artifact->_executors->outputSize();
}

NNFW_STATUS nnfw_session::loadModelFile(const std::string &model_file_path,
                                        const std::string &model_type)
{
  auto model = loadModel(model_file_path, model_type);
  if (model == nullptr)
    return NNFW_STATUS_ERROR;

  _nnpkg = std::make_shared<onert::ir::NNPkg>(std::move(model));
  _model_path = std::filesystem::path(model_file_path);
  _compiler_artifact.reset();
  _execution.reset();
  _train_info = loadTrainingInfo(_nnpkg->primary_model());
  _state = State::MODEL_LOADED;

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::get_config(const char *key, char *value, size_t value_size)
{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  if (!key || !value)
    return NNFW_STATUS_UNEXPECTED_NULL;

  auto check_boundary = [](size_t dest_size, std::string &src) {
    if (dest_size < src.length() + 1 /* for '\0' */)
    {
      std::cerr << "buffer is too small to copy config value." << std::endl;
      return false;
    }
    return true;
  };

  const std::string skey = key;

  if (skey == onert::util::config::BACKENDS)
  {
    if (_coptions->backend_list.size() == 0)
      return NNFW_STATUS_NO_ERROR; // an unset backend list is not an error of get_config_str()

    auto str =
      nnfw::misc::join(_coptions->backend_list.begin(), _coptions->backend_list.end(), ";");

    if (!check_boundary(value_size, str))
      return NNFW_STATUS_ERROR;

    strncpy(value, str.c_str(), value_size);
  }
  else if (skey == onert::util::config::EXECUTOR)
  {
    if (!check_boundary(value_size, _coptions->executor))
      return NNFW_STATUS_ERROR;

    // copy including the terminating '\0' (check_boundary guarantees it fits)
    strncpy(value, _coptions->executor.c_str(), _coptions->executor.length() + 1);
  }
  else
  {
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

bool nnfw_session::isStateInitialized()
{
  if (_state == State::INITIALIZED)
  {
    assert(_nnpkg == nullptr);
    assert(_execution == nullptr);
    return true;
  }
  else
  {
    return false;
  }
}

bool nnfw_session::isStateModelLoaded()
{
  if (_state == State::MODEL_LOADED)
  {
    assert(_nnpkg != nullptr);
    assert(_execution == nullptr);
    return true;
  }
  else
  {
    return false;
  }
}

bool nnfw_session::isStatePrepared()
{
  if (_state == State::PREPARED)
  {
    assert(_nnpkg == nullptr);
    assert(_execution != nullptr);
    return true;
  }
  else
  {
    return false;
  }
}

bool nnfw_session::isStateRunning()
{
  if (_state == State::RUNNING)
  {
    assert(_nnpkg == nullptr);
    assert(_execution != nullptr);
    return true;
  }
  return false;
}

bool nnfw_session::isStateFinishedRun()
{
  if (_state == State::FINISHED_RUN)
  {
    assert(_nnpkg == nullptr);
    assert(_execution != nullptr);
    return true;
  }
  else
  {
    return false;
  }
}

bool nnfw_session::isStatePreparedOrFinishedRun()
{
  return isStatePrepared() || isStateFinishedRun();
}

NNFW_STATUS nnfw_session::input_tensorindex(const char *tensorname, uint32_t *index)
{
  return getTensorIndexImpl(*primary_subgraph(), tensorname, index, true);
}

NNFW_STATUS nnfw_session::output_tensorindex(const char *tensorname, uint32_t *index)
{
  return getTensorIndexImpl(*primary_subgraph(), tensorname, index, false);
}

NNFW_STATUS nnfw_session::set_backends_per_operation(const char *backend_settings)
{
  if (backend_settings == NULL)
    return NNFW_STATUS_ERROR;

  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  // Multiple models are not supported yet
  // TODO Support this
  if (_nnpkg->model_count() > 1)
  {
    std::cerr << "Multiple models are not supported" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Backend for all
    auto &ms_options = _coptions->manual_scheduler_options;
    ms_options.setBackendMap(std::string{backend_settings});
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_backends_per_operation : " << e.what()
              << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_get_traininfo(nnfw_train_info *info)
{
  if (isStateInitialized())
  {
    // There is no _train_info in INITIALIZED, since _train_info is set when a model is loaded
    std::cerr << "Error during nnfw_session::train_get_traininfo : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (info == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_get_traininfo : info is nullptr" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  // after a model is loaded, it is ensured that _train_info is not nullptr
  assert(_train_info != nullptr);

  auto convertLossCode = [](const onert::ir::train::LossCode &code) -> NNFW_TRAIN_LOSS {
    switch (code)
    {
      case onert::ir::train::LossCode::Undefined:
        return NNFW_TRAIN_LOSS_UNDEFINED;
      case onert::ir::train::LossCode::MeanSquaredError:
        return NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR;
      case onert::ir::train::LossCode::CategoricalCrossentropy:
        return NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY;
      default:
        throw std::runtime_error{"fail to convert ir::train::LossCode"};
    }
  };

  auto convertLossReduction =
    [](const onert::ir::train::LossReductionType &type) -> NNFW_TRAIN_LOSS_REDUCTION {
    switch (type)
    {
      case onert::ir::train::LossReductionType::Undefined:
        return NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED;
      case onert::ir::train::LossReductionType::SumOverBatchSize:
        return NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE;
      case onert::ir::train::LossReductionType::Sum:
        return NNFW_TRAIN_LOSS_REDUCTION_SUM;
      default:
        throw std::runtime_error{"fail to convert from ir::train::LossReductionType"};
    }
  };

  auto convertOptimizerCode =
    [](const onert::ir::train::OptimizerCode &code) -> NNFW_TRAIN_OPTIMIZER {
    switch (code)
    {
      case onert::ir::train::OptimizerCode::Undefined:
        return NNFW_TRAIN_OPTIMIZER_UNDEFINED;
      case onert::ir::train::OptimizerCode::SGD:
        return NNFW_TRAIN_OPTIMIZER_SGD;
      case onert::ir::train::OptimizerCode::Adam:
        return NNFW_TRAIN_OPTIMIZER_ADAM;
      default:
        throw std::runtime_error{"fail to convert from ir::train::OptimizerCode"};
    }
  };

  const auto &loss = _train_info->lossInfo();
  const auto &optim = _train_info->optimizerInfo();

  try
  {
    info->learning_rate = optim.learning_rate;
    info->batch_size = _train_info->batchSize();
    info->loss_info.loss = convertLossCode(loss.loss_code);
    info->loss_info.reduction_type = convertLossReduction(loss.reduction_type);
    info->opt = convertOptimizerCode(optim.optim_code);

    if (_train_info->getTrainableOps().size() > 0)
    {
      const uint32_t first_trainable_idx = _train_info->getTrainableOps().cbegin()->value();
      const uint32_t last_trainable_idx = _train_info->getTrainableOps().crbegin()->value();
      const uint32_t ops_size = primary_subgraph()->operations().size();
      const uint32_t trainable_indexes_range = last_trainable_idx - first_trainable_idx + 1;

      // check if the trainable ops set contains continuous indexes at the back of the set
      if (last_trainable_idx == ops_size - 1 &&
          trainable_indexes_range == _train_info->getTrainableOps().size())
      {
        // check if all ops are trainable
        if (0 == first_trainable_idx)
        {
          info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_ALL;
        }
        else
        {
          info->num_of_trainable_ops = trainable_indexes_range;
        }
      }
      else
      {
        info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_INCORRECT_STATE;
        std::cerr << "conversion from the set of trainable ops to num_of_trainable_ops is "
                     "impossible"
                  << std::endl;
        return NNFW_STATUS_INVALID_STATE;
      }
    }
    else
    {
      // no layer will be trained
      info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_NONE;
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_get_traininfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_set_traininfo(const nnfw_train_info *info)
{
  if (not isStateModelLoaded())
  {
    std::cerr << "Error during nnfw_session::train_set_traininfo : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (info == nullptr)
  {
    std::cerr << "nnfw_session::train_set_traininfo : info is nullptr" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  // after a model is loaded, it is ensured that _train_info is not nullptr
  assert(_train_info != nullptr);

  auto convertLossType = [](const int &type) {
    if (type == NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR)
      return onert::ir::train::LossCode::MeanSquaredError;
    else if (type == NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY)
      return onert::ir::train::LossCode::CategoricalCrossentropy;
    else
      throw std::runtime_error("not supported loss type");
  };

  auto convertLossReductionType = [](const int &type) {
    if (type == NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE)
      return onert::ir::train::LossReductionType::SumOverBatchSize;
    else if (type == NNFW_TRAIN_LOSS_REDUCTION_SUM)
      return onert::ir::train::LossReductionType::Sum;
    else
      throw std::runtime_error("not supported loss reduction type");
  };

  auto convertOptType = [](const int &type) {
    if (type == NNFW_TRAIN_OPTIMIZER_SGD)
      return onert::ir::train::OptimizerCode::SGD;
    else if (type == NNFW_TRAIN_OPTIMIZER_ADAM)
      return onert::ir::train::OptimizerCode::Adam;
    else
      throw std::runtime_error("not supported optimizer type");
  };

  try
  {
    onert::ir::train::LossInfo loss_info;
    loss_info.loss_code = convertLossType(info->loss_info.loss);
    loss_info.reduction_type = convertLossReductionType(info->loss_info.reduction_type);

    onert::ir::train::OptimizerInfo opt_info;
    opt_info.learning_rate = info->learning_rate;
    opt_info.optim_code = convertOptType(info->opt);

    _train_info->setBatchSize(info->batch_size);
    _train_info->setLossInfo(loss_info);
    _train_info->setOptimizerInfo(opt_info);

    if (info->num_of_trainable_ops < -1)
    {
      std::cerr << "Error during nnfw_session::train_set_traininfo: provided num_of_trainable_ops "
                   "has incorrect value: "
                << info->num_of_trainable_ops << std::endl;
      return NNFW_STATUS_ERROR;
    }

    const uint32_t ops_size = primary_subgraph()->operations().size();
    std::set<onert::ir::OperationIndex> trainable_ops;

    if (NNFW_TRAIN_TRAINABLE_ALL == info->num_of_trainable_ops)
    {
      for (uint32_t idx = 0; idx < ops_size; ++idx)
      {
        trainable_ops.emplace(idx);
      }
    }
    else
    {
      if (static_cast<uint32_t>(info->num_of_trainable_ops) > ops_size)
      {
        std::cerr
          << "Error during nnfw_session::train_set_traininfo: provided num_of_trainable_ops="
          << info->num_of_trainable_ops << " is out of operators range equals: " << ops_size
          << std::endl;
        return NNFW_STATUS_ERROR;
      }
      for (uint32_t i = 1; i <= static_cast<uint32_t>(info->num_of_trainable_ops); ++i)
      {
        trainable_ops.emplace(ops_size - i);
      }
    }
    // Note that an empty trainable_ops set may be set here (for NNFW_TRAIN_TRAINABLE_NONE)
    _train_info->setTrainableOps(trainable_ops);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_set_traininfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}
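
/*
 * Example of the num_of_trainable_ops mapping implemented above (the operator
 * count of 10 is illustrative): NNFW_TRAIN_TRAINABLE_ALL marks all 10
 * operators trainable; a value of 3 marks only the last three (indexes 7, 8
 * and 9, i.e. ops_size - i for i in 1..3); NNFW_TRAIN_TRAINABLE_NONE leaves
 * the set empty, so no weights are updated.
 */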

NNFW_STATUS nnfw_session::train_prepare()
{
  // We may need a different state to represent that a training model is loaded
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during model prepare training: ";
    if (_state == State::PREPARED_TRAINING)
      std::cerr << "prepare should be run once";
    else
      std::cerr << "invalid state";
    std::cerr << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  // after a model is loaded, it is ensured that _train_info is not nullptr
  assert(_train_info != nullptr);

  try
  {
    if (not _train_info->isValid())
      throw std::runtime_error{"training info is not valid"};

    // initialize trainingStep count
    _train_info->trainingStep() = 0;

    auto compiler =
      onert::compiler::CompilerFactory::get().create(_nnpkg, _coptions.get(), _train_info.get());
    _nnpkg.reset();
    _compiler_artifact = compiler->compile();
    _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_prepare : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::PREPARED_TRAINING;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
{
  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_input_tensorinfo : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  // Check index is valid: [0, getInputSize())

  // NYI
  (void)index;
  (void)ti;
  return NNFW_STATUS_ERROR;
}

NNFW_STATUS nnfw_session::train_expected_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
{
  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_expected_tensorinfo : invalid state"
              << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  // Check index is valid: [0, getExpectedSize())

  // NYI
  (void)index;
  (void)ti;
  return NNFW_STATUS_ERROR;
}

NNFW_STATUS nnfw_session::train_set_input(uint32_t index, const void *input,
                                          const nnfw_tensorinfo *input_tensorinfo)
{
  if (input == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_set_input : input buffer is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_set_input : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (index >= getInputSize())
  {
    std::cerr << "Error during nnfw_session::train_set_input : index is out of range" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    auto ind = onert::ir::IOIndex(index);
    auto size = _execution->getInputTotalSize(ind);
    if (input_tensorinfo && getBufSize(input_tensorinfo) != size)
    {
      std::cerr
        << "Error during nnfw_session::train_set_input : changing tensorinfo is not supported"
        << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _execution->setInput(ind, input, size);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_set_input : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_set_expected(uint32_t index, const void *expected,
                                             const nnfw_tensorinfo *expected_tensorinfo)
{
  if (expected == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_set_expected : expected buffer is null"
              << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_set_expected : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (index >= getOutputSize())
  {
    std::cerr << "Error during nnfw_session::train_set_expected : index is out of range"
              << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    auto output_ind = onert::ir::IOIndex(index);
    auto size = _execution->getOutputTotalSize(output_ind);
    if (expected_tensorinfo && getBufSize(expected_tensorinfo) != size)
    {
      std::cerr << "Error during nnfw_session::train_set_expected : invalid tensorinfo"
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    // NOTE Find the loss input index
    // Inputs are added as many as the number of outputs.
    // The loss index is calculated from the value obtained by subtracting the
    // total output count (the added loss inputs) from the total input size.
    auto input_index = getInputSize() - getOutputSize() + index;
    auto input_ind = onert::ir::IOIndex(input_index);
    _execution->setInput(input_ind, expected, size);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_set_expected : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}
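
/*
 * Worked example for the loss input index above (counts are illustrative):
 * for a model with 2 user inputs and 1 output, the training graph exposes
 * getInputSize() == 3 (2 inputs plus 1 appended expected/loss input). For
 * expected index 0 the buffer is therefore bound to input index
 * 3 - 1 + 0 = 2.
 */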

NNFW_STATUS nnfw_session::train_set_output(uint32_t index, NNFW_TYPE /*type*/, void *buffer,
                                           size_t length)
{
  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_set_output : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (!buffer && length != 0)
  {
    std::cerr << "Error during nnfw_session::train_set_output : given buffer is NULL but the "
                 "length is not 0"
              << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    _execution->setOutput(onert::ir::IOIndex(index), buffer, length);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_set_output : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_run(bool update_weights)
{
  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_run : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    if (update_weights)
    {
      auto &training_step = _train_info->trainingStep();
      _execution->train(training_step++);
    }
    else
      _execution->execute();
  }
  catch (const onert::InsufficientBufferSizeException &e)
  {
    // Currently insufficient buffer always means output buffer.
    std::cerr << "Error during nnfw_session::train_run : " << e.what() << std::endl;
    return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_run : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::FINISHED_TRAINING;
  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_get_loss(uint32_t index, float *loss)
{
  if (loss == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_get_loss : loss is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStateFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_get_loss : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (index >= getOutputSize())
  {
    std::cerr << "Error during nnfw_session::train_get_loss : index is out of range" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    auto ind = onert::ir::IOIndex(index);
    *loss = _execution->getLoss(ind);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_get_loss : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_export_circle(const char *path)
{
  if (path == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_export_circle : path is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  // Check that training mode is enabled
  if (!isStateFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_export_circle : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    onert::exporter::CircleExporter exporter(_model_path.string(), std::string{path});
    exporter.updateWeight(_execution);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_export_circle : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_export_circleplus(const char *path)
{
  if (path == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : path is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    onert::exporter::CircleExporter exporter(_model_path.string(), std::string{path});
    exporter.updateWeight(_execution);
    exporter.updateMetadata(_train_info);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_import_checkpoint(const char *path)
{
  if (path == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_import_checkpoint : path is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_import_checkpoint : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    onert::loader::train::loadCheckpoint(path, _train_info, _execution);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_import_checkpoint : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::train_export_checkpoint(const char *path)
{
  if (path == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_export_checkpoint : path is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  // Check that training mode is enabled
  if (!isStateFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_export_checkpoint : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    onert::exporter::train::exportCheckpoint(path, _train_info, _execution);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_export_checkpoint : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}
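
/*
 * Typical training sequence over the methods above, sketched via the C API
 * wrappers (file names and loop bounds are assumptions):
 *
 * @code
 *   nnfw_train_prepare(session);
 *   for (int epoch = 0; epoch < 5; ++epoch)
 *   {
 *     nnfw_train_set_input(session, 0, input, NULL);
 *     nnfw_train_set_expected(session, 0, expected, NULL);
 *     nnfw_train(session, true); // true: update weights
 *     float loss;
 *     nnfw_train_get_loss(session, 0, &loss);
 *   }
 *   nnfw_train_export_circle(session, "trained.circle");
 * @endcode
 */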

bool nnfw_session::isStatePreparedTraining()
{
  if (_state == State::PREPARED_TRAINING)
  {
    assert(_nnpkg == nullptr);
    assert(_execution != nullptr);
    return true;
  }
  else
    return false;
}

bool nnfw_session::isStateFinishedTraining()
{
  if (_state == State::FINISHED_TRAINING)
  {
    assert(_nnpkg == nullptr);
    assert(_execution != nullptr);
    return true;
  }
  else
    return false;
}

bool nnfw_session::isStatePreparedOrFinishedTraining()
{
  return isStatePreparedTraining() || isStateFinishedTraining();
}

NNFW_STATUS nnfw_session::set_quantization_type(NNFW_QUANTIZE_TYPE qtype)
{
  using onert::odc::QuantizeType;
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    QuantizeType odc_qtype = onert::odc::ODC_QTYPE_NOT_SET;
    switch (qtype)
    {
      case NNFW_QUANTIZE_TYPE_U8_ASYM:
        odc_qtype = onert::odc::ODC_QTYPE_U8_ASYM;
        break;
      case NNFW_QUANTIZE_TYPE_I16_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_I16_SYM;
        break;
      case NNFW_QUANTIZE_TYPE_WO_I8_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_WO_I8_SYM;
        break;
      case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_WO_I16_SYM;
        break;
      default:
        return NNFW_STATUS_INVALID_STATE;
    }
    _quant_manager->quantizeType(odc_qtype);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_quantization_type : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_quantized_model_path(const char *path)
{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    _quant_manager->exportModelPath(std::string(path));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_quantized_model_path : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::quantize()
{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    auto result = _quant_manager->quantize(_model_path.string());
    if (!result)
      return NNFW_STATUS_ERROR;

    // Replace the model
    // TODO Support buffer replace, not file reload
    return loadModelFile(_quant_manager->exportModelPath(), "circle");
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::quantize : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
}

NNFW_STATUS nnfw_session::set_codegen_model_path(const char *path)
{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    assert(_codegen_manager != nullptr);
    _codegen_manager->exportModelPath(std::string(path));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_codegen_model_path : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::codegen(const char *target, NNFW_CODEGEN_PREF pref)
{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "Error during nnfw_session::codegen : Invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    std::string target_str{target};
    if (target_str.empty() || target_str.size() < 5 ||
        target_str.substr(target_str.size() - 4) != "-gen")
    {
      std::cerr << "Error during nnfw_session::codegen : Invalid target" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    onert::odc::CodegenPreference codegen_pref;
    switch (pref)
    {
      case NNFW_CODEGEN_PREF_DEFAULT:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_DEFAULT;
        break;
      case NNFW_CODEGEN_PREF_PERFORMANCE_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_PERFORMANCE_FIRST;
        break;
      case NNFW_CODEGEN_PREF_MEMORY_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_MEMORY_FIRST;
        break;
      case NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_COMPILE_TIME_FIRST;
        break;
      default:
        std::cerr << "Error during nnfw_session::codegen : Invalid preference" << std::endl;
        return NNFW_STATUS_ERROR;
    }

    assert(_codegen_manager != nullptr);
    auto export_model_path = std::filesystem::path(_codegen_manager->exportModelPath());
    const auto model_type = target_str.substr(0, target_str.size() - 4);
    // If the export_model_path is not set, a compiled model path is generated
    // automatically.
    if (export_model_path.empty())
    {
      // The compiled model path is in the same directory as the original model/package,
      // with the target backend extension.
      export_model_path = _model_path.replace_extension(model_type);
      _codegen_manager->exportModelPath(export_model_path.string());
    }

    _codegen_manager->codegen(_model_path, target, codegen_pref);

    // Replace the model
    // TODO Support buffer replace, not file reload
    return loadModelFile(export_model_path, model_type);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::codegen : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
}
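
/*
 * Example of the target-to-extension rule above (the target name is
 * illustrative): for target "npu-gen", model_type becomes "npu", so
 * "model.circle" is compiled into "model.npu" next to the original package
 * whenever no explicit codegen export path has been set.
 */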

NNFW_STATUS nnfw_session::set_prepare_config(const NNFW_PREPARE_CONFIG key, const char *)
{
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during nnfw_session::set_prepare_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  switch (key)
  {
    case NNFW_PREPARE_CONFIG_PROFILE:
      _coptions->he_profiling_mode = true;
      break;
    default:
      return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::reset_prepare_config()
{
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during nnfw_session::reset_prepare_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _coptions->he_profiling_mode = false;

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_execution_config(const NNFW_RUN_CONFIG key, const char *)
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  switch (key)
  {
    case NNFW_RUN_CONFIG_DUMP_MINMAX:
      if (_coptions->workspace_dir.empty())
        return NNFW_STATUS_ERROR;
      _execution->executionOptions().dump_minmax = true;
      break;
    case NNFW_RUN_CONFIG_TRACE:
      if (_coptions->workspace_dir.empty())
        return NNFW_STATUS_ERROR;
      _execution->executionOptions().trace = true;
      break;
    case NNFW_RUN_CONFIG_PROFILE:
      _execution->executionOptions().profile = true;
      break;
    default:
      return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::reset_execution_config()
{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::reset_execution_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _execution->executionOptions().dump_minmax = false;
  _execution->executionOptions().trace = false;
  _execution->executionOptions().profile = false;

  return NNFW_STATUS_NO_ERROR;
}

NNFW_STATUS nnfw_session::set_odc_param_minmax_records_count(int minmax_records_count)
{
  if (isStateInitialized() || isStateRunning())
  {
    std::cerr << "invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (_quant_manager->setMinMaxRecordsThreshold(minmax_records_count))
    return NNFW_STATUS_NO_ERROR;
  else
    return NNFW_STATUS_ERROR;
}

NNFW_STATUS nnfw_session::delete_odc_minmax_file()
{
  if (isStateRunning())
  {
    std::cerr << "invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (_quant_manager->deleteMinMaxFile())
    return NNFW_STATUS_NO_ERROR;
  else
    return NNFW_STATUS_ERROR;
}
2037
2038// run with auto compilation
2040{
2041
2042 if (!isStatePreparedOrFinishedRun())
2043 {
2044 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2045 << "run should be after preparation" << std::endl;
2047 }
2048
2049 // Check quantization and code-generation parameters
2050 std::string target_str{target};
2051 if (_quant_manager->exportModelPath().empty() || _codegen_manager->exportModelPath().empty() ||
2052 target_str.empty() || target_str.substr(target_str.size() - 4) != "-gen")
2053 {
2054 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2055 << "quantization and code generation parameters should be set" << std::endl;
2057 }
2058
2059 // Odc: auto compilation with hidden switching mechanizm
2060 // Check is model already quantized or compiled
2061 std::ifstream file_quantized_model(_quant_manager->exportModelPath());
2062 std::ifstream file_compiled_model(_codegen_manager->exportModelPath());
2063
2064 if (!file_quantized_model.good() && !file_compiled_model.good())
2065 {
2066 // Run float model and try to quantize it
2067 {
2068 // Save execution options
2069 auto saved_options = _execution->executionOptions();
2070 // turn on minmax recording
2071 _execution->executionOptions().dump_minmax = true;
2072
2073 try
2074 {
2075 _execution->execute();
2076 }
2078 {
2079 // Currently insufficient buffer always means output buffer.
2080 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2081 << std::endl;
2083 }
2084 catch (const std::exception &e)
2085 {
2086 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2087 << std::endl;
2088 return NNFW_STATUS_ERROR;
2089 }
2090
2091 _state = State::FINISHED_RUN;
2092
2093 // restore min_max option to user defined state
2094 _execution->executionOptions().dump_minmax = saved_options.dump_minmax;
2095
2096 // if enough statistics are collected, then run the quantization
2097 if (_quant_manager->readyForQuantize())
2098 {
2099 try
2100 {
2101 if (isStateInitialized() || isStateRunning())
2102 {
2103 std::cerr << "invalid state" << std::endl;
2105 }
2106
2107 auto result = _quant_manager->quantize(_model_path);
2108 if (!result)
2110
2111 // remove minmax file
2112 result = _quant_manager->deleteMinMaxFile();
2113 if (!result)
2115 }
2116 catch (const std::exception &e)
2117 {
2118 std::cerr
2119 << "Error during nnfw_session::run_with_auto_compilation in quantize operation: "
2120 << e.what() << std::endl;
2121 return NNFW_STATUS_ERROR;
2122 }
2123 }
2124 }
2125 }
2126 else
2127 {
2128 // run compiled or quantized model
2129 NNFW_STATUS status;
2130
2131 // turn off minmax recording
2132 _execution->executionOptions().dump_minmax = false;
2133
2134 // save initial buffers if quantized model or compiled model is not loaded
2135 if (_autoCompilationState == nnfw_session::AutoCompilationState::INITIAL_STATE)
2136 {
2137 auto dotidx = _codegen_manager->exportModelPath().rfind('.');
2138 if (dotidx == std::string::npos)
2139 {
2140 std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid compiled "
2141 "model path. Please use a "
2142 "path that includes the extension."
2143 << std::endl;
2144 return NNFW_STATUS_ERROR;
2145 }
2146
2147 std::string compiled_model_type =
2148 _codegen_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot
2149
2150 dotidx = _quant_manager->exportModelPath().rfind('.');
2151 if (dotidx == std::string::npos)
2152 {
2153 std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid quantized "
2154 "model path. Please use a "
2155 "path that includes the extension."
2156 << std::endl;
2157 return NNFW_STATUS_ERROR;
2158 }
2159 std::string quantized_model_type =
2160 _quant_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot
2161
2162 // Save initial (float) input and output buffers
2163 auto input_size = _compiler_artifact->_executors->inputSize();
2164 auto output_size = _compiler_artifact->_executors->outputSize();
2165
2166 std::vector<const void *> _input_buffers;
2167 std::vector<void *> _output_buffers;
2168
2169 // Save Inputs buffers
2170 for (size_t input_index = 0; input_index < input_size; input_index++)
2171 {
2172 auto io_input_index = onert::ir::IOIndex(input_index);
2173 auto input_Shape = _execution->getInputShape(io_input_index);
2174 auto input_buffer = _execution->getInputBuffer(io_input_index);
2175
2176 _input_buffers.push_back(input_buffer);
2177 }
2178
2179 // Save Outputs buffers
2180 for (size_t output_index = 0; output_index < output_size; output_index++)
2181 {
2182 auto io_output_index = onert::ir::IOIndex(output_index);
2183
2184 auto output_Shape = _execution->getOutputShape(io_output_index);
2185 auto output_buffer = _execution->getOutputBuffer(io_output_index);
2186
2187 _output_buffers.push_back(output_buffer);
2188 }
2189
2190 // Save execution options
2191 auto saved_options = _execution->executionOptions();
2192
2193 // if there is compiled model - try to load it
2194 if (file_compiled_model.good())
2195 {
2196 // load compiled model
2197 status = loadModelFile(_codegen_manager->exportModelPath(), compiled_model_type);
2198 if (status == NNFW_STATUS_NO_ERROR)
2199 {
2200 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2201 }
2202 }
2203 else // there is no compiled model - try to compile and load it
2204 {
2205
2206 // avoiding code duplication use existing "codegen" function. Set up _model_path for the
2207 // codegen function.
2208 // TODO: change it if codegen function will be generalized
2209 _model_path = _quant_manager->exportModelPath();
2210
2211 // try to compile and load compiled model
2212 status = codegen(target, pref);
2213 if (status == NNFW_STATUS_NO_ERROR)
2214 {
2215 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2216 // TODO delete quantized model
2217 }
2218 }
2219
2220 // loading compiled model is fail - try to load quantized model
2221 if (_autoCompilationState != nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED)
2222 {
2223 // load quantized model
2224 status = loadModelFile(_quant_manager->exportModelPath(), quantized_model_type);
2225 if (status != NNFW_STATUS_NO_ERROR)
2226 return status;
2227 else
2228 _autoCompilationState = nnfw_session::AutoCompilationState::QUANTIZED_MODEL_LOADED;
2229 }
2230
2231 status = prepare();
2232 if (status != NNFW_STATUS_NO_ERROR)
2233 return status;
2234
2235 // Restore execution options
2236 _execution->executionOptions() = saved_options;
2237
2238 // Restore inputs to the quantized or compiled model
2239 for (uint32_t input_index = 0; input_index < _input_buffers.size(); input_index++)
2240 {
2241 nnfw_tensorinfo ti;
2242 status = input_tensorinfo(input_index, &ti);
2243 if (status != NNFW_STATUS_NO_ERROR)
2244 return status;
2245
2247 auto input_size_in_bytes = getBufSize(&ti);
2248
2249 status = set_input(input_index, ti.dtype, _input_buffers[input_index], input_size_in_bytes);
2250
2251 if (status != NNFW_STATUS_NO_ERROR)
2252 return status;
2253 }
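// getBufSize recomputes the byte size from the new model's tensor info
// (dtype and dims), so each saved pointer is re-bound with the size the
// newly loaded model actually expects.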
2254
2255 // Restore outputs to the quantized or compiled model
2256 for (uint32_t output_index = 0; output_index < _output_buffers.size(); output_index++)
2257 {
2258
2259 nnfw_tensorinfo ti;
2260 status = output_tensorinfo(output_index, &ti);
2261 if (status != NNFW_STATUS_NO_ERROR)
2262 return status;
2263
2265
2266 uint64_t output_size_in_bytes = getBufSize(&ti);
2267
2268 status =
2269 set_output(output_index, ti.dtype, _output_buffers[output_index], output_size_in_bytes);
2270 if (status != NNFW_STATUS_NO_ERROR)
2271 return status;
2272 }
2273 }
2274
2275 // Run the loaded (quantized or compiled) model
2276 if (!isStatePreparedOrFinishedRun())
2277 {
2278 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2279 << "run should be run after prepare" << std::endl;
2280 return NNFW_STATUS_INVALID_STATE;
2281 }
2282
2283 try
2284 {
2285 _execution->execute();
2286 }
2287 catch (const onert::InsufficientBufferSizeException &e)
2288 {
2289 // Currently insufficient buffer always means output buffer.
2290 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2291 << std::endl;
2292 return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
2293 }
2294 catch (const std::exception &e)
2295 {
2296 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2297 << std::endl;
2298 return NNFW_STATUS_ERROR;
2299 }
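// Exceptions are translated to API status codes here instead of propagating
// across the C API boundary: an insufficient-buffer failure maps to
// NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, anything else to NNFW_STATUS_ERROR.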
2300
2301 _state = State::FINISHED_RUN;
2302 }
2303
2304 return NNFW_STATUS_NO_ERROR;
2305}
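For orientation, below is a minimal usage sketch of this auto-compilation path. It is illustrative only: the file names, the target string, and the quantization type are placeholders chosen for the example rather than values fixed by this file, and error handling is collapsed.

#include "nnfw_session.h"

// Hypothetical driver for run_with_auto_compilation(); all paths and the
// target name are placeholders.
NNFW_STATUS run_model_with_auto_compilation()
{
  nnfw_session *session = nullptr;
  if (nnfw_session::create(&session) != NNFW_STATUS_NO_ERROR)
    return NNFW_STATUS_ERROR;

  session->load_model_from_path("model.circle");

  // Stage the artifacts that run_with_auto_compilation() produces and reuses.
  // Both paths must carry an extension; it is used as the model type above.
  session->set_quantization_type(NNFW_QUANTIZE_TYPE_U8_ASYM);
  session->set_quantized_model_path("model.q8.circle");
  session->set_codegen_model_path("model.compiled"); // extension = target format

  session->prepare();
  // ... bind user buffers with set_input()/set_output() here ...

  // First call: quantize, compile, reload, and re-bind buffers as shown above.
  // Subsequent calls: the compiled model is already loaded, so it just runs.
  return session->run_with_auto_compilation("some-target", NNFW_CODEGEN_PREF_DEFAULT);
}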