ONE - On-device Neural Engine
nnfw_session Struct Reference

#include <nnfw_session.h>

Public Member Functions

 ~nnfw_session ()
 
NNFW_STATUS load_model_from_file (const char *package_file_path)
 
NNFW_STATUS train_set_traininfo (const nnfw_train_info *info)
 
NNFW_STATUS train_prepare ()
 
NNFW_STATUS train_input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_expected_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_set_input (uint32_t index, void *input)
 
NNFW_STATUS train_set_expected (uint32_t index, void *expected)
 
NNFW_STATUS train_set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS train_run (bool update_weights)
 
NNFW_STATUS train_get_loss (uint32_t index, float *loss)
 
NNFW_STATUS train_export_circle (const char *path)
 
NNFW_STATUS train_export_checkpoint (const char *path)
 
NNFW_STATUS train_import_checkpoint (const char *path)
 
 ~nnfw_session ()
 
NNFW_STATUS load_model_from_path (const char *path)
 
NNFW_STATUS prepare ()
 
NNFW_STATUS run ()
 
NNFW_STATUS run_async ()
 
NNFW_STATUS await ()
 
NNFW_STATUS set_input (uint32_t index, NNFW_TYPE type, const void *buffer, size_t length)
 
NNFW_STATUS set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS input_size (uint32_t *number)
 
NNFW_STATUS output_size (uint32_t *number)
 
NNFW_STATUS set_input_layout (uint32_t index, NNFW_LAYOUT layout)
 
NNFW_STATUS set_output_layout (uint32_t index, NNFW_LAYOUT layout)
 
NNFW_STATUS set_input_tensorinfo (uint32_t index, const nnfw_tensorinfo *ti)
 
NNFW_STATUS input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS output_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS set_available_backends (const char *backends)
 
NNFW_STATUS set_workspace (const char *dir)
 
NNFW_STATUS set_config (const char *key, const char *value)
 
NNFW_STATUS get_config (const char *key, char *value, size_t value_size)
 
NNFW_STATUS load_circle_from_buffer (uint8_t *buffer, size_t size)
 
NNFW_STATUS register_custom_operation (const std::string &id, nnfw_custom_eval eval_func)
 
NNFW_STATUS input_tensorindex (const char *tensorname, uint32_t *index)
 
NNFW_STATUS output_tensorindex (const char *tensorname, uint32_t *index)
 
NNFW_STATUS run_with_auto_compilation (const char *target, NNFW_CODEGEN_PREF pref)
 
NNFW_STATUS set_odc_param_minmax_records_count (int minmax_records_count)
 
NNFW_STATUS delete_odc_minmax_file ()
 
NNFW_STATUS set_backends_per_operation (const char *backend_settings)
 Set backends with a string-encoded mapping from operation index to backend type (e.g. cpu, acl_cl).
 
NNFW_STATUS train_get_traininfo (nnfw_train_info *info)
 
NNFW_STATUS train_set_traininfo (const nnfw_train_info *info)
 
NNFW_STATUS train_prepare ()
 
NNFW_STATUS train_input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_expected_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_set_input (uint32_t index, const void *input, const nnfw_tensorinfo *input_tensorinfo)
 
NNFW_STATUS train_set_expected (uint32_t index, const void *expected, const nnfw_tensorinfo *expected_tensorinfo)
 
NNFW_STATUS train_set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS train_run (bool update_weights)
 
NNFW_STATUS train_get_loss (uint32_t index, float *loss)
 
NNFW_STATUS train_export_circle (const char *path)
 
NNFW_STATUS train_export_circleplus (const char *path)
 
NNFW_STATUS train_import_checkpoint (const char *path)
 
NNFW_STATUS train_export_checkpoint (const char *path)
 
NNFW_STATUS set_quantization_type (NNFW_QUANTIZE_TYPE qtype)
 
NNFW_STATUS set_quantized_model_path (const char *path)
 
NNFW_STATUS quantize ()
 
NNFW_STATUS set_codegen_model_path (const char *path)
 
NNFW_STATUS codegen (const char *target, NNFW_CODEGEN_PREF pref)
 
NNFW_STATUS set_prepare_config (const NNFW_PREPARE_CONFIG key, const char *value)
 
NNFW_STATUS reset_prepare_config ()
 
NNFW_STATUS set_execute_config (const NNFW_RUN_CONFIG key, const char *value)
 
NNFW_STATUS reset_execute_config ()
 

Static Public Member Functions

static NNFW_STATUS create (nnfw_session **session)
 Factory method. It creates and initializes an nnfw_session.
 
static NNFW_STATUS create (nnfw_session **session)
 Factory method. It creates and initializes an nnfw_session.
 
static NNFW_STATUS deprecated (const char *msg)
 

Detailed Description

Definition at line 68 of file onert-micro.cpp.

Constructor & Destructor Documentation

◆ ~nnfw_session() [1/2]

nnfw_session::~nnfw_session ( )

Definition at line 165 of file onert-micro.cpp.

{ delete _train_interpreter; }

◆ ~nnfw_session() [2/2]

nnfw_session::~nnfw_session ( )

Member Function Documentation

◆ await()

NNFW_STATUS nnfw_session::await ( )

Definition at line 508 of file nnfw_session.cc.

{
  if (!isStateRunning())
  {
    std::cerr << "Error during nnfw_session::run_await : "
              << "run_await should be run after run_async" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _execution->waitFinish();

  _state = State::FINISHED_RUN;
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, and NNFW_STATUS_NO_ERROR.

◆ codegen()

NNFW_STATUS nnfw_session::codegen ( const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Definition at line 1868 of file nnfw_session.cc.

{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "Error during nnfw_session::codegen : Invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    std::string target_str{target};
    if (target_str.empty() || target_str.size() < 5 ||
        target_str.substr(target_str.size() - 4) != "-gen")
    {
      std::cerr << "Error during nnfw_session::codegen : Invalid target" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    onert::odc::CodegenPreference codegen_pref;
    switch (pref)
    {
      case NNFW_CODEGEN_PREF_DEFAULT:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_DEFAULT;
        break;
      case NNFW_CODEGEN_PREF_PERFORMANCE_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_PERFORMANCE_FIRST;
        break;
      case NNFW_CODEGEN_PREF_MEMORY_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_MEMORY_FIRST;
        break;
      case NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST:
        codegen_pref = onert::odc::CodegenPreference::CODEGEN_PREF_COMPILE_TIME_FIRST;
        break;
      default:
        std::cerr << "Error during nnfw_session::codegen : Invalid preference" << std::endl;
        return NNFW_STATUS_ERROR;
    }

    assert(_codegen_manager != nullptr);
    auto export_model_path = std::filesystem::path(_codegen_manager->exportModelPath());
    const auto model_type = target_str.substr(0, target_str.size() - 4);
    // If the export_model_path is not set, it generates a compiled model path automatically.
    if (export_model_path.empty())
    {
      // The compiled model path is the same directory of the original model/package with
      // target backend extension.
      export_model_path = _model_path.replace_extension(model_type);
      _codegen_manager->exportModelPath(export_model_path.string());
    }

    _codegen_manager->codegen(_model_path, target, codegen_pref);

    // Replace model
    // TODO Support buffer replace, not file reload
    return loadModelFile(export_model_path, model_type);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::compile : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
}

References onert::odc::CODEGEN_PREF_COMPILE_TIME_FIRST, onert::odc::CODEGEN_PREF_DEFAULT, onert::odc::CODEGEN_PREF_MEMORY_FIRST, onert::odc::CODEGEN_PREF_PERFORMANCE_FIRST, NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST, NNFW_CODEGEN_PREF_DEFAULT, NNFW_CODEGEN_PREF_MEMORY_FIRST, NNFW_CODEGEN_PREF_PERFORMANCE_FIRST, NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

Referenced by run_with_auto_compilation().

◆ create() [1/2]

NNFW_STATUS nnfw_session::create ( nnfw_session **  session)
static

Factory method. It creates and initializes an nnfw_session.

Note
Use the factory instead of the constructor to get a status code.

Definition at line 149 of file onert-micro.cpp.

{
  if (session == nullptr)
    return NNFW_STATUS_UNEXPECTED_NULL;

  auto new_session = std::unique_ptr<nnfw_session>(new nnfw_session());
  *session = new_session.release();

  if (*session == nullptr)
  {
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by nnfw_create_session().
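
A minimal usage sketch (not from the source): obtain a session through the factory, check the returned status, and destroy it with delete, which invokes the public destructor documented above.

nnfw_session *session = nullptr;
if (nnfw_session::create(&session) != NNFW_STATUS_NO_ERROR)
{
  // handle creation failure
}
// ... load a model and run it ...
delete session; // ~nnfw_session() releases internal resources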

◆ create() [2/2]

static NNFW_STATUS nnfw_session::create ( nnfw_session **  session)
static

Factory method. It creates and initializes an nnfw_session.

Note
Use the factory instead of the constructor to get a status code.

◆ delete_odc_minmax_file()

NNFW_STATUS nnfw_session::delete_odc_minmax_file ( )

Definition at line 2024 of file nnfw_session.cc.

{
  if (isStateRunning())
  {
    std::cerr << "invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (_quant_manager->deleteMinMaxFile())
    return NNFW_STATUS_NO_ERROR;
  else
    return NNFW_STATUS_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ deprecated()

NNFW_STATUS nnfw_session::deprecated ( const char *  msg)
static

Definition at line 870 of file nnfw_session.cc.

{
  std::cerr << msg << std::endl;
  return NNFW_STATUS_DEPRECATED_API;
}

References NNFW_STATUS_DEPRECATED_API.

Referenced by nnfw_apply_tensorinfo(), nnfw_pop_pipeline_output(), nnfw_prepare_pipeline(), nnfw_push_pipeline_input(), and nnfw_set_op_backend().

◆ get_config()

NNFW_STATUS nnfw_session::get_config ( const char *  key,
char *  value,
size_t  value_size 
)

Definition at line 971 of file nnfw_session.cc.

{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  if (!key || !value)
    return NNFW_STATUS_UNEXPECTED_NULL;

  auto check_boundary = [](size_t dest_size, std::string &src) {
    if (dest_size < src.length() + 1 /* for '\0' */)
    {
      std::cerr << "buffer is too small to copy config value." << std::endl;
      return false;
    }
    return true;
  };

  const std::string skey = key;

  if (skey == onert::util::config::BACKENDS)
  {
    if (_coptions->backend_list.size() == 0)
      return NNFW_STATUS_NO_ERROR; // no setting backend is not an error of get_config_str()

    auto str =
      nnfw::misc::join(_coptions->backend_list.begin(), _coptions->backend_list.end(), ";");

    if (!check_boundary(value_size, str))
      return NNFW_STATUS_ERROR;

    strncpy(value, str.c_str(), value_size);
  }
  else if (skey == onert::util::config::EXECUTOR)
  {
    if (!check_boundary(value_size, _coptions->executor))
      return NNFW_STATUS_ERROR;

    strncpy(value, _coptions->executor.c_str(), _coptions->executor.length());
  }
  else
  {
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References nnfw::misc::join(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.
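
A hypothetical read-back sketch: after a model is loaded, query the backend list. The key string is assumed to match the onert::util::config::BACKENDS constant ("BACKENDS"); the destination buffer must hold the value plus the terminating '\0', as enforced by check_boundary above.

char value[128] = {0};
if (session->get_config("BACKENDS", value, sizeof(value)) == NNFW_STATUS_NO_ERROR)
  std::cout << "backend list: " << value << std::endl;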

◆ input_size()

NNFW_STATUS nnfw_session::input_size ( uint32_t *  number)

Definition at line 588 of file nnfw_session.cc.

{
  if (isStateInitialized()) // Model is not loaded
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (number == nullptr)
    {
      std::cerr << "Error during nnfw_session::input_size, number is null pointer." << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }
    *number = getInputSize();
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::input_size : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation().

◆ input_tensorindex()

NNFW_STATUS nnfw_session::input_tensorindex ( const char *  tensorname,
uint32_t *  index 
)

Definition at line 1090 of file nnfw_session.cc.

{
  return getTensorIndexImpl(*primary_subgraph(), tensorname, index, true);
}

◆ input_tensorinfo()

NNFW_STATUS nnfw_session::input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 739 of file nnfw_session.cc.

{
  if (isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (ti == nullptr)
    {
      std::cerr << "Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer."
                << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }

    if (index >= getInputSize())
    {
      std::cerr << "Error during nnfw_session::input_tensorinfo, index is out of range."
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    if (isStateModelLoaded())
    {
      auto info = _nnpkg->inputInfo(index);
      fillTensorInfo(ti, info.shape(), info.typeInfo().type());
    }
    else
    {
      auto io_index = onert::ir::IOIndex{index};
      auto shape = _execution->getInputShape(io_index);
      auto dtype = _compiler_artifact->_executors->inputInfo(io_index).typeInfo().type();
      fillTensorInfo(ti, shape, dtype);
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::input_tensorinfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References info, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation(), package.infer.session::set_inputs(), and train_set_input().

◆ load_circle_from_buffer()

NNFW_STATUS nnfw_session::load_circle_from_buffer ( uint8_t *  buffer,
size_t  size 
)

Definition at line 277 of file nnfw_session.cc.

{
  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (!buffer)
    return NNFW_STATUS_UNEXPECTED_NULL;

  if (size == 0)
    return NNFW_STATUS_ERROR;

  try
  {
    auto model = onert::loader::loadCircleModel(buffer, size);
    // TODO: Update _model_path if necessary
    _nnpkg = std::make_shared<onert::ir::NNPkg>(std::move(model));
    _train_info = loadTrainingInfo(_nnpkg->primary_model());
    _state = State::MODEL_LOADED;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model loading : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References onert::loader::loadCircleModel(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.
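
An illustrative sketch, assuming a file named model.circle: read the model into memory and hand the bytes to load_circle_from_buffer. Keeping the buffer alive for the session's lifetime is a conservative assumption, since the TODO above leaves _model_path handling open.

std::ifstream file("model.circle", std::ios::binary | std::ios::ate);
const std::streamsize fsize = file.tellg();
file.seekg(0, std::ios::beg);
std::vector<uint8_t> buf(fsize);
file.read(reinterpret_cast<char *>(buf.data()), fsize);
if (session->load_circle_from_buffer(buf.data(), buf.size()) != NNFW_STATUS_NO_ERROR)
  std::cerr << "circle load failed" << std::endl;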

◆ load_model_from_file()

NNFW_STATUS nnfw_session::load_model_from_file ( const char *  package_file_path)

Definition at line 272 of file onert-micro.cpp.

{
  _model_buf = readFile(file_path);
  _config.model_ptr = _model_buf.data();
  _config.model_size = _model_buf.size();
  // load training info
  loadTrainingInfo(_config.model_ptr);
  // TODO: this import should start on nnfw_prepare if inference_interpreter is introduced
  _train_interpreter->importTrainModel(_config.model_ptr, _config);
  return NNFW_STATUS_NO_ERROR;
}

References onert_micro::OMTrainingInterpreter::importTrainModel(), onert_micro::OMConfig::model_ptr, onert_micro::OMConfig::model_size, NNFW_STATUS_NO_ERROR, and readFile().

◆ load_model_from_path()

NNFW_STATUS nnfw_session::load_model_from_path ( const char *  path)

Definition at line 304 of file nnfw_session.cc.

{
  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (!path)
  {
    std::cerr << "Path is null." << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!null_terminating(path, MAX_PATH_LENGTH))
  {
    std::cerr << "Path is too long" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    std::filesystem::path filename{path};
    if (!std::filesystem::is_directory(filename) && filename.has_extension())
    {
      std::string model_type = filename.extension().string().substr(1); // + 1 to exclude dot
      return loadModelFile(filename, model_type);
    }

    const auto &package_dir = filename;

    // TODO : add support for zipped package file load
    if (!std::filesystem::is_directory(package_dir))
    {
      std::cerr << "invalid path: " << package_dir << std::endl;
      return NNFW_STATUS_ERROR;
    }

    const auto manifest_file_name = package_dir / "metadata/MANIFEST";
    std::ifstream mfs(manifest_file_name);

    // extract the filename of the first(index 0) model
    // e.g. In MANIFEST file, { "models" : [ "firstmodel.tflite", "2nd.tflite" ] }
    Json::Value root;
    mfs >> root;
    const Json::Value &models = root["models"];
    const Json::Value &model_types = root["model-types"];
    const Json::Value &configs = root["configs"];

    if (!configs.empty() && !configs[0].empty())
    {
      const auto filepath = package_dir / "metadata" / configs[0].asString();

      onert::util::CfgKeyValues keyValues;
      if (loadConfigure(filepath.string(), keyValues))
      {
        onert::util::setConfigKeyValues(keyValues);
      }
    }
    _nnpkg = std::make_shared<onert::ir::NNPkg>();
    auto num_models = models.size();
    if (num_models == 0 || (num_models - 1) > onert::ir::ModelIndex::max())
    {
      std::cerr << "Invalid model size - " << std::to_string(num_models) << std::endl;
      return NNFW_STATUS_ERROR;
    }

    // Backend mapping to operator index is not supported for multiple models yet
    // TODO Support this
    if (num_models > 1 && _coptions->manual_scheduler_options.index_to_backend.size() != 0)
    {
      std::cerr << "Cannot set backend to operator index for multiple models" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    for (uint16_t i = 0; i < num_models; ++i)
    {
      const auto model_file_path = package_dir / models[i].asString();
      const auto model_type = model_types[i].asString();
      auto model = loadModel(model_file_path.string(), model_type);
      if (model == nullptr)
        return NNFW_STATUS_ERROR;
      _model_path = model_file_path; // TODO Support multiple models
      model->bindKernelBuilder(_kernel_registry->getBuilder());
      _nnpkg->push(onert::ir::ModelIndex{i}, std::move(model));
    }

    _train_info = loadTrainingInfo(_nnpkg->primary_model());

    auto toIODesc = [](std::string str) {
      auto indices = nnfw::misc::split(str, ':');
      if (indices.size() != 3)
      {
        std::cerr << "IODesc should be 3-tuple." << std::endl;
        return onert::ir::IODesc{};
      }
      auto model_idx = static_cast<uint32_t>(std::stoi(indices.at(0)));
      auto subgraph_idx = static_cast<uint32_t>(std::stoi(indices.at(1)));
      auto operand_idx = static_cast<uint32_t>(std::stoi(indices.at(2)));
      return onert::ir::IODesc{model_idx, subgraph_idx, operand_idx};
    };
    // read pkg-inputs and pkg-outputs
    const Json::Value &pkg_inputs = root["pkg-inputs"];
    for (uint32_t i = 0; i < pkg_inputs.size(); ++i)
      _nnpkg->addInput(toIODesc(pkg_inputs[i].asString()));
    const Json::Value &pkg_outputs = root["pkg-outputs"];
    for (uint32_t i = 0; i < pkg_outputs.size(); ++i)
      _nnpkg->addOutput(toIODesc(pkg_outputs[i].asString()));
    // read model-connect
    const Json::Value &fromtos = root["model-connect"];
    for (uint32_t i = 0; i < fromtos.size(); ++i)
    {
      const Json::Value &tos = fromtos[i]["to"];
      for (uint32_t j = 0; j < tos.size(); ++j)
        _nnpkg->addEdge(toIODesc(fromtos[i]["from"].asString()), toIODesc(tos[j].asString()));
    }

    _nnpkg->verify();
    _state = State::MODEL_LOADED;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model loading : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References onert::util::Index< uint16_t, ModelIndexTag >::max(), MAX_PATH_LENGTH, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, onert::util::setConfigKeyValues(), and nnfw::misc::split().
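
For reference, a minimal metadata/MANIFEST that the parser above would accept (file names are hypothetical). "pkg-inputs"/"pkg-outputs" entries are "model:subgraph:operand" 3-tuples, as decoded by toIODesc.

{
  "models" : [ "mymodel.tflite" ],
  "model-types" : [ "tflite" ],
  "configs" : [ "model.cfg" ],
  "pkg-inputs" : [ "0:0:0" ],
  "pkg-outputs" : [ "0:0:0" ]
}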

◆ output_size()

NNFW_STATUS nnfw_session::output_size ( uint32_t *  number)

Definition at line 610 of file nnfw_session.cc.

{
  if (isStateInitialized()) // Model is not loaded
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (number == nullptr)
    {
      std::cerr << "Error during nnfw_session::output_size, number is null pointer." << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }
    *number = getOutputSize();
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::output_size : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation().

◆ output_tensorindex()

NNFW_STATUS nnfw_session::output_tensorindex ( const char *  tensorname,
uint32_t *  index 
)

Definition at line 1095 of file nnfw_session.cc.

{
  return getTensorIndexImpl(*primary_subgraph(), tensorname, index, false);
}

◆ output_tensorinfo()

NNFW_STATUS nnfw_session::output_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 781 of file nnfw_session.cc.

{
  if (isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  if (ti == nullptr)
  {
    std::cerr << "Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer."
              << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  try
  {
    if (index >= getOutputSize())
    {
      std::cerr << "Error during nnfw_session::output_tensorinfo, index is out of range."
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    if (isStateModelLoaded())
    {
      auto info = _nnpkg->outputInfo(index);
      fillTensorInfo(ti, info.shape(), info.typeInfo().type());
    }
    else
    {
      auto io_index = onert::ir::IOIndex{index};
      auto shape = _execution->getOutputShape(io_index);
      auto dtype = _compiler_artifact->_executors->outputInfo(io_index).typeInfo().type();
      fillTensorInfo(ti, shape, dtype);
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::output_tensorinfo : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References info, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation(), and package.infer.session::set_outputs().

◆ prepare()

NNFW_STATUS nnfw_session::prepare ( )

Definition at line 429 of file nnfw_session.cc.

{
  // NOTE. If users want to run prepare() more than one time, this could be removed.
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during model prepare : ";
    if (isStateInitialized())
    {
      std::cerr << "prepare should be run once";
    }
    else
    {
      std::cerr << "invalid state";
    }
    std::cerr << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    auto compiler = onert::compiler::CompilerFactory::get().create(_nnpkg, _coptions.get());
    _nnpkg.reset();
    _compiler_artifact = compiler->compile();
    _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during model prepare : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::PREPARED;
  return NNFW_STATUS_NO_ERROR;
}

References onert::compiler::CompilerFactory::create(), onert::compiler::CompilerFactory::get(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

Referenced by run_with_auto_compilation().

◆ quantize()

NNFW_STATUS nnfw_session::quantize ( )

Definition at line 1821 of file nnfw_session.cc.

{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    auto result = _quant_manager->quantize(_model_path.string());
    if (!result)
      return NNFW_STATUS_ERROR;

    // Replace model
    // TODO Support buffer replace, not file reload
    return loadModelFile(_quant_manager->exportModelPath(), "circle");
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::quantize : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
}

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ register_custom_operation()

NNFW_STATUS nnfw_session::register_custom_operation ( const std::string &  id,
nnfw_custom_eval  eval_func 
)

Definition at line 824 of file nnfw_session.cc.

{
  _kernel_registry->registerKernel(id, eval_func);
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_NO_ERROR.

◆ reset_execute_config()

NNFW_STATUS nnfw_session::reset_execute_config ( )

Definition at line 1995 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _execution->executionOptions().dump_minmax = false;
  _execution->executionOptions().trace = false;
  _execution->executionOptions().profile = false;

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ reset_prepare_config()

NNFW_STATUS nnfw_session::reset_prepare_config ( )

Definition at line 1952 of file nnfw_session.cc.

{
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during nnfw_session::reset_prepare_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _coptions->he_profiling_mode = false;

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ run()

NNFW_STATUS nnfw_session::run ( )

Definition at line 464 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::run : "
              << "run should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    _execution->execute();
  }
  catch (const onert::InsufficientBufferSizeException &e)
  {
    // Currently insufficient buffer always means output buffer.
    std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
    return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  _state = State::FINISHED_RUN;
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and onert::OnertException::what().

Referenced by package.infer.session::inference().

◆ run_async()

NNFW_STATUS nnfw_session::run_async ( )

Definition at line 493 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::run_async : "
              << "run_async should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  _execution->startExecute();

  _state = State::RUNNING;
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ run_with_auto_compilation()

NNFW_STATUS nnfw_session::run_with_auto_compilation ( const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Definition at line 2039 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
              << "run should be after preparation" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  // Check quantization and code-generation parameters
  std::string target_str{target};
  if (_quant_manager->exportModelPath().empty() || _codegen_manager->exportModelPath().empty() ||
      target_str.empty() || target_str.substr(target_str.size() - 4) != "-gen")
  {
    std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
              << "quantization and code generation parameters should be set" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  // Odc: auto compilation with hidden switching mechanism
  // Check whether the model is already quantized or compiled
  std::ifstream file_quantized_model(_quant_manager->exportModelPath());
  std::ifstream file_compiled_model(_codegen_manager->exportModelPath());

  if (!file_quantized_model.good() && !file_compiled_model.good())
  {
    // Run float model and try to quantize it
    {
      // Save execution options
      auto saved_options = _execution->executionOptions();
      // turn on minmax recording
      _execution->executionOptions().dump_minmax = true;

      try
      {
        _execution->execute();
      }
      catch (const onert::InsufficientBufferSizeException &e)
      {
        // Currently insufficient buffer always means output buffer.
        std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
                  << std::endl;
        return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
      }
      catch (const std::exception &e)
      {
        std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
                  << std::endl;
        return NNFW_STATUS_ERROR;
      }

      _state = State::FINISHED_RUN;

      // restore min_max option to user defined state
      _execution->executionOptions().dump_minmax = saved_options.dump_minmax;

      // if enough statistics are collected, then run the quantization
      if (_quant_manager->readyForQuantize())
      {
        try
        {
          if (isStateInitialized() || isStateRunning())
          {
            std::cerr << "invalid state" << std::endl;
            return NNFW_STATUS_INVALID_STATE;
          }

          auto result = _quant_manager->quantize(_model_path);
          if (!result)
            return NNFW_STATUS_ERROR;

          // remove minmax file
          result = _quant_manager->deleteMinMaxFile();
          if (!result)
            return NNFW_STATUS_ERROR;
        }
        catch (const std::exception &e)
        {
          std::cerr
            << "Error during nnfw_session::run_with_auto_compilation in quantize operation: "
            << e.what() << std::endl;
          return NNFW_STATUS_ERROR;
        }
      }
    }
  }
  else
  {
    // run compiled or quantized model
    NNFW_STATUS status;

    // turn off minmax recording
    _execution->executionOptions().dump_minmax = false;

    // save initial buffers if quantized model or compiled model is not loaded
    if (_autoCompilationState == nnfw_session::AutoCompilationState::INITIAL_STATE)
    {
      auto dotidx = _codegen_manager->exportModelPath().rfind('.');
      if (dotidx == std::string::npos)
      {
        std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid compiled "
                     "model path. Please use a "
                     "path that includes the extension."
                  << std::endl;
        return NNFW_STATUS_ERROR;
      }

      std::string compiled_model_type =
        _codegen_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot

      dotidx = _quant_manager->exportModelPath().rfind('.');
      if (dotidx == std::string::npos)
      {
        std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid quantized "
                     "model path. Please use a "
                     "path that includes the extension."
                  << std::endl;
        return NNFW_STATUS_ERROR;
      }
      std::string quantized_model_type =
        _quant_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot

      // Save initial (float) input and output buffers
      auto input_size = _compiler_artifact->_executors->inputSize();
      auto output_size = _compiler_artifact->_executors->outputSize();

      std::vector<const void *> _input_buffers;
      std::vector<void *> _output_buffers;

      // Save Inputs buffers
      for (size_t input_index = 0; input_index < input_size; input_index++)
      {
        auto io_input_index = onert::ir::IOIndex(input_index);
        auto input_Shape = _execution->getInputShape(io_input_index);
        auto input_buffer = _execution->getInputBuffer(io_input_index);

        _input_buffers.push_back(input_buffer);
      }

      // Save Outputs buffers
      for (size_t output_index = 0; output_index < output_size; output_index++)
      {
        auto io_output_index = onert::ir::IOIndex(output_index);

        auto output_Shape = _execution->getOutputShape(io_output_index);
        auto output_buffer = _execution->getOutputBuffer(io_output_index);

        _output_buffers.push_back(output_buffer);
      }

      // Save execution options
      auto saved_options = _execution->executionOptions();

      // if there is a compiled model - try to load it
      if (file_compiled_model.good())
      {
        // load compiled model
        status = loadModelFile(_codegen_manager->exportModelPath(), compiled_model_type);
        if (status == NNFW_STATUS_NO_ERROR)
        {
          _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
        }
      }
      else // there is no compiled model - try to compile and load it
      {
        // to avoid code duplication, use the existing "codegen" function. Set up _model_path for
        // the codegen function.
        // TODO: change it if codegen function will be generalized
        _model_path = _quant_manager->exportModelPath();

        // try to compile and load compiled model
        status = codegen(target, pref);
        if (status == NNFW_STATUS_NO_ERROR)
        {
          _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
          // TODO delete quantized model
        }
      }

      // loading the compiled model failed - try to load the quantized model
      if (_autoCompilationState != nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED)
      {
        // load quantized model
        status = loadModelFile(_quant_manager->exportModelPath(), quantized_model_type);
        if (status != NNFW_STATUS_NO_ERROR)
          return status;
        else
          _autoCompilationState = nnfw_session::AutoCompilationState::QUANTIZED_MODEL_LOADED;
      }

      status = prepare();
      if (status != NNFW_STATUS_NO_ERROR)
        return status;

      // Restore execution options
      _execution->executionOptions() = saved_options;

      // Restore inputs to the quantized or compiled model
      for (uint32_t input_index = 0; input_index < _input_buffers.size(); input_index++)
      {
        nnfw_tensorinfo ti;
        status = input_tensorinfo(input_index, &ti);
        if (status != NNFW_STATUS_NO_ERROR)
          return status;

        ti.dtype = NNFW_TYPE_TENSOR_FLOAT32;
        auto input_size_in_bytes = getBufSize(&ti);

        status = set_input(input_index, ti.dtype, _input_buffers[input_index], input_size_in_bytes);

        if (status != NNFW_STATUS_NO_ERROR)
          return status;
      }

      // Restore outputs to the quantized or compiled model
      for (uint32_t output_index = 0; output_index < _output_buffers.size(); output_index++)
      {
        nnfw_tensorinfo ti;
        status = output_tensorinfo(output_index, &ti);
        if (status != NNFW_STATUS_NO_ERROR)
          return status;

        ti.dtype = NNFW_TYPE_TENSOR_FLOAT32;

        uint64_t output_size_in_bytes = getBufSize(&ti);

        status =
          set_output(output_index, ti.dtype, _output_buffers[output_index], output_size_in_bytes);
        if (status != NNFW_STATUS_NO_ERROR)
          return status;
      }
    }

    // Run quantized model
    if (!isStatePreparedOrFinishedRun())
    {
      std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
                << "run should be run after prepare" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    try
    {
      _execution->execute();
    }
    catch (const onert::InsufficientBufferSizeException &e)
    {
      // Currently insufficient buffer always means output buffer.
      std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
                << std::endl;
      return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
    }
    catch (const std::exception &e)
    {
      std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _state = State::FINISHED_RUN;
  }

  return NNFW_STATUS_NO_ERROR;
}

References codegen(), nnfw_tensorinfo::dtype, input_size(), input_tensorinfo(), NNFW_STATUS_ERROR, NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_TYPE_TENSOR_FLOAT32, output_size(), output_tensorinfo(), prepare(), set_input(), set_output(), and onert::OnertException::what().
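
An illustrative end-to-end sketch (file names and the target string are hypothetical; the target must end in "-gen", as checked above). Early calls run the float model and record min/max statistics; once _quant_manager has enough records, the model is quantized, compiled, and reloaded transparently.

session->set_quantized_model_path("model.q.circle");
session->set_codegen_model_path("model.bin");
session->set_odc_param_minmax_records_count(10); // quantize after 10 recorded runs
// call per inference, e.g. inside the application's main loop
session->run_with_auto_compilation("dummy-gen", NNFW_CODEGEN_PREF_DEFAULT);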

◆ set_available_backends()

NNFW_STATUS nnfw_session::set_available_backends ( const char *  backends)

Definition at line 831 of file nnfw_session.cc.

{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  try
  {
    if (!backends)
      return NNFW_STATUS_UNEXPECTED_NULL;
    if (null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false)
      return NNFW_STATUS_ERROR;

    using namespace onert::util;

    _coptions->backend_list = nnfw::misc::split(std::string{backends}, ';');
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_available_backends : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References MAX_BACKEND_NAME_LENGTH, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and nnfw::misc::split().
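
For example, a semicolon-separated priority list, matching the split on ';' above:

session->set_available_backends("cpu;acl_cl");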

◆ set_backends_per_operation()

NNFW_STATUS nnfw_session::set_backends_per_operation ( const char *  backend_settings)

Set backends with a string-encoded mapping from operation index to backend type (e.g. cpu, acl_cl).

Definition at line 1100 of file nnfw_session.cc.

{
  if (backend_settings == NULL)
    return NNFW_STATUS_ERROR;

  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  // Multiple models are not supported yet
  // TODO Support this
  if (_nnpkg->model_count() > 1)
  {
    std::cerr << "Not supported multiple model" << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Backend for all
    auto &ms_options = _coptions->manual_scheduler_options;
    ms_options.setBackendMap(std::string{backend_settings});
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_backends_per_operation" << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.
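
A sketch of the mapping string; the exact grammar is owned by ManualSchedulerOptions::setBackendMap, and the "index=backend" pair form below is an assumption consistent with the brief description above.

// run operation 0 on cpu and operation 1 on acl_cl (hypothetical indices)
session->set_backends_per_operation("0=cpu;1=acl_cl");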

◆ set_codegen_model_path()

NNFW_STATUS nnfw_session::set_codegen_model_path ( const char *  path)

Definition at line 1846 of file nnfw_session.cc.

{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    assert(_codegen_manager != nullptr);
    _codegen_manager->exportModelPath(std::string(path));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_codegen_model_path : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_config()

NNFW_STATUS nnfw_session::set_config ( const char *  key,
const char *  value 
)

Definition at line 876 of file nnfw_session.cc.

{
  if (!isStateModelLoaded())
    return NNFW_STATUS_INVALID_STATE;

  if (!key || !value)
    return NNFW_STATUS_UNEXPECTED_NULL;

  using namespace onert::util;

  const std::string skey = key;

  if (skey == config::GRAPH_DOT_DUMP)
  {
    _coptions->graph_dump_level = toInt(value);
  }
  else if (skey == config::EXECUTOR)
  {
    _coptions->executor = value;
  }
  else if (skey == config::OP_BACKEND_ALLOPS)
  {
    _coptions->manual_scheduler_options.backend_for_all = value;
  }
  else if (skey == config::USE_SCHEDULER)
  {
    _coptions->he_scheduler = toBool(value);
  }
  else if (skey == config::PROFILING_MODE)
  {
    _coptions->he_profiling_mode = toBool(value);
  }
  else
  {
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ set_execute_config()

NNFW_STATUS nnfw_session::set_execute_config ( const NNFW_RUN_CONFIG  key,
const char *  value 
)

Definition at line 1965 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  switch (key)
  {
    case NNFW_RUN_CONFIG_DUMP_MINMAX:
      if (_coptions->workspace_dir.empty())
        return NNFW_STATUS_ERROR;
      _execution->executionOptions().dump_minmax = true;
      break;
    case NNFW_RUN_CONFIG_TRACE:
      if (_coptions->workspace_dir.empty())
        return NNFW_STATUS_ERROR;
      _execution->executionOptions().trace = true;
      break;
    case NNFW_RUN_CONFIG_PROFILE:
      _execution->executionOptions().profile = true;
      break;
    default:
      return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_RUN_CONFIG_DUMP_MINMAX, NNFW_RUN_CONFIG_PROFILE, NNFW_RUN_CONFIG_TRACE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_input()

NNFW_STATUS nnfw_session::set_input ( uint32_t  index,
NNFW_TYPE  type,
const void *  buffer,
size_t  length 
)

Definition at line 523 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_input : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (!buffer && length != 0)
  {
    std::cerr
      << "Error during nnfw_session::set_input : given buffer is NULL but the length is not 0"
      << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Allow float input internal quantization only
    if (type == NNFW_TYPE_TENSOR_FLOAT32)
      _execution->setInputType(onert::ir::IOIndex(index),
                               onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
    _execution->setInput(onert::ir::IOIndex(index), buffer, length);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_input : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_TYPE_TENSOR_FLOAT32.

Referenced by run_with_auto_compilation(), and package.infer.session::set_inputs().
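
A minimal single-input, single-output inference sketch using only calls documented on this page (shapes and sizes are illustrative):

std::vector<float> input(1 * 224 * 224 * 3); // illustrative input shape
std::vector<float> output(1000);             // illustrative output size
session->prepare();
session->set_input(0, NNFW_TYPE_TENSOR_FLOAT32, input.data(), input.size() * sizeof(float));
session->set_output(0, NNFW_TYPE_TENSOR_FLOAT32, output.data(), output.size() * sizeof(float));
session->run(); // or run_async() followed by await()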

◆ set_input_layout()

NNFW_STATUS nnfw_session::set_input_layout ( uint32_t  index,
NNFW_LAYOUT  layout 
)

Definition at line 632 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_input_layout : "
              << "run should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
        layout != NNFW_LAYOUT_CHANNELS_LAST)
    {
      std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _execution->setInputLayout(onert::ir::IOIndex(index), convertLayout(layout));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_input_layout : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_LAYOUT_CHANNELS_FIRST, NNFW_LAYOUT_CHANNELS_LAST, NNFW_LAYOUT_NONE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_input_tensorinfo()

NNFW_STATUS nnfw_session::set_input_tensorinfo ( uint32_t  index,
const nnfw_tensorinfo ti 
)

Definition at line 689 of file nnfw_session.cc.

{
  // sanity check
  {
    if (isStateInitialized())
    {
      std::cerr << "Error during set_input_tensorinfo : should be run after load_model"
                << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    if (ti == nullptr)
    {
      std::cerr << "Error during nnfw_session::set_input_tensorinfo : tensorinfo is null"
                << std::endl;
      return NNFW_STATUS_UNEXPECTED_NULL;
    }

    if (ti->rank <= 0 || ti->rank > NNFW_MAX_RANK)
    {
      std::cerr << "unsupported rank: " << ti->rank << std::endl;
      return NNFW_STATUS_ERROR;
    }

    for (int32_t i = 0; i < ti->rank; ++i)
    {
      if (ti->dims[i] <= 0)
      {
        std::cerr << "dim must be positive integer but was " << ti->dims[i] << std::endl;
        return NNFW_STATUS_ERROR;
      }
    }
  }

  onert::ir::Shape new_shape(ti->rank);
  for (int32_t i = 0; i < ti->rank; i++)
    new_shape.dim(i) = ti->dims[i];

  if (!isStatePreparedOrFinishedRun())
  {
    // In this case, if we apply the input shape, it will propagate after compilation and execution
    _nnpkg->changeInputShape(index, new_shape);
  }
  else // when called after nnfw_session::prepare()
    _execution->changeInputShape(onert::ir::IOIndex(index), new_shape);

  return NNFW_STATUS_NO_ERROR;
}

References nnfw_tensorinfo::dims, NNFW_MAX_RANK, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and nnfw_tensorinfo::rank.
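
A resizing sketch: query the current info, then grow the batch dimension (the new value is illustrative). Before prepare() the change is applied to the package; afterwards it goes through Execution::changeInputShape, as shown above.

nnfw_tensorinfo ti;
session->input_tensorinfo(0, &ti); // current rank and dims
ti.dims[0] = 2;                    // hypothetical new batch size
session->set_input_tensorinfo(0, &ti);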

◆ set_odc_param_minmax_records_count()

NNFW_STATUS nnfw_session::set_odc_param_minmax_records_count ( int  minmax_records_count)

Definition at line 2010 of file nnfw_session.cc.

{
  if (isStateInitialized() || isStateRunning())
  {
    std::cerr << "invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (_quant_manager->setMinMaxRecordsThreshold(minmax_records_count))
    return NNFW_STATUS_NO_ERROR;
  else
    return NNFW_STATUS_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_output()

NNFW_STATUS nnfw_session::set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

Definition at line 556 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_output : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  if (!buffer && length != 0)
  {
    std::cerr
      << "Error during nnfw_session::set_output : given buffer is NULL but the length is not 0"
      << std::endl;
    return NNFW_STATUS_ERROR;
  }

  try
  {
    // Allow float output internal dequantization only
    if (type == NNFW_TYPE_TENSOR_FLOAT32)
      _execution->setOutputType(onert::ir::IOIndex(index),
                                onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
    _execution->setOutput(onert::ir::IOIndex(index), buffer, length);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_output : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_TYPE_TENSOR_FLOAT32.

Referenced by run_with_auto_compilation(), and package.infer.session::set_outputs().

◆ set_output_layout()

NNFW_STATUS nnfw_session::set_output_layout ( uint32_t  index,
NNFW_LAYOUT  layout 
)

Definition at line 660 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedRun())
  {
    std::cerr << "Error during nnfw_session::set_output_layout : "
              << "run should be run after prepare" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
        layout != NNFW_LAYOUT_CHANNELS_LAST)
    {
      std::cerr << "Error during nnfw_session::set_output_layout, not supported layout"
                << std::endl;
      return NNFW_STATUS_ERROR;
    }

    _execution->setOutputLayout(onert::ir::IOIndex(index), convertLayout(layout));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_output_layout : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_LAYOUT_CHANNELS_FIRST, NNFW_LAYOUT_CHANNELS_LAST, NNFW_LAYOUT_NONE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_prepare_config()

NNFW_STATUS nnfw_session::set_prepare_config ( const NNFW_PREPARE_CONFIG  key,
const char *  value 
)

Definition at line 1932 of file nnfw_session.cc.

{
  if (!isStateModelLoaded())
  {
    std::cerr << "Error during nnfw_session::set_prepare_config : Invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  switch (key)
  {
    case NNFW_PREPARE_CONFIG_PROFILE:
      _coptions->he_profiling_mode = true;
      break;
    default:
      return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_PREPARE_CONFIG_PROFILE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_quantization_type()

NNFW_STATUS nnfw_session::set_quantization_type ( NNFW_QUANTIZE_TYPE  qtype)

Definition at line 1760 of file nnfw_session.cc.

{
  using onert::odc::QuantizeType;
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    QuantizeType odc_qtype = onert::odc::ODC_QTYPE_NOT_SET;
    switch (qtype)
    {
      case NNFW_QUANTIZE_TYPE_U8_ASYM:
        odc_qtype = onert::odc::ODC_QTYPE_U8_ASYM;
        break;
      case NNFW_QUANTIZE_TYPE_I16_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_I16_SYM;
        break;
      case NNFW_QUANTIZE_TYPE_WO_I8_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_WO_I8_SYM;
        break;
      case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
        odc_qtype = onert::odc::ODC_QTYPE_WO_I16_SYM;
        break;
      default:
        return NNFW_STATUS_ERROR;
    }
    _quant_manager->quantizeType(odc_qtype);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_quantization_type : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_QUANTIZE_TYPE_I16_SYM, NNFW_QUANTIZE_TYPE_U8_ASYM, NNFW_QUANTIZE_TYPE_WO_I16_SYM, NNFW_QUANTIZE_TYPE_WO_I8_SYM, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, onert::odc::ODC_QTYPE_I16_SYM, onert::odc::ODC_QTYPE_NOT_SET, onert::odc::ODC_QTYPE_U8_ASYM, onert::odc::ODC_QTYPE_WO_I16_SYM, and onert::odc::ODC_QTYPE_WO_I8_SYM.

◆ set_quantized_model_path()

NNFW_STATUS nnfw_session::set_quantized_model_path ( const char *  path)

Definition at line 1800 of file nnfw_session.cc.

{
  try
  {
    if (isStateInitialized() || isStateRunning())
    {
      std::cerr << "invalid state" << std::endl;
      return NNFW_STATUS_INVALID_STATE;
    }

    _quant_manager->exportModelPath(std::string(path));
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::set_quantized_model_path : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.
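
A sketch of the explicit quantization path (the output file name is hypothetical): pick a type, set the export path, then call quantize(), which reloads the quantized model as described above.

session->set_quantization_type(NNFW_QUANTIZE_TYPE_U8_ASYM);
session->set_quantized_model_path("model.q8.circle");
session->quantize();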

◆ set_workspace()

NNFW_STATUS nnfw_session::set_workspace ( const char *  dir)

Definition at line 855 of file nnfw_session.cc.

{
  // TODO Check dir read & write permission

  if (!dir)
    return NNFW_STATUS_UNEXPECTED_NULL;

  if (!isStateInitialized())
    return NNFW_STATUS_INVALID_STATE;

  _coptions->workspace_dir = std::string(dir);

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ train_expected_tensorinfo() [1/2]

NNFW_STATUS nnfw_session::train_expected_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 1409 of file nnfw_session.cc.

{
  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_expected_tensorinfo : invalid state"
              << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  // Check index is valid: [0, getExpectedSize())

  // NYI
  (void)index;
  (void)ti;
  return NNFW_STATUS_ERROR;
}

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ train_expected_tensorinfo() [2/2]

NNFW_STATUS nnfw_session::train_expected_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

◆ train_export_checkpoint() [1/2]

NNFW_STATUS nnfw_session::train_export_checkpoint ( const char *  path)

Definition at line 324 of file onert-micro.cpp.

{
  _train_interpreter->saveCheckpoint(_config, path);
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::saveCheckpoint().

◆ train_export_checkpoint() [2/2]

NNFW_STATUS nnfw_session::train_export_checkpoint ( const char *  path)

◆ train_export_circle() [1/2]

NNFW_STATUS nnfw_session::train_export_circle ( const char *  path)

Definition at line 318 of file onert-micro.cpp.

{
  _train_interpreter->saveModel(_config, path);
  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::saveModel().

◆ train_export_circle() [2/2]

NNFW_STATUS nnfw_session::train_export_circle ( const char *  path)

◆ train_export_circleplus()

NNFW_STATUS nnfw_session::train_export_circleplus ( const char *  path)

Definition at line 1647 of file nnfw_session.cc.

{
  if (path == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : path is null" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  if (!isStatePreparedOrFinishedTraining())
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : invalid state" << std::endl;
    return NNFW_STATUS_INVALID_STATE;
  }

  try
  {
    onert::exporter::CircleExporter exporter(_model_path.string(), std::string{path});
    exporter.updateWeight(_execution);
    exporter.updateMetadata(_train_info);
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_export_circleplus : " << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, onert::exporter::CircleExporter::updateMetadata(), and onert::exporter::CircleExporter::updateWeight().

◆ train_get_loss() [1/2]

NNFW_STATUS nnfw_session::train_get_loss ( uint32_t  index,
float *  loss 
)

Definition at line 370 of file onert-micro.cpp.

{
  onert_micro::OMMetrics m;
  switch (_config.training_context.loss)
  {
    case onert_micro::CROSS_ENTROPY:
      m = onert_micro::CROSS_ENTROPY_METRICS;
      break;
    default:
      m = onert_micro::CROSS_ENTROPY_METRICS;
      break;
  }

  _train_interpreter->evaluateMetric(_config, m, reinterpret_cast<void *>(loss),
                                     _config.training_context.batch_size);
  return NNFW_STATUS_NO_ERROR;
}

References onert_micro::OMTrainingContext::batch_size, onert_micro::CROSS_ENTROPY, onert_micro::CROSS_ENTROPY_METRICS, onert_micro::OMTrainingInterpreter::evaluateMetric(), onert_micro::OMTrainingContext::loss, m, NNFW_STATUS_NO_ERROR, and onert_micro::OMConfig::training_context.

◆ train_get_loss() [2/2]

NNFW_STATUS nnfw_session::train_get_loss ( uint32_t  index,
float *  loss 
)

◆ train_get_traininfo()

NNFW_STATUS nnfw_session::train_get_traininfo ( nnfw_train_info info)

Definition at line 1131 of file nnfw_session.cc.

{
  if (isStateInitialized())
  {
    // There is no _train_info in INITIALIZED, since _train_info is set when a model is loaded
    std::cerr << "Error during nnfw_session::train_get_traininfo : invalid state";
    return NNFW_STATUS_INVALID_STATE;
  }

  if (info == nullptr)
  {
    std::cerr << "Error during nnfw_session::train_get_traininfo : info is nullptr" << std::endl;
    return NNFW_STATUS_UNEXPECTED_NULL;
  }

  // after model loaded, it ensures that _train_info is not nullptr
  assert(_train_info != nullptr);

  auto convertLossCode = [](const onert::ir::train::LossCode &code) -> NNFW_TRAIN_LOSS {
    switch (code)
    {
      case onert::ir::train::LossCode::Undefined:
        return NNFW_TRAIN_LOSS_UNDEFINED;
      case onert::ir::train::LossCode::MeanSquaredError:
        return NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR;
      case onert::ir::train::LossCode::CategoricalCrossentropy:
        return NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY;
      default:
        throw std::runtime_error{"fail to convert ir::train::LossCode"};
    }
  };

  auto convertLossReduction =
    [](const onert::ir::train::LossReductionType &type) -> NNFW_TRAIN_LOSS_REDUCTION {
    switch (type)
    {
      case onert::ir::train::LossReductionType::Undefined:
        return NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED;
      case onert::ir::train::LossReductionType::SumOverBatchSize:
        return NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE;
      case onert::ir::train::LossReductionType::Sum:
        return NNFW_TRAIN_LOSS_REDUCTION_SUM;
      default:
        throw std::runtime_error{"fail to convert from ir::train::LossReductionType"};
    }
  };

  auto convertOptimizerCode =
    [](const onert::ir::train::OptimizerCode &code) -> NNFW_TRAIN_OPTIMIZER {
    switch (code)
    {
      case onert::ir::train::OptimizerCode::Undefined:
        return NNFW_TRAIN_OPTIMIZER_UNDEFINED;
      case onert::ir::train::OptimizerCode::SGD:
        return NNFW_TRAIN_OPTIMIZER_SGD;
      case onert::ir::train::OptimizerCode::Adam:
        return NNFW_TRAIN_OPTIMIZER_ADAM;
      default:
        throw std::runtime_error{"fail to convert from ir::train::OptimizerCode"};
    }
  };

  const auto &loss = _train_info->lossInfo();
  const auto &optim = _train_info->optimizerInfo();

  try
  {
    info->learning_rate = optim.learning_rate;
    info->batch_size = _train_info->batchSize();
    info->loss_info.loss = convertLossCode(loss.loss_code);
    info->loss_info.reduction_type = convertLossReduction(loss.reduction_type);
    info->opt = convertOptimizerCode(optim.optim_code);

    if (_train_info->getTrainableOps().size() > 0)
    {
      const uint32_t first_trainable_idx = _train_info->getTrainableOps().cbegin()->value();
      const uint32_t last_trainable_idx = _train_info->getTrainableOps().crbegin()->value();
      const uint32_t ops_size = primary_subgraph()->operations().size();
      const uint32_t trainable_indexes_range = last_trainable_idx - first_trainable_idx + 1;

      // check if the trainable ops set contains continuous indexes at the back of the set
      if (last_trainable_idx == ops_size - 1 &&
          trainable_indexes_range == _train_info->getTrainableOps().size())
      {
        // check if all ops are trainable
        if (0 == first_trainable_idx)
        {
          info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_ALL;
        }
        else
        {
          info->num_of_trainable_ops = trainable_indexes_range;
        }
      }
      else
      {
        info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_INCORRECT_STATE;
        std::cerr << "conversion from set of trainable ops to num_of_trainable_ops is impossible"
                  << std::endl;
        return NNFW_STATUS_ERROR;
      }
    }
    else
    {
      // no layer will be trained
      info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_NONE;
    }
  }
  catch (const std::exception &e)
  {
    std::cerr << "Error during nnfw_session::train_get_traininfo" << e.what() << std::endl;
    return NNFW_STATUS_ERROR;
  }

  return NNFW_STATUS_NO_ERROR;
}

References onert::ir::train::Adam, onert::ir::train::CategoricalCrossentropy, info, onert::ir::train::MeanSquaredError, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY, NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR, NNFW_TRAIN_LOSS_REDUCTION_SUM, NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE, NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED, NNFW_TRAIN_LOSS_UNDEFINED, NNFW_TRAIN_OPTIMIZER_ADAM, NNFW_TRAIN_OPTIMIZER_SGD, NNFW_TRAIN_OPTIMIZER_UNDEFINED, NNFW_TRAIN_TRAINABLE_ALL, NNFW_TRAIN_TRAINABLE_INCORRECT_STATE, NNFW_TRAIN_TRAINABLE_NONE, onert::ir::IGraph::operations(), onert::ir::train::SGD, onert::util::ObjectManager< Index, Object >::size(), onert::ir::train::Sum, onert::ir::train::SumOverBatchSize, and onert::ir::train::Undefined.
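
A hedged usage sketch (assumes a session whose model is already loaded, so `_train_info` exists; the field names come straight from the body above, while the function name is hypothetical):

#include <nnfw_session.h>
#include <iostream>

// Sketch: inspect the hyper-parameters and trainable-ops summary of a session.
void print_traininfo(nnfw_session &session)
{
  nnfw_train_info info{};
  if (session.train_get_traininfo(&info) != NNFW_STATUS_NO_ERROR)
    return;
  std::cout << "learning rate: " << info.learning_rate
            << ", batch size: " << info.batch_size << std::endl;
  if (info.num_of_trainable_ops == NNFW_TRAIN_TRAINABLE_ALL)
    std::cout << "every operation is trainable" << std::endl;
}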

◆ train_import_checkpoint() [1/2]

NNFW_STATUS nnfw_session::train_import_checkpoint ( const char *  path)

Definition at line 330 of file onert-micro.cpp.

331{
332  _train_interpreter->loadCheckpoint(_config, path);
333  return NNFW_STATUS_NO_ERROR;
334}

References onert_micro::OMTrainingInterpreter::loadCheckpoint(), and NNFW_STATUS_NO_ERROR.
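
A minimal sketch, assuming the session was configured for training beforehand; the checkpoint path is hypothetical:

#include <nnfw_session.h>
#include <iostream>

// Sketch: resume training from a checkpoint written by train_export_checkpoint().
void resume_training(nnfw_session &session)
{
  if (session.train_import_checkpoint("training.ckpt") != NNFW_STATUS_NO_ERROR)
    std::cerr << "checkpoint import failed" << std::endl;
}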

◆ train_import_checkpoint() [2/2]

NNFW_STATUS nnfw_session::train_import_checkpoint ( const char *  path)

◆ train_input_tensorinfo() [1/2]

NNFW_STATUS nnfw_session::train_input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 1393 of file nnfw_session.cc.

1394{
1395  if (!isStatePreparedOrFinishedTraining())
1396  {
1397    std::cerr << "Error during nnfw_session::train_input_tensorinfo : invalid state" << std::endl;
1398    return NNFW_STATUS_INVALID_STATE;
1399  }
1400
1401  // Check index is valid: [0, getInputSize())
1402
1403  // NYI
1404  (void)index;
1405  (void)ti;
1406  return NNFW_STATUS_ERROR;
1407}

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ train_input_tensorinfo() [2/2]

NNFW_STATUS nnfw_session::train_input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

◆ train_prepare() [1/2]

NNFW_STATUS nnfw_session::train_prepare ( )

Definition at line 284 of file onert-micro.cpp.

285{
286  // TODO: Implement remaining jobs if inference_interpreter is introduced
287  // (maybe interpreter initialization?)
288  return NNFW_STATUS_NO_ERROR;
289}

References NNFW_STATUS_NO_ERROR.
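
In the expected call order, train_prepare() sits between configuration and the training loop. A hedged sketch (the model path is hypothetical and error handling is omitted for brevity):

#include <nnfw_session.h>

// Sketch of the setup sequence before the first train_run() call.
void setup_training(nnfw_session &session, const nnfw_train_info *tinfo)
{
  session.load_model_from_file("model.circle"); // hypothetical model package path
  session.train_set_traininfo(tinfo);           // caller-chosen hyper-parameters
  session.train_prepare();                      // finish setup; session is now trainable
}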

◆ train_prepare() [2/2]

NNFW_STATUS nnfw_session::train_prepare ( )

◆ train_run() [1/2]

NNFW_STATUS nnfw_session::train_run ( bool  update_weights)

Definition at line 291 of file onert-micro.cpp.

292{
293  if (update_weights)
294  {
295    // TODO: should onert-micro support update_weights?
296    // Here we use this flag to distinguish inference from training in the training interpreter
297    _train_interpreter->trainSingleStep(_config);
298    _config.training_context.num_epoch =
299      _config.training_context.num_step / _config.training_context.batch_size + 1;
300  }
301  else
302  {
303    // TODO: support multiple inputs/outputs
304    assert(outputbuf != nullptr);
305    _train_interpreter->allocateInputs();
306    float *allocated_input_data = (float *)_train_interpreter->getInputDataAt(0);
307    float *user_input_data = (float *)_train_interpreter->getInputData(0);
308    memcpy(allocated_input_data, user_input_data,
309           sizeof(float) * _train_interpreter->getInputSizeAt(0));
310    _train_interpreter->run(_config);
311    float *calculated_ptr = (float *)_train_interpreter->getOutputDataAt(0);
312    memcpy(outputbuf, calculated_ptr, sizeof(float) * _train_interpreter->getOutputSizeAt(0));
313    _train_interpreter->reset();
314  }
315  return NNFW_STATUS_NO_ERROR;
316}

References onert_micro::OMTrainingInterpreter::allocateInputs(), onert_micro::OMTrainingContext::batch_size, onert_micro::OMTrainingInterpreter::getInputData(), onert_micro::OMTrainingInterpreter::getInputDataAt(), onert_micro::OMTrainingInterpreter::getInputSizeAt(), onert_micro::OMTrainingInterpreter::getOutputDataAt(), onert_micro::OMTrainingInterpreter::getOutputSizeAt(), NNFW_STATUS_NO_ERROR, onert_micro::OMTrainingContext::num_epoch, onert_micro::OMTrainingContext::num_step, onert_micro::OMTrainingInterpreter::reset(), onert_micro::OMTrainingInterpreter::run(), onert_micro::OMConfig::training_context, and onert_micro::OMTrainingInterpreter::trainSingleStep().
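
A hedged sketch of the two modes (buffers are assumed to have been registered beforehand with train_set_input, train_set_expected, and train_set_output; the function name is hypothetical):

#include <nnfw_session.h>

// Sketch: one weight-updating step, then a forward-only pass.
void step_then_infer(nnfw_session &session)
{
  session.train_run(true);  // single training step; weights are updated
  session.train_run(false); // inference path; result lands in the registered output buffer
}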

◆ train_run() [2/2]

NNFW_STATUS nnfw_session::train_run ( bool  update_weights)

◆ train_set_expected() [1/2]

NNFW_STATUS nnfw_session::train_set_expected ( uint32_t  index,
const void *  expected,
const nnfw_tensorinfo expected_tensorinfo 
)

Definition at line 1470 of file nnfw_session.cc.

1472{
1473  if (expected == nullptr)
1474  {
1475    std::cerr << "Error during nnfw_session::train_set_expected : expected buffer is null"
1476              << std::endl;
1477    return NNFW_STATUS_UNEXPECTED_NULL;
1478  }
1479
1480  if (!isStatePreparedOrFinishedTraining())
1481  {
1482    std::cerr << "Error during nnfw_session::train_set_expected : invalid state" << std::endl;
1483    return NNFW_STATUS_INVALID_STATE;
1484  }
1485
1486  if (index >= getOutputSize())
1487  {
1488    std::cerr << "Error during nnfw_session::train_set_expected : index is out of range"
1489              << std::endl;
1490    return NNFW_STATUS_ERROR;
1491  }
1492
1493  try
1494  {
1495    auto output_ind = onert::ir::IOIndex(index);
1496    auto size = _execution->getOutputTotalSize(output_ind);
1497    if (expected_tensorinfo && getBufSize(expected_tensorinfo) != size)
1498    {
1499      std::cerr << "Error during nnfw_session::train_set_expected : invalid tensorinfo"
1500                << std::endl;
1501      return NNFW_STATUS_ERROR;
1502    }
1503
1504    // NOTE Find the loss input index.
1505    // One extra (loss) input is added per output, so the loss input index is
1506    // obtained by subtracting the total output count from the total input count
1507    // and adding the output index.
1508    auto input_index = getInputSize() - getOutputSize() + index;
1509    auto input_ind = onert::ir::IOIndex(input_index);
1510    _execution->setInput(input_ind, expected, size);
1511  }
1512  catch (const std::exception &e)
1513  {
1514    std::cerr << "Error during nnfw_session::train_set_expected : " << e.what() << std::endl;
1515    return NNFW_STATUS_ERROR;
1516  }
1517
1518  return NNFW_STATUS_NO_ERROR;
1519}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.
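
A hedged sketch of binding a label buffer (the caller owns `labels`, which must stay valid until train_run() consumes it; passing nullptr for the tensorinfo keeps the shape fixed at prepare time, as the size check above implies):

#include <nnfw_session.h>

// Sketch: bind a label buffer for output 0 without changing its tensorinfo.
void bind_labels(nnfw_session &session, const float *labels)
{
  session.train_set_expected(0, labels, nullptr);
}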

◆ train_set_expected() [2/2]

NNFW_STATUS nnfw_session::train_set_expected ( uint32_t  index,
void *  expected 
)

Definition at line 344 of file onert-micro.cpp.

345{
346  _train_interpreter->setTarget((uint8_t *)expected, index);
347  return NNFW_STATUS_NO_ERROR;
348}

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::setTarget().

◆ train_set_input() [1/2]

NNFW_STATUS nnfw_session::train_set_input ( uint32_t  index,
const void *  input,
const nnfw_tensorinfo input_tensorinfo 
)

Definition at line 1426 of file nnfw_session.cc.

1428{
1429  if (input == nullptr)
1430  {
1431    std::cerr << "Error during nnfw_session::train_set_input : input buffer is null" << std::endl;
1432    return NNFW_STATUS_UNEXPECTED_NULL;
1433  }
1434
1435  if (!isStatePreparedOrFinishedTraining())
1436  {
1437    std::cerr << "Error during nnfw_session::train_set_input : invalid state" << std::endl;
1438    return NNFW_STATUS_INVALID_STATE;
1439  }
1440
1441  if (index >= getInputSize())
1442  {
1443    std::cerr << "Error during nnfw_session::train_set_input : index is out of range" << std::endl;
1444    return NNFW_STATUS_ERROR;
1445  }
1446
1447  try
1448  {
1449    auto ind = onert::ir::IOIndex(index);
1450    auto size = _execution->getInputTotalSize(ind);
1451    if (input_tensorinfo && getBufSize(input_tensorinfo) != size)
1452    {
1453      std::cerr
1454        << "Error during nnfw_session::train_set_input : changing tensorinfo is not supported"
1455        << std::endl;
1456      return NNFW_STATUS_ERROR;
1457    }
1458
1459    _execution->setInput(ind, input, size);
1460  }
1461  catch (const std::exception &e)
1462  {
1463    std::cerr << "Error during nnfw_session::train_set_input : " << e.what() << std::endl;
1464    return NNFW_STATUS_ERROR;
1465  }
1466
1467  return NNFW_STATUS_NO_ERROR;
1468}

References input_tensorinfo(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.
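
A hedged sketch (the caller owns `input`, which must remain valid until train_run() executes; the function name is hypothetical):

#include <nnfw_session.h>

// Sketch: bind an input buffer for input 0 without changing its tensorinfo.
void bind_input(nnfw_session &session, const float *input)
{
  session.train_set_input(0, input, nullptr);
}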

◆ train_set_input() [2/2]

NNFW_STATUS nnfw_session::train_set_input ( uint32_t  index,
void *  input 
)

Definition at line 337 of file onert-micro.cpp.

338{
339  _train_interpreter->setInput((uint8_t *)input, index);
340  return NNFW_STATUS_NO_ERROR;
341}

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::setInput().

◆ train_set_output() [1/2]

NNFW_STATUS nnfw_session::train_set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

Definition at line 350 of file onert-micro.cpp.

352{
353  outputbuf = (uint8_t *)buffer;
354  return NNFW_STATUS_NO_ERROR;
355}

References NNFW_STATUS_NO_ERROR.
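
A hedged sketch for this onert-micro overload (the element count is hypothetical; the buffer is consumed by the forward-only path of train_run(false)):

#include <nnfw_session.h>
#include <cstddef>

// Sketch: register the buffer that train_run(false) writes its result into.
void bind_output(nnfw_session &session, float *out, size_t out_elem_count)
{
  session.train_set_output(0, NNFW_TYPE_TENSOR_FLOAT32, out,
                           out_elem_count * sizeof(float));
}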

◆ train_set_output() [2/2]

NNFW_STATUS nnfw_session::train_set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

◆ train_set_traininfo() [1/2]

NNFW_STATUS nnfw_session::train_set_traininfo ( const nnfw_train_info info)

Definition at line 357 of file onert-micro.cpp.

358{
359  _config.training_context.learning_rate = info->learning_rate;
360  _config.training_context.batch_size = info->batch_size;
361  _config.training_context.optimizer =
362    (info->opt == NNFW_TRAIN_OPTIMIZER_ADAM) ? onert_micro::ADAM : onert_micro::SGD;
363  _config.training_context.beta = info->adam_opt.beta;
364  _config.training_context.beta_squares = info->adam_opt.beta2;
365  _config.training_context.epsilon = info->adam_opt.epsilon;
366  _config.training_context.num_of_train_layers = info->num_trainble_ops;
367  return NNFW_STATUS_NO_ERROR;
368}

References onert_micro::ADAM, onert_micro::OMTrainingContext::batch_size, onert_micro::OMTrainingContext::beta, onert_micro::OMTrainingContext::beta_squares, info, onert_micro::OMTrainingContext::learning_rate, NNFW_STATUS_NO_ERROR, NNFW_TRAIN_OPTIMIZER_ADAM, onert_micro::OMTrainingContext::num_of_train_layers, onert_micro::OMTrainingContext::optimizer, onert_micro::SGD, and onert_micro::OMConfig::training_context.
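
A hedged configuration sketch (hyper-parameter values are illustrative only; the fields mirror the assignments in the body above):

#include <nnfw_session.h>

// Sketch: fill nnfw_train_info and hand it to the session before train_prepare().
void configure_training(nnfw_session &session)
{
  nnfw_train_info tinfo{};
  tinfo.learning_rate = 0.001f;
  tinfo.batch_size = 32;
  tinfo.opt = NNFW_TRAIN_OPTIMIZER_ADAM;
  session.train_set_traininfo(&tinfo);
}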

◆ train_set_traininfo() [2/2]

NNFW_STATUS nnfw_session::train_set_traininfo ( const nnfw_train_info info)

The documentation for this struct was generated from the following files: