ONE - On-device Neural Engine
Loading...
Searching...
No Matches
nnfw_session Struct Reference

#include <nnfw_session.h>

Public Member Functions

 ~nnfw_session ()
 
NNFW_STATUS load_model_from_file (const char *package_file_path)
 
NNFW_STATUS train_set_traininfo (const nnfw_train_info *info)
 
NNFW_STATUS train_prepare ()
 
NNFW_STATUS train_input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_expected_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_set_input (uint32_t index, void *input)
 
NNFW_STATUS train_set_expected (uint32_t index, void *expected)
 
NNFW_STATUS train_set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS train_run (bool update_weights)
 
NNFW_STATUS train_get_loss (uint32_t index, float *loss)
 
NNFW_STATUS train_export_circle (const char *path)
 
NNFW_STATUS train_export_checkpoint (const char *path)
 
NNFW_STATUS train_import_checkpoint (const char *path)
 
 ~nnfw_session ()
 
NNFW_STATUS load_model_from_path (const char *path)
 
NNFW_STATUS prepare ()
 
NNFW_STATUS run ()
 
NNFW_STATUS run_async ()
 
NNFW_STATUS await ()
 
NNFW_STATUS set_input (uint32_t index, NNFW_TYPE type, const void *buffer, size_t length)
 
NNFW_STATUS set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS input_size (uint32_t *number)
 
NNFW_STATUS output_size (uint32_t *number)
 
NNFW_STATUS set_input_layout (uint32_t index, NNFW_LAYOUT layout)
 
NNFW_STATUS set_output_layout (uint32_t index, NNFW_LAYOUT layout)
 
NNFW_STATUS set_input_type (uint32_t index, NNFW_TYPE type)
 
NNFW_STATUS set_output_type (uint32_t index, NNFW_TYPE type)
 
NNFW_STATUS set_input_tensorinfo (uint32_t index, const nnfw_tensorinfo *ti)
 
NNFW_STATUS input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS output_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS set_available_backends (const char *backends)
 
NNFW_STATUS set_workspace (const char *dir)
 
NNFW_STATUS set_signature_for_tensorinfo (const char *signature)
 
NNFW_STATUS set_signature_run (const char *signature)
 
NNFW_STATUS set_config (const char *key, const char *value)
 
NNFW_STATUS get_config (const char *key, char *value, size_t value_size)
 
NNFW_STATUS load_circle_from_buffer (uint8_t *buffer, size_t size)
 
NNFW_STATUS get_output (uint32_t index, nnfw_tensorinfo *out_info, const void **out_buffer)
 
NNFW_STATUS register_custom_operation (const std::string &id, nnfw_custom_eval eval_func)
 
NNFW_STATUS input_tensorindex (const char *tensorname, uint32_t *index)
 
NNFW_STATUS output_tensorindex (const char *tensorname, uint32_t *index)
 
NNFW_STATUS run_with_auto_compilation (const char *target, NNFW_CODEGEN_PREF pref)
 
NNFW_STATUS set_odc_param_minmax_records_count (int minmax_records_count)
 
NNFW_STATUS delete_odc_minmax_file ()
 
NNFW_STATUS set_backends_per_operation (const char *backend_settings)
 Set backends with string-encoded mapping from operation index to backend type (cpu, acl_cl)
 
NNFW_STATUS train_get_traininfo (nnfw_train_info *info)
 
NNFW_STATUS train_set_traininfo (const nnfw_train_info *info)
 
NNFW_STATUS train_prepare ()
 
NNFW_STATUS train_input_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_expected_tensorinfo (uint32_t index, nnfw_tensorinfo *ti)
 
NNFW_STATUS train_set_input (uint32_t index, const void *input, const nnfw_tensorinfo *input_tensorinfo)
 
NNFW_STATUS train_set_expected (uint32_t index, const void *expected, const nnfw_tensorinfo *expected_tensorinfo)
 
NNFW_STATUS train_set_output (uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 
NNFW_STATUS train_run (bool update_weights)
 
NNFW_STATUS train_get_loss (uint32_t index, float *loss)
 
NNFW_STATUS train_export_circle (const char *path)
 
NNFW_STATUS train_export_circleplus (const char *path)
 
NNFW_STATUS train_import_checkpoint (const char *path)
 
NNFW_STATUS train_export_checkpoint (const char *path)
 
NNFW_STATUS set_quantization_type (NNFW_QUANTIZE_TYPE qtype)
 
NNFW_STATUS set_quantized_model_path (const char *path)
 
NNFW_STATUS quantize ()
 
NNFW_STATUS set_codegen_model_path (const char *path)
 
NNFW_STATUS codegen (const char *target, NNFW_CODEGEN_PREF pref)
 
NNFW_STATUS set_prepare_config (const NNFW_PREPARE_CONFIG key, const char *value)
 
NNFW_STATUS reset_prepare_config ()
 
NNFW_STATUS set_execute_config (const NNFW_RUN_CONFIG key, const char *value)
 
NNFW_STATUS reset_execute_config ()
 

Static Public Member Functions

static NNFW_STATUS create (nnfw_session **session)
 Factory method. It creates and initializes nnfw_session.
 
static NNFW_STATUS create (nnfw_session **session)
 Factory method. It creates and initializes nnfw_session.
 
static NNFW_STATUS deprecated (const char *msg)
 

Detailed Description

Definition at line 68 of file onert-micro.cpp.

Constructor & Destructor Documentation

◆ ~nnfw_session() [1/2]

nnfw_session::~nnfw_session ( )
default

Definition at line 165 of file onert-micro.cpp.

165{ delete _train_interpreter; }

◆ ~nnfw_session() [2/2]

nnfw_session::~nnfw_session ( )

Member Function Documentation

◆ await()

NNFW_STATUS nnfw_session::await ( )

Definition at line 509 of file nnfw_session.cc.

510{
511 if (!isStateRunning())
512 {
513 std::cerr << "Error during nnfw_session::run_await : "
514 << "run_await should be run after run_async" << std::endl;
515 return NNFW_STATUS_ERROR;
516 }
517
518 _execution->waitFinish();
519
520 _state = State::FINISHED_RUN;
522}
@ NNFW_STATUS_NO_ERROR
Definition onert-micro.h:88
@ NNFW_STATUS_ERROR
Definition onert-micro.h:93

References NNFW_STATUS_ERROR, and NNFW_STATUS_NO_ERROR.

◆ codegen()

NNFW_STATUS nnfw_session::codegen ( const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Definition at line 2074 of file nnfw_session.cc.

2075{
2076 try
2077 {
2078 if (isStateInitialized() || isStateRunning())
2079 {
2080 std::cerr << "Error during nnfw_session::codegen : Invalid state" << std::endl;
2082 }
2083
2084 std::string target_str{target};
2085 if (target_str.empty() || target_str.size() < 5 ||
2086 target_str.substr(target_str.size() - 4) != "-gen")
2087 {
2088 std::cerr << "Error during nnfw_session::codegen : Invalid target" << std::endl;
2089 return NNFW_STATUS_ERROR;
2090 }
2091
2092 onert::odc::CodegenPreference codegen_pref;
2093 switch (pref)
2094 {
2097 break;
2100 break;
2103 break;
2106 break;
2107 default:
2108 std::cerr << "Error during nnfw_session::codegen : Invalid preference" << std::endl;
2109 return NNFW_STATUS_ERROR;
2110 }
2111
2112 assert(_codegen_manager != nullptr);
2113 auto export_model_path = std::filesystem::path(_codegen_manager->exportModelPath());
2114 const auto model_type = target_str.substr(0, target_str.size() - 4);
2115 // If the export_model_path is not set, it generates a compiled model path
2116 // automatically.
2117 if (export_model_path.empty())
2118 {
2119 // The compiled model path is the same directory of the original model/package with
2120 // target backend extension.
2121 export_model_path = _model_path.replace_extension(model_type);
2122 _codegen_manager->exportModelPath(export_model_path.string());
2123 }
2124
2125 _codegen_manager->codegen(_model_path, target, codegen_pref);
2126
2127 // Replace model
2128 // TODO Support buffer replace, not file reload
2129 return loadModelFile(export_model_path, model_type);
2130 }
2131 catch (const std::exception &e)
2132 {
2133 std::cerr << "Error during nnfw_session::compile : " << e.what() << std::endl;
2134 return NNFW_STATUS_ERROR;
2135 }
2136}
Option< std::string > target(optname("--target"), overview("select target language to emit for given architecture." "Valid values are '" NNC_TARGET_ARM_CPP "', '" NNC_TARGET_X86_CPP "', '" NNC_TARGET_ARM_GPU_CPP "', '" NNC_TARGET_INTERPRETER "'"), std::string(), optional(false), optvalues(NNC_TARGET_ARM_CPP "," NNC_TARGET_X86_CPP "," NNC_TARGET_ARM_GPU_CPP "," NNC_TARGET_INTERPRETER), nullptr, separators("="))
Definition Options.h:47
@ NNFW_CODEGEN_PREF_DEFAULT
@ NNFW_CODEGEN_PREF_MEMORY_FIRST
@ NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST
@ NNFW_CODEGEN_PREF_PERFORMANCE_FIRST
@ NNFW_STATUS_INVALID_STATE
Definition onert-micro.h:97

References onert::odc::CODEGEN_PREF_COMPILE_TIME_FIRST, onert::odc::CODEGEN_PREF_DEFAULT, onert::odc::CODEGEN_PREF_MEMORY_FIRST, onert::odc::CODEGEN_PREF_PERFORMANCE_FIRST, NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST, NNFW_CODEGEN_PREF_DEFAULT, NNFW_CODEGEN_PREF_MEMORY_FIRST, NNFW_CODEGEN_PREF_PERFORMANCE_FIRST, NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

Referenced by run_with_auto_compilation().

◆ create() [1/2]

NNFW_STATUS nnfw_session::create ( nnfw_session **  session)
static

Factory method. It creates and initializes nnfw_session.

Note
Use factory instead of constructor to get status

Definition at line 149 of file onert-micro.cpp.

150{
151 if (session == nullptr)
153
154 auto new_session = std::unique_ptr<nnfw_session>(new nnfw_session());
155 *session = new_session.release();
156
157 if (*session == nullptr)
158 {
159 return NNFW_STATUS_ERROR;
160 }
161
163}
SessionID session(const coco::Module *m)
Definition Session.cpp:48
@ NNFW_STATUS_UNEXPECTED_NULL
Definition onert-micro.h:95

References NNFW_STATUS_ERROR, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by nnfw_create_session().

◆ create() [2/2]

static NNFW_STATUS nnfw_session::create ( nnfw_session **  session)
static

Factory method. It creates and initializes nnfw_session.

Note
Use factory instead of constructor to get status

◆ delete_odc_minmax_file()

NNFW_STATUS nnfw_session::delete_odc_minmax_file ( )

Definition at line 2233 of file nnfw_session.cc.

2234{
2235 if (isStateRunning())
2236 {
2237 std::cerr << "invalid state" << std::endl;
2239 }
2240
2241 if (_quant_manager->deleteMinMaxFile())
2242 return NNFW_STATUS_NO_ERROR;
2243 else
2244 return NNFW_STATUS_ERROR;
2245}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ deprecated()

NNFW_STATUS nnfw_session::deprecated ( const char *  msg)
static

Definition at line 1064 of file nnfw_session.cc.

1065{
1066 std::cerr << msg << std::endl;
1068}
@ NNFW_STATUS_DEPRECATED_API

References NNFW_STATUS_DEPRECATED_API.

Referenced by nnfw_apply_tensorinfo(), nnfw_pop_pipeline_output(), nnfw_prepare_pipeline(), nnfw_push_pipeline_input(), and nnfw_set_op_backend().

◆ get_config()

NNFW_STATUS nnfw_session::get_config ( const char *  key,
char *  value,
size_t  value_size 
)

Definition at line 1178 of file nnfw_session.cc.

1179{
1180 if (!isStateModelLoaded())
1182
1183 if (!key || !value)
1185
1186 auto check_boundary = [](size_t dest_size, std::string &src) {
1187 if (dest_size < src.length() + 1 /* for '\0' */)
1188 {
1189 std::cerr << "buffer is small to copy config value." << std::endl;
1190 return false;
1191 }
1192 return true;
1193 };
1194
1195 const std::string skey = key;
1196
1197 if (skey == onert::util::config::BACKENDS)
1198 {
1199 if (_coptions->backend_list.size() == 0)
1200 return NNFW_STATUS_NO_ERROR; // no setting backend is not an error of get_config_str()
1201
1202 auto str =
1203 nnfw::misc::join(_coptions->backend_list.begin(), _coptions->backend_list.end(), ";");
1204
1205 if (!check_boundary(value_size, str))
1206 return NNFW_STATUS_ERROR;
1207
1208 strncpy(value, str.c_str(), value_size);
1209 }
1210 else if (skey == onert::util::config::EXECUTOR)
1211 {
1212 if (!check_boundary(value_size, _coptions->executor))
1213 return NNFW_STATUS_ERROR;
1214
1215 strncpy(value, _coptions->executor.c_str(), _coptions->executor.length());
1216 }
1217 else
1218 {
1219 return NNFW_STATUS_ERROR;
1220 }
1221
1222 return NNFW_STATUS_NO_ERROR;
1223}
str
Definition infer.py:18
std::string join(InputIt first, InputIt last, const std::string &concat)

References nnfw::misc::join(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ get_output()

NNFW_STATUS nnfw_session::get_output ( uint32_t  index,
nnfw_tensorinfo out_info,
const void **  out_buffer 
)

Definition at line 917 of file nnfw_session.cc.

918{
919 if (ti == nullptr)
920 {
921 std::cerr << "Error during nnfw_session::get_output : tensor info is null" << std::endl;
923 }
924
925 if (out_buffer == nullptr)
926 {
927 std::cerr << "Error during nnfw_session::get_output : output buffer is null" << std::endl;
929 }
930
931 if (!isStateFinishedRun())
932 {
933 std::cerr << "Error during nnfw_session::get_output : invalid state" << std::endl;
935 }
936
937 try
938 {
939 if (index >= getOutputSize())
940 {
941 std::cerr << "Error during nnfw_session::get_output, index " << index
942 << " is out of range. (output count: " << getOutputSize() << ")" << std::endl;
943 return NNFW_STATUS_ERROR;
944 }
945
946 if (!_coptions->internal_output_alloc)
947 {
948 std::cerr << "Error during nnfw_session::get_output: "
949 << "internal output allocation is not enabled. "
950 << "Call nnfw_set_prepare_config(session, "
951 "NNFW_PREPARE_CONFIG_ENABLE_INTERNAL_OUTPUT_ALLOC, \"true\") "
952 << "before nnfw_prepare()." << std::endl;
953 return NNFW_STATUS_ERROR;
954 }
955
956 auto io_index = onert::ir::IOIndex{index};
957 const auto &info = _execution->outputInfo(io_index);
958 const auto &shape = info.shape();
959 const auto &dtype = info.typeInfo().type();
960 fillTensorInfo(ti, shape, dtype);
961
962 *out_buffer = _execution->outputBuffer(io_index);
963 }
964 catch (const std::exception &e)
965 {
966 std::cerr << "Error during nnfw_session::get_output : " << e.what() << std::endl;
967 return NNFW_STATUS_ERROR;
968 }
969
971}
A wrapper class for unsigned integral Index NOTE : Max value of the underlying type is used as the in...
Definition Index.h:37
volatile const char info[]
loco::GraphInputIndex index(const TFPlaceholder *node)
Definition TFNode.cpp:54

References info, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ input_size()

NNFW_STATUS nnfw_session::input_size ( uint32_t *  number)

Definition at line 580 of file nnfw_session.cc.

581{
582 if (isStateInitialized()) // Model is not loaded
584
585 try
586 {
587 if (number == nullptr)
588 {
589 std::cerr << "Error during nnfw_session::input_size, number is null pointer." << std::endl;
591 }
592 *number = getInputSize();
593 }
594 catch (const std::exception &e)
595 {
596 std::cerr << "Error during nnfw_session::input_size : " << e.what() << std::endl;
597 return NNFW_STATUS_ERROR;
598 }
600}
int number
Definition jpeg2hdf5.py:87

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation().

◆ input_tensorindex()

NNFW_STATUS nnfw_session::input_tensorindex ( const char *  tensorname,
uint32_t *  index 
)

Definition at line 1297 of file nnfw_session.cc.

1298{
1299 return getTensorIndexImpl(*primary_subgraph(), tensorname, index, true);
1300}

◆ input_tensorinfo()

NNFW_STATUS nnfw_session::input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 824 of file nnfw_session.cc.

825{
826 if (isStateInitialized())
828
829 try
830 {
831 if (ti == nullptr)
832 {
833 std::cerr << "Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer."
834 << std::endl;
836 }
837
838 if (index >= getInputSize())
839 {
840 std::cerr << "Error during nnfw_session::input_tensorinfo, index is out of range."
841 << std::endl;
842 return NNFW_STATUS_ERROR;
843 }
844
846 if (isStateModelLoaded())
847 {
848 const auto &info = _selected_signature.valid() ? _nnpkg->inputInfo(_selected_signature, index)
849 : _nnpkg->inputInfo(input_index);
850 fillTensorInfo(ti, info.shape(), info.typeInfo().type());
851 }
852 else
853 {
854 const auto &info = _execution->inputInfo(input_index);
855 fillTensorInfo(ti, info.shape(), info.typeInfo().type());
856 }
857 }
858 catch (const std::exception &e)
859 {
860 std::cerr << "Error during nnfw_session::input_tensorinfo : " << e.what() << std::endl;
861 return NNFW_STATUS_ERROR;
862 }
864}
bool valid() const
Check whether the value is valid or not.
Definition Index.h:125

References info, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and onert::util::Index< T, DummyTag >::valid().

Referenced by train_set_input().

◆ load_circle_from_buffer()

NNFW_STATUS nnfw_session::load_circle_from_buffer ( uint8_t *  buffer,
size_t  size 
)

Definition at line 278 of file nnfw_session.cc.

279{
280 if (!isStateInitialized())
282
283 if (!buffer)
285
286 if (size == 0)
287 return NNFW_STATUS_ERROR;
288
289 try
290 {
292 // TODO: Update _model_path if necessary
293 _nnpkg = std::make_unique<onert::ir::NNPkg>(std::move(model));
294 _train_info = loadTrainingInfo(_nnpkg->primary_model());
295 _state = State::MODEL_LOADED;
296 }
297 catch (const std::exception &e)
298 {
299 std::cerr << "Error during model loading : " << e.what() << std::endl;
300 return NNFW_STATUS_ERROR;
301 }
303}
std::unique_ptr< ir::Model > loadCircleModel(const std::string &filename)
int32_t size[5]
Definition Slice.cpp:35

References onert::loader::loadCircleModel(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.

◆ load_model_from_file()

NNFW_STATUS nnfw_session::load_model_from_file ( const char *  package_file_path)

Definition at line 272 of file onert-micro.cpp.

273{
274 _model_buf = readFile(file_path);
275 _config.model_ptr = _model_buf.data();
276 _config.model_size = _model_buf.size();
277 // load training info
278 loadTrainingInfo(_config.model_ptr);
279 // TODO: this import should start on nnfw_prepare if inference_interpreter is introduced
280 _train_interpreter->importTrainModel(_config.model_ptr, _config);
282}
OMStatus importTrainModel(char *model_ptr, const OMConfig &config)
DataBuffer readFile(const char *path)

References onert_micro::OMTrainingInterpreter::importTrainModel(), onert_micro::OMConfig::model_ptr, onert_micro::OMConfig::model_size, NNFW_STATUS_NO_ERROR, and readFile().

◆ load_model_from_path()

NNFW_STATUS nnfw_session::load_model_from_path ( const char *  path)

Definition at line 305 of file nnfw_session.cc.

306{
307 if (!isStateInitialized())
309
310 if (!path)
311 {
312 std::cerr << "Path is null." << std::endl;
314 }
315
316 if (!null_terminating(path, MAX_PATH_LENGTH))
317 {
318 std::cerr << "Path is too long" << std::endl;
319 return NNFW_STATUS_ERROR;
320 }
321
322 try
323 {
324 std::filesystem::path filename{path};
325 if (!std::filesystem::is_directory(filename) && filename.has_extension())
326 {
327 std::string model_type = filename.extension().string().substr(1); // + 1 to exclude dot
328 return loadModelFile(filename, model_type);
329 }
330
331 const auto &package_dir = filename;
332
333 // TODO : add support for zipped package file load
334 if (!std::filesystem::is_directory(package_dir))
335 {
336 std::cerr << "invalid path: " << package_dir << std::endl;
337 return NNFW_STATUS_ERROR;
338 }
339
340 const auto manifest_file_name = package_dir / "metadata/MANIFEST";
341 std::ifstream mfs(manifest_file_name);
342
343 // extract the filename of the first(index 0) model
344 // e.g. In MANIFEST file, { "models" : [ "firstmodel.tflite", "2nd.tflite" ] }
345 Json::Value root;
346 mfs >> root;
347 const Json::Value &models = root["models"];
348 const Json::Value &model_types = root["model-types"];
349 const Json::Value &configs = root["configs"];
350
351 if (!configs.empty() && !configs[0].empty())
352 {
353 const auto filepath = package_dir / "metadata" / configs[0].asString();
354
356 if (loadConfigure(filepath.string(), keyValues))
357 {
359 }
360 }
361 _nnpkg = std::make_unique<onert::ir::NNPkg>();
362 auto num_models = models.size();
363 if (num_models == 0 || (num_models - 1) > onert::ir::ModelIndex::max())
364 {
365 std::cerr << "Invalid model size - " << std::to_string(num_models) << std::endl;
366 return NNFW_STATUS_ERROR;
367 }
368
369 // Not support backend mapping to operator index for multiple models yet
370 // TODO Support this
371 if (num_models > 1 && _coptions->manual_scheduler_options.index_to_backend.size() != 0)
372 {
373 std::cerr << "Cannot set backend to operator index for multiple models" << std::endl;
374 return NNFW_STATUS_ERROR;
375 }
376
377 for (uint16_t i = 0; i < num_models; ++i)
378 {
379 const auto model_file_path = package_dir / models[i].asString();
380 const auto model_type = model_types[i].asString();
381 auto model = loadModel(model_file_path.string(), model_type);
382 if (model == nullptr)
383 return NNFW_STATUS_ERROR;
384 _model_path = model_file_path; // TODO Support multiple models
385 model->bindKernelBuilder(_kernel_registry->getBuilder());
386 _nnpkg->push(onert::ir::ModelIndex{i}, std::move(model));
387 }
388
389 _train_info = loadTrainingInfo(_nnpkg->primary_model());
390
391 auto toIODesc = [](std::string str) {
392 auto indices = nnfw::misc::split(str, ':');
393 if (indices.size() != 3)
394 {
395 std::cerr << "IODesc should be 3-tuple." << std::endl;
396 return onert::ir::IODesc{};
397 }
398 auto model_idx = static_cast<uint32_t>(std::stoi(indices.at(0)));
399 auto subgraph_idx = static_cast<uint32_t>(std::stoi(indices.at(1)));
400 auto operand_idx = static_cast<uint32_t>(std::stoi(indices.at(2)));
401 return onert::ir::IODesc{model_idx, subgraph_idx, operand_idx};
402 };
403 // read pkg-inputs and pkg-outputs
404 const Json::Value &pkg_inputs = root["pkg-inputs"];
405 for (uint32_t i = 0; i < pkg_inputs.size(); ++i)
406 _nnpkg->addInput(toIODesc(pkg_inputs[i].asString()));
407 const Json::Value &pkg_outputs = root["pkg-outputs"];
408 for (uint32_t i = 0; i < pkg_outputs.size(); ++i)
409 _nnpkg->addOutput(toIODesc(pkg_outputs[i].asString()));
410 // read model-connect
411 const Json::Value &fromtos = root["model-connect"];
412 for (uint32_t i = 0; i < fromtos.size(); ++i)
413 {
414 const Json::Value &tos = fromtos[i]["to"];
415 for (uint32_t j = 0; j < tos.size(); ++j)
416 _nnpkg->addEdge(toIODesc(fromtos[i]["from"].asString()), toIODesc(tos[j].asString()));
417 }
418
419 _nnpkg->verify();
420 _state = State::MODEL_LOADED;
421 }
422 catch (const std::exception &e)
423 {
424 std::cerr << "Error during model loading : " << e.what() << std::endl;
425 return NNFW_STATUS_ERROR;
426 }
428}
static uint16_t max()
Return max index value.
Definition Index.h:146
Op * root(Op *)
Return the root Op from a given Op node.
Definition Op.cpp:144
std::unique_ptr< mir::Graph > loadModel(std::string predict_net, std::string init_net, const std::vector< std::vector< int > > &input_shapes)
std::vector< std::string > split(const std::string &s, char delim)
std::tuple< ModelIndex, SubgraphIndex, IOIndex > IODesc
Definition NNPkg.h:30
std::unordered_map< std::string, std::string > CfgKeyValues
void setConfigKeyValues(const CfgKeyValues &keyValues)
#define MAX_PATH_LENGTH

References onert::util::Index< uint16_t, ModelIndexTag >::max(), MAX_PATH_LENGTH, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, onert::util::setConfigKeyValues(), and nnfw::misc::split().

◆ output_size()

NNFW_STATUS nnfw_session::output_size ( uint32_t *  number)

Definition at line 602 of file nnfw_session.cc.

603{
604 if (isStateInitialized()) // Model is not loaded
606
607 try
608 {
609 if (number == nullptr)
610 {
611 std::cerr << "Error during nnfw_session::output_size, number is null pointer." << std::endl;
613 }
614 *number = getOutputSize();
615 }
616 catch (const std::exception &e)
617 {
618 std::cerr << "Error during nnfw_session::output_size" << e.what() << std::endl;
619 return NNFW_STATUS_ERROR;
620 }
622}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

Referenced by run_with_auto_compilation().

◆ output_tensorindex()

NNFW_STATUS nnfw_session::output_tensorindex ( const char *  tensorname,
uint32_t *  index 
)

Definition at line 1302 of file nnfw_session.cc.

1303{
1304 return getTensorIndexImpl(*primary_subgraph(), tensorname, index, false);
1305}

◆ output_tensorinfo()

NNFW_STATUS nnfw_session::output_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 866 of file nnfw_session.cc.

867{
868 if (isStateInitialized())
870
871 if (ti == nullptr)
872 {
873 std::cerr << "Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer."
874 << std::endl;
876 }
877
878 try
879 {
880 if (index >= getOutputSize())
881 {
882 std::cerr << "Error during nnfw_session::output_tensorinfo, index is out of range."
883 << std::endl;
884 return NNFW_STATUS_ERROR;
885 }
886
887 const auto output_index = onert::ir::IOIndex{index};
888 if (isStateModelLoaded())
889 {
890 const auto &info = _selected_signature.valid()
891 ? _nnpkg->outputInfo(_selected_signature, index)
892 : _nnpkg->outputInfo(output_index);
893 fillTensorInfo(ti, info.shape(), info.typeInfo().type());
894 }
895 else
896 {
897 auto info = _execution->outputInfo(output_index);
898 fillTensorInfo(ti, info.shape(), info.typeInfo().type());
899 }
900 }
901 catch (const std::exception &e)
902 {
903 std::cerr << "Error during nnfw_session::output_tensorinfo : " << e.what() << std::endl;
904 return NNFW_STATUS_ERROR;
905 }
906
908}

References info, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and onert::util::Index< T, DummyTag >::valid().

◆ prepare()

NNFW_STATUS nnfw_session::prepare ( )

Definition at line 430 of file nnfw_session.cc.

431{
432 // NOTE. If users want to run prepare() more than one time, this could be removed.
433 if (!isStateModelLoaded())
434 {
435 std::cerr << "Error during model prepare : ";
436 if (isStateInitialized())
437 {
438 std::cerr << "prepare should be run once";
439 }
440 else
441 {
442 std::cerr << "invalid state";
443 }
444 std::cerr << std::endl;
446 }
447
448 try
449 {
450 auto compiler =
451 onert::compiler::CompilerFactory::get().create(std::move(_nnpkg), _coptions.get());
452 _compiler_artifact = compiler->compile();
453 _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
454 }
455 catch (const std::exception &e)
456 {
457 std::cerr << "Error during model prepare : " << e.what() << std::endl;
458 return NNFW_STATUS_ERROR;
459 }
460
461 _state = State::PREPARED;
463}
static CompilerFactory & get()
std::unique_ptr< ICompiler > create(std::unique_ptr< ir::NNPkg > nnpkg, CompilerOptions *copts, const ir::train::TrainingInfo *training_info=nullptr)
Create ICompiler instance. Ownership of nnpkg is moved to ICompiler instance.

References onert::compiler::CompilerFactory::create(), onert::compiler::CompilerFactory::get(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

Referenced by run_with_auto_compilation().

◆ quantize()

NNFW_STATUS nnfw_session::quantize ( )

Definition at line 2027 of file nnfw_session.cc.

2028{
2029 try
2030 {
2031 if (isStateInitialized() || isStateRunning())
2032 {
2033 std::cerr << "invalid state" << std::endl;
2035 }
2036
2037 auto result = _quant_manager->quantize(_model_path.string());
2038 if (!result)
2040
2041 // Replace model
2042 // TODO Support buffer replace, not file reload
2043 return loadModelFile(_quant_manager->exportModelPath(), "circle");
2044 }
2045 catch (const std::exception &e)
2046 {
2047 std::cerr << "Error during nnfw_session::quantize : " << e.what() << std::endl;
2048 return NNFW_STATUS_ERROR;
2049 }
2050}
result
Definition infer.py:103

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ register_custom_operation()

NNFW_STATUS nnfw_session::register_custom_operation ( const std::string &  id,
nnfw_custom_eval  eval_func 
)

Definition at line 910 of file nnfw_session.cc.

912{
913 _kernel_registry->registerKernel(id, eval_func);
915}

References NNFW_STATUS_NO_ERROR.

◆ reset_execute_config()

NNFW_STATUS nnfw_session::reset_execute_config ( )

Definition at line 2204 of file nnfw_session.cc.

2205{
2206 if (!isStatePreparedOrFinishedRun())
2207 {
2208 std::cerr << "Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
2210 }
2211
2212 _execution->executionOptions().dump_minmax = false;
2213 _execution->executionOptions().trace = false;
2214 _execution->executionOptions().profile = false;
2215
2216 return NNFW_STATUS_NO_ERROR;
2217}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ reset_prepare_config()

NNFW_STATUS nnfw_session::reset_prepare_config ( )

Definition at line 2161 of file nnfw_session.cc.

2162{
2163 if (!isStateModelLoaded())
2164 {
2165 std::cerr << "Error during nnfw_session::reset_prepare_config : Invalid state" << std::endl;
2167 }
2168
2169 _coptions->he_profiling_mode = false;
2170
2171 return NNFW_STATUS_NO_ERROR;
2172}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ run()

NNFW_STATUS nnfw_session::run ( )

Definition at line 465 of file nnfw_session.cc.

466{
467 if (!isStatePreparedOrFinishedRun())
468 {
469 std::cerr << "Error during nnfw_session::run : "
470 << "run should be run after prepare" << std::endl;
472 }
473
474 try
475 {
476 _execution->execute();
477 }
479 {
480 // Currently insufficient buffer always means output buffer.
481 std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
483 }
484 catch (const std::exception &e)
485 {
486 std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
487 return NNFW_STATUS_ERROR;
488 }
489
490 _state = State::FINISHED_RUN;
492}
const char * what() const noexcept override
Definition Exceptions.h:31
@ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE

References NNFW_STATUS_ERROR, NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and onert::OnertException::what().

◆ run_async()

NNFW_STATUS nnfw_session::run_async ( )

Definition at line 494 of file nnfw_session.cc.

495{
496 if (!isStatePreparedOrFinishedRun())
497 {
498 std::cerr << "Error during nnfw_session::run_async : "
499 << "run_async should be run after prepare" << std::endl;
500 return NNFW_STATUS_INVALID_STATE;
501 }
502
503 _execution->startExecute();
504
505 _state = State::RUNNING;
506 return NNFW_STATUS_NO_ERROR;
507}

References NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ run_with_auto_compilation()

NNFW_STATUS nnfw_session::run_with_auto_compilation ( const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Definition at line 2248 of file nnfw_session.cc.

2249{
2250
2251 if (!isStatePreparedOrFinishedRun())
2252 {
2253 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2254 << "run should be after preparation" << std::endl;
2255 return NNFW_STATUS_INVALID_STATE;
2256 }
2257
2258 // Check quantization and code-generation parameters
2259 std::string target_str{target};
2260 if (_quant_manager->exportModelPath().empty() || _codegen_manager->exportModelPath().empty() ||
2261 target_str.empty() || target_str.substr(target_str.size() - 4) != "-gen")
2262 {
2263 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2264 << "quantization and code generation parameters should be set" << std::endl;
2265 return NNFW_STATUS_ERROR;
2266 }
2267
2268 // Odc: auto compilation with hidden switching mechanizm
2269 // Check is model already quantized or compiled
2270 std::ifstream file_quantized_model(_quant_manager->exportModelPath());
2271 std::ifstream file_compiled_model(_codegen_manager->exportModelPath());
2272
2273 if (!file_quantized_model.good() && !file_compiled_model.good())
2274 {
2275 // Run float model and try to quantize it
2276 {
2277 // Save execution options
2278 auto saved_options = _execution->executionOptions();
2279 // turn on minmax recording
2280 _execution->executionOptions().dump_minmax = true;
2281
2282 try
2283 {
2284 _execution->execute();
2285 }
2286 catch (const onert::InsufficientBufferSizeException &e)
2287 {
2288 // Currently insufficient buffer always means output buffer.
2289 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2290 << std::endl;
2291 return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
2292 }
2293 catch (const std::exception &e)
2294 {
2295 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2296 << std::endl;
2297 return NNFW_STATUS_ERROR;
2298 }
2299
2300 _state = State::FINISHED_RUN;
2301
2302 // restore min_max option to user defined state
2303 _execution->executionOptions().dump_minmax = saved_options.dump_minmax;
2304
2305 // if enough statistics are collected, then run the quantization
2306 if (_quant_manager->readyForQuantize())
2307 {
2308 try
2309 {
2310 if (isStateInitialized() || isStateRunning())
2311 {
2312 std::cerr << "invalid state" << std::endl;
2313 return NNFW_STATUS_INVALID_STATE;
2314 }
2315
2316 auto result = _quant_manager->quantize(_model_path);
2317 if (!result)
2318 return NNFW_STATUS_ERROR;
2319
2320 // remove minmax file
2321 result = _quant_manager->deleteMinMaxFile();
2322 if (!result)
2323 return NNFW_STATUS_ERROR;
2324 }
2325 catch (const std::exception &e)
2326 {
2327 std::cerr
2328 << "Error during nnfw_session::run_with_auto_compilation in quantize operation: "
2329 << e.what() << std::endl;
2330 return NNFW_STATUS_ERROR;
2331 }
2332 }
2333 }
2334 }
2335 else
2336 {
2337 // run compiled or quantized model
2338 NNFW_STATUS status;
2339
2340 // turn off minmax recording
2341 _execution->executionOptions().dump_minmax = false;
2342
2343 // save initial buffers if quantized model or compiled model is not loaded
2344 if (_autoCompilationState == nnfw_session::AutoCompilationState::INITIAL_STATE)
2345 {
2346 auto dotidx = _codegen_manager->exportModelPath().rfind('.');
2347 if (dotidx == std::string::npos)
2348 {
2349 std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid compiled "
2350 "model path. Please use a "
2351 "path that includes the extension."
2352 << std::endl;
2353 return NNFW_STATUS_ERROR;
2354 }
2355
2356 std::string compiled_model_type =
2357 _codegen_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot
2358
2359 dotidx = _quant_manager->exportModelPath().rfind('.');
2360 if (dotidx == std::string::npos)
2361 {
2362 std::cerr << "Error during nnfw_session::run_with_auto_compilation : Invalid quantized "
2363 "model path. Please use a "
2364 "path that includes the extension."
2365 << std::endl;
2366 return NNFW_STATUS_ERROR;
2367 }
2368 std::string quantized_model_type =
2369 _quant_manager->exportModelPath().substr(dotidx + 1); // + 1 to exclude dot
2370
2371 // Save initial (float) input and output buffers
2372 auto input_size = _execution->inputSize();
2373 auto output_size = _execution->outputSize();
2374
2375 std::vector<const void *> _input_buffers;
2376 std::vector<void *> _output_buffers;
2377
2378 using namespace onert::ir;
2379 // Copy execution context for backup: I/O buffer, shape, and execution options
2380 const onert::exec::ExecutionContext ctx_backup = _execution->context();
2381
2382 // Set compile option to use float type
2383 for (auto input_index = IOIndex{0}; input_index < IOIndex{input_size}; input_index++)
2384 _coptions->input_type.insert_or_assign(input_index, TypeInfo(DataType::FLOAT32));
2385
2386 // Save Outputs buffers
2387 for (auto output_index = IOIndex{0}; output_index < IOIndex{output_size}; output_index++)
2388 _coptions->output_type.insert_or_assign(output_index, TypeInfo(DataType::FLOAT32));
2389
2390 // if there is compiled model - try to load it
2391 if (file_compiled_model.good())
2392 {
2393 // load compiled model
2394 status = loadModelFile(_codegen_manager->exportModelPath(), compiled_model_type);
2395 if (status == NNFW_STATUS_NO_ERROR)
2396 {
2397 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2398 }
2399 }
2400 else // there is no compiled model - try to compile and load it
2401 {
2402
2403 // avoiding code duplication use existing "codegen" function. Set up _model_path for the
2404 // codegen function.
2405 // TODO: change it if codegen function will be generalized
2406 _model_path = _quant_manager->exportModelPath();
2407
2408 // try to compile and load compiled model
2409 status = codegen(target, pref);
2410 if (status == NNFW_STATUS_NO_ERROR)
2411 {
2412 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2413 // TODO delete quantized model
2414 }
2415 }
2416
2417 // loading compiled model is fail - try to load quantized model
2418 if (_autoCompilationState != nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED)
2419 {
2420 // load quantized model
2421 status = loadModelFile(_quant_manager->exportModelPath(), quantized_model_type);
2422 if (status != NNFW_STATUS_NO_ERROR)
2423 return status;
2424 else
2425 _autoCompilationState = nnfw_session::AutoCompilationState::QUANTIZED_MODEL_LOADED;
2426 }
2427
2428 status = prepare();
2429 if (status != NNFW_STATUS_NO_ERROR)
2430 return status;
2431
2432 // Restore execution context: I/O buffer, shape, and execution options
2433 _execution->restoreContext(ctx_backup);
2434 }
2435
2436 // Run quantized model
2437 if (!isStatePreparedOrFinishedRun())
2438 {
2439 std::cerr << "Error during nnfw_session::run_with_auto_compilation : "
2440 << "run should be run after prepare" << std::endl;
2441 return NNFW_STATUS_INVALID_STATE;
2442 }
2443
2444 try
2445 {
2446 _execution->execute();
2447 }
2448 catch (const onert::InsufficientBufferSizeException &e)
2449 {
2450 // Currently insufficient buffer always means output buffer.
2451 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2452 << std::endl;
2453 return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
2454 }
2455 catch (const std::exception &e)
2456 {
2457 std::cerr << "Error during nnfw_session::run_with_auto_compilation : " << e.what()
2458 << std::endl;
2459 return NNFW_STATUS_ERROR;
2460 }
2461
2462 _state = State::FINISHED_RUN;
2463 }
2464
2465 return NNFW_STATUS_NO_ERROR;
2466}
NNFW_STATUS
Result values returned from a call to an API function.
Definition onert-micro.h:86
NNFW_STATUS output_size(uint32_t *number)
NNFW_STATUS prepare()
NNFW_STATUS input_size(uint32_t *number)
NNFW_STATUS codegen(const char *target, NNFW_CODEGEN_PREF pref)

References codegen(), input_size(), NNFW_STATUS_ERROR, NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, output_size(), prepare(), and onert::OnertException::what().

◆ set_available_backends()

NNFW_STATUS nnfw_session::set_available_backends ( const char *  backends)

Definition at line 973 of file nnfw_session.cc.

974{
975 if (!isStateModelLoaded())
976 return NNFW_STATUS_INVALID_STATE;
977
978 try
979 {
980 if (!backends)
981 return NNFW_STATUS_UNEXPECTED_NULL;
982 if (null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false)
983 return NNFW_STATUS_ERROR;
984
985 using namespace onert::util;
986
987 _coptions->backend_list = nnfw::misc::split(std::string{backends}, ';');
988 }
989 catch (const std::exception &e)
990 {
991 std::cerr << "Error during nnfw_session::set_available_backends : " << e.what() << std::endl;
992 return NNFW_STATUS_ERROR;
993 }
994 return NNFW_STATUS_NO_ERROR;
995}
#define MAX_BACKEND_NAME_LENGTH

References MAX_BACKEND_NAME_LENGTH, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and nnfw::misc::split().

◆ set_backends_per_operation()

NNFW_STATUS nnfw_session::set_backends_per_operation ( const char *  backend_settings)

Set backends with string-encoded mapping from operation index to backend type (cpu, acl_cl)

Definition at line 1307 of file nnfw_session.cc.

1308{
1309 if (backend_settings == NULL)
1310 return NNFW_STATUS_ERROR;
1311
1312 if (!isStateModelLoaded())
1313 return NNFW_STATUS_INVALID_STATE;
1314
1315 // Not supported multiple model
1316 // TODO Support this
1317 if (_nnpkg->model_count() > 1)
1318 {
1319 std::cerr << "Not supported multiple model" << std::endl;
1320 return NNFW_STATUS_ERROR;
1321 }
1322
1323 try
1324 {
1325 // Backend for all
1326 auto &ms_options = _coptions->manual_scheduler_options;
1327 ms_options.setBackendMap(std::string{backend_settings});
1328 }
1329 catch (const std::exception &e)
1330 {
1331 std::cerr << "Error during nnfw_session::set_backends_per_operation" << e.what() << std::endl;
1332 return NNFW_STATUS_ERROR;
1333 }
1334
1335 return NNFW_STATUS_NO_ERROR;
1336}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_codegen_model_path()

NNFW_STATUS nnfw_session::set_codegen_model_path ( const char *  path)

Definition at line 2052 of file nnfw_session.cc.

2053{
2054 try
2055 {
2056 if (isStateInitialized() || isStateRunning())
2057 {
2058 std::cerr << "invalid state" << std::endl;
2059 return NNFW_STATUS_INVALID_STATE;
2060 }
2061
2062 assert(_codegen_manager != nullptr);
2063 _codegen_manager->exportModelPath(std::string(path));
2064 }
2065 catch (const std::exception &e)
2066 {
2067 std::cerr << "Error during nnfw_session::set_codegen_model_path : " << e.what() << std::endl;
2068 return NNFW_STATUS_ERROR;
2069 }
2070
2071 return NNFW_STATUS_NO_ERROR;
2072}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_config()

NNFW_STATUS nnfw_session::set_config ( const char *  key,
const char *  value 
)

Definition at line 1070 of file nnfw_session.cc.

1071{
1072 if (!isStateModelLoaded())
1073 return NNFW_STATUS_INVALID_STATE;
1074
1075 if (!key || !value)
1076 return NNFW_STATUS_UNEXPECTED_NULL;
1077
1078 using namespace onert::util;
1079
1080 const std::string skey = key;
1081
1082 if (skey == config::GRAPH_DOT_DUMP)
1083 {
1084 _coptions->graph_dump_level = toInt(value);
1085 }
1086 else if (skey == config::EXECUTOR)
1087 {
1088 _coptions->executor = value;
1089 }
1090 else if (skey == config::OP_BACKEND_ALLOPS)
1091 {
1092 _coptions->manual_scheduler_options.backend_for_all = value;
1093 }
1094 else if (skey == config::USE_SCHEDULER)
1095 {
1096 _coptions->he_scheduler = toBool(value);
1097 }
1098 else if (skey == config::PROFILING_MODE)
1099 {
1100 _coptions->he_profiling_mode = toBool(value);
1101 }
1102 else if (skey == config::ENABLE_LOG || skey == config::NUM_THREADS)
1103 {
1104 onert::util::CfgKeyValues keyValues;
1105 keyValues[skey] = std::string(value);
1106 onert::util::setConfigKeyValues(keyValues);
1107
1108 if (skey == config::ENABLE_LOG)
1109 {
1110 UPDATE_VERBOSE_CONFIG();
1111 }
1112 }
1113 else
1114 {
1115 return NNFW_STATUS_ERROR;
1116 }
1117 return NNFW_STATUS_NO_ERROR;
1118}
int toInt(const std::string &val)
bool toBool(const std::string &val)
#define UPDATE_VERBOSE_CONFIG()
Definition logging.h:81

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, onert::util::setConfigKeyValues(), and UPDATE_VERBOSE_CONFIG.

◆ set_execute_config()

NNFW_STATUS nnfw_session::set_execute_config ( const NNFW_RUN_CONFIG  key,
const char *  value 
)

Definition at line 2174 of file nnfw_session.cc.

2175{
2176 if (!isStatePreparedOrFinishedRun())
2177 {
2178 std::cerr << "Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
2179 return NNFW_STATUS_INVALID_STATE;
2180 }
2181
2182 switch (key)
2183 {
2184 case NNFW_RUN_CONFIG_DUMP_MINMAX:
2185 if (_coptions->workspace_dir.empty())
2186 return NNFW_STATUS_ERROR;
2187 _execution->executionOptions().dump_minmax = true;
2188 break;
2189 case NNFW_RUN_CONFIG_TRACE:
2190 if (_coptions->workspace_dir.empty())
2191 return NNFW_STATUS_ERROR;
2192 _execution->executionOptions().trace = true;
2193 break;
2194 case NNFW_RUN_CONFIG_PROFILE:
2195 _execution->executionOptions().profile = true;
2196 break;
2197 default:
2198 return NNFW_STATUS_ERROR;
2199 }
2200
2201 return NNFW_STATUS_NO_ERROR;
2202}
@ NNFW_RUN_CONFIG_PROFILE
@ NNFW_RUN_CONFIG_TRACE
@ NNFW_RUN_CONFIG_DUMP_MINMAX

References NNFW_RUN_CONFIG_DUMP_MINMAX, NNFW_RUN_CONFIG_PROFILE, NNFW_RUN_CONFIG_TRACE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_input()

NNFW_STATUS nnfw_session::set_input ( uint32_t  index,
NNFW_TYPE  type,
const void *  buffer,
size_t  length 
)

Definition at line 524 of file nnfw_session.cc.

525{
526 if (!isStatePreparedOrFinishedRun())
527 {
528 std::cerr << "Error during nnfw_session::set_input : invalid state" << std::endl;
529 return NNFW_STATUS_INVALID_STATE;
530 }
531
532 if (!buffer && length != 0)
533 {
534 std::cerr
535 << "Error during nnfw_session::set_input : given buffer is NULL but the length is not 0"
536 << std::endl;
537 return NNFW_STATUS_ERROR;
538 }
539
540 try
541 {
542 _execution->setInput(onert::ir::IOIndex(index), buffer, length);
543 }
544 catch (const std::exception &e)
545 {
546 std::cerr << "Error during nnfw_session::set_input : " << e.what() << std::endl;
547 return NNFW_STATUS_ERROR;
548 }
549 return NNFW_STATUS_NO_ERROR;
550}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_input_layout()

NNFW_STATUS nnfw_session::set_input_layout ( uint32_t  index,
NNFW_LAYOUT  layout 
)

Definition at line 624 of file nnfw_session.cc.

625{
626 if (!isStateModelLoaded())
627 {
628 std::cerr << "Error during nnfw_session::set_input_layout : "
629 << "run should be run before prepare" << std::endl;
630 return NNFW_STATUS_INVALID_STATE;
631 }
632
633 try
634 {
635 if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
636 layout != NNFW_LAYOUT_CHANNELS_LAST)
637 {
638 std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
639 return NNFW_STATUS_ERROR;
640 }
641
642 if (_selected_signature.valid())
643 {
644 // TODO Support this
645 std::cerr << "Error during nnfw_session::set_input_layout : "
646 << "set_input_layout after signature selection is not supported yet" << std::endl;
647 return NNFW_STATUS_ERROR;
648 }
649
650 // Insert if not exists, otherwise update the value
651 _coptions->input_layout[onert::ir::IOIndex{index}] = convertLayout(layout);
652 }
653 catch (const std::exception &e)
654 {
655 std::cerr << "Error during nnfw_session::set_input_layout : " << e.what() << std::endl;
656 return NNFW_STATUS_ERROR;
657 }
658 return NNFW_STATUS_NO_ERROR;
659}
@ NNFW_LAYOUT_CHANNELS_LAST
Definition nnfw.h:141
@ NNFW_LAYOUT_CHANNELS_FIRST
Definition nnfw.h:146
@ NNFW_LAYOUT_NONE
Definition nnfw.h:136

References NNFW_LAYOUT_CHANNELS_FIRST, NNFW_LAYOUT_CHANNELS_LAST, NNFW_LAYOUT_NONE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and onert::util::Index< T, DummyTag >::valid().

◆ set_input_tensorinfo()

NNFW_STATUS nnfw_session::set_input_tensorinfo ( uint32_t  index,
const nnfw_tensorinfo ti 
)

Definition at line 773 of file nnfw_session.cc.

774{
775 // sanity check
776 {
777 if (isStateInitialized())
778 {
779 std::cerr << "Error during set_input_tensorinfo : should be run after load_model"
780 << std::endl;
781 return NNFW_STATUS_INVALID_STATE;
782 }
783
784 if (ti == nullptr)
785 {
786 std::cerr << "Error during nnfw_session::set_input_tensorinfo : tensorinfo is null"
787 << std::endl;
788 return NNFW_STATUS_UNEXPECTED_NULL;
789 }
790
791 if (ti->rank < 0 || ti->rank > NNFW_MAX_RANK)
792 {
793 std::cerr << "unsupported rank: " << ti->rank << std::endl;
794 return NNFW_STATUS_ERROR;
795 }
796
797 for (int32_t i = 0; i < ti->rank; ++i)
798 {
799 if (ti->dims[i] <= 0)
800 {
801 std::cerr << "dim must be positive integer but was " << ti->dims[i] << std::endl;
802 return NNFW_STATUS_ERROR;
803 }
804 }
805 }
806
807 onert::ir::Shape new_shape(ti->rank);
808 for (int32_t i = 0; i < ti->rank; i++)
809 new_shape.dim(i) = ti->dims[i];
810
811 const auto input_index = onert::ir::IOIndex(index);
812 if (!isStatePreparedOrFinishedRun())
813 {
814 // In this case, if we apply input shape, it will propagate after compilation and excution
815 _selected_signature.valid() ? _nnpkg->changeInputShape(_selected_signature, index, new_shape)
816 : _nnpkg->changeInputShape(input_index, new_shape);
817 }
818 else // when called after nnfw_session::prepare()
819 _execution->changeInputShape(input_index, new_shape);
820
821 return NNFW_STATUS_NO_ERROR;
822}
::onert::util::Index< uint32_t, IOIndexTag > IOIndex
Definition Index.h:36
#define NNFW_MAX_RANK
Maximum rank expressible with nnfw.
int32_t dims[NNFW_MAX_RANK]

References nnfw_tensorinfo::dims, NNFW_MAX_RANK, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, nnfw_tensorinfo::rank, and onert::util::Index< T, DummyTag >::valid().

◆ set_input_type()

NNFW_STATUS nnfw_session::set_input_type ( uint32_t  index,
NNFW_TYPE  type 
)

Definition at line 699 of file nnfw_session.cc.

700{
701 if (!isStateModelLoaded())
702 {
703 std::cerr << "Error during nnfw_session::set_input_type : "
704 << "set_input_type should be called before prepare" << std::endl;
705 return NNFW_STATUS_INVALID_STATE;
706 }
707
708 try
709 {
710 if (type != NNFW_TYPE_TENSOR_FLOAT32)
711 {
712 std::cerr << "Error during nnfw_session::set_input_type, not supported type" << std::endl;
713 return NNFW_STATUS_ERROR;
714 }
715
716 if (_selected_signature.valid())
717 {
718 // TODO Support this
719 std::cerr << "Error during nnfw_session::set_input_type : "
720 << "set_input_type after signature selection is not supported yet" << std::endl;
721 return NNFW_STATUS_ERROR;
722 }
723
724 _coptions->input_type.insert_or_assign(onert::ir::IOIndex{index},
725 onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
726 }
727 catch (const std::exception &e)
728 {
729 std::cerr << "Error during nnfw_session::set_input_type : " << e.what() << std::endl;
730 return NNFW_STATUS_ERROR;
731 }
732
733 return NNFW_STATUS_NO_ERROR;
734}
@ NNFW_TYPE_TENSOR_FLOAT32
Definition onert-micro.h:77

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_TYPE_TENSOR_FLOAT32, and onert::util::Index< T, DummyTag >::valid().

◆ set_odc_param_minmax_records_count()

NNFW_STATUS nnfw_session::set_odc_param_minmax_records_count ( int  minmax_records_count)

Definition at line 2219 of file nnfw_session.cc.

2220{
2221 if (isStateInitialized() || isStateRunning())
2222 {
2223 std::cerr << "invalid state" << std::endl;
2224 return NNFW_STATUS_INVALID_STATE;
2225 }
2226
2227 if (_quant_manager->setMinMaxRecordsThreshold(minmax_records_count))
2228 return NNFW_STATUS_NO_ERROR;
2229 else
2230 return NNFW_STATUS_ERROR;
2231}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_output()

NNFW_STATUS nnfw_session::set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

Definition at line 552 of file nnfw_session.cc.

553{
554 if (!isStatePreparedOrFinishedRun())
555 {
556 std::cerr << "Error during nnfw_session::set_output : invalid state" << std::endl;
557 return NNFW_STATUS_INVALID_STATE;
558 }
559
560 if (!buffer && length != 0)
561 {
562 std::cerr
563 << "Error during nnfw_session::set_output : given buffer is NULL but the length is not 0"
564 << std::endl;
565 return NNFW_STATUS_ERROR;
566 }
567
568 try
569 {
570 _execution->setOutput(onert::ir::IOIndex(index), buffer, length);
571 }
572 catch (const std::exception &e)
573 {
574 std::cerr << "Error during nnfw_session::set_output : " << e.what() << std::endl;
575 return NNFW_STATUS_ERROR;
576 }
577 return NNFW_STATUS_NO_ERROR;
578}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_output_layout()

NNFW_STATUS nnfw_session::set_output_layout ( uint32_t  index,
NNFW_LAYOUT  layout 
)

Definition at line 661 of file nnfw_session.cc.

662{
663 if (!isStateModelLoaded())
664 {
665 std::cerr << "Error during nnfw_session::set_output_layout : "
666 << "run should be run before prepare" << std::endl;
667 return NNFW_STATUS_INVALID_STATE;
668 }
669
670 try
671 {
672 if (layout != NNFW_LAYOUT_NONE && layout != NNFW_LAYOUT_CHANNELS_FIRST &&
673 layout != NNFW_LAYOUT_CHANNELS_LAST)
674 {
675 std::cerr << "Error during nnfw_session::set_output_layout, not supported layout"
676 << std::endl;
677 return NNFW_STATUS_ERROR;
678 }
679
680 if (_selected_signature.valid())
681 {
682 // TODO Support this
683 std::cerr << "Error during nnfw_session::set_output_layout : "
684 << "set_output_layout after signature selection is not supported yet" << std::endl;
685 return NNFW_STATUS_ERROR;
686 }
687
688 // Insert if not exists, otherwise update the value
689 _coptions->output_layout[onert::ir::IOIndex{index}] = convertLayout(layout);
690 }
691 catch (const std::exception &e)
692 {
693 std::cerr << "Error during nnfw_session::set_output_layout : " << e.what() << std::endl;
694 return NNFW_STATUS_ERROR;
695 }
696 return NNFW_STATUS_NO_ERROR;
697}

References NNFW_LAYOUT_CHANNELS_FIRST, NNFW_LAYOUT_CHANNELS_LAST, NNFW_LAYOUT_NONE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and onert::util::Index< T, DummyTag >::valid().

◆ set_output_type()

NNFW_STATUS nnfw_session::set_output_type ( uint32_t  index,
NNFW_TYPE  type 
)

Definition at line 736 of file nnfw_session.cc.

737{
738 if (!isStateModelLoaded())
739 {
740 std::cerr << "Error during nnfw_session::set_output_type : "
741 << "set_output_type should be called before prepare" << std::endl;
742 return NNFW_STATUS_INVALID_STATE;
743 }
744
745 try
746 {
747 if (type != NNFW_TYPE_TENSOR_FLOAT32)
748 {
749 std::cerr << "Error during nnfw_session::set_output_type, not supported type" << std::endl;
750 return NNFW_STATUS_ERROR;
751 }
752
753 if (_selected_signature.valid())
754 {
755 // TODO Support this
756 std::cerr << "Error during nnfw_session::set_output_type : "
757 << "set_output_type after signature selection is not supported yet" << std::endl;
758 return NNFW_STATUS_ERROR;
759 }
760
761 _coptions->output_type.insert_or_assign(onert::ir::IOIndex{index},
762 onert::ir::TypeInfo(onert::ir::DataType::FLOAT32));
763 }
764 catch (const std::exception &e)
765 {
766 std::cerr << "Error during nnfw_session::set_output_type : " << e.what() << std::endl;
767 return NNFW_STATUS_ERROR;
768 }
769
770 return NNFW_STATUS_NO_ERROR;
771}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_TYPE_TENSOR_FLOAT32, and onert::util::Index< T, DummyTag >::valid().

◆ set_prepare_config()

NNFW_STATUS nnfw_session::set_prepare_config ( const NNFW_PREPARE_CONFIG  key,
const char *  value 
)

Definition at line 2138 of file nnfw_session.cc.

2139{
2140 if (!isStateModelLoaded())
2141 {
2142 std::cerr << "Error during nnfw_session::set_prepare_config : Invalid state" << std::endl;
2143 return NNFW_STATUS_INVALID_STATE;
2144 }
2145
2146 switch (key)
2147 {
2148 case NNFW_PREPARE_CONFIG_PROFILE:
2149 _coptions->he_profiling_mode = true;
2150 break;
2151 case NNFW_ENABLE_INTERNAL_OUTPUT_ALLOC:
2152 _coptions->internal_output_alloc = true;
2153 break;
2154 default:
2155 return NNFW_STATUS_ERROR;
2156 }
2157
2158 return NNFW_STATUS_NO_ERROR;
2159}
@ NNFW_PREPARE_CONFIG_PROFILE
@ NNFW_ENABLE_INTERNAL_OUTPUT_ALLOC

References NNFW_ENABLE_INTERNAL_OUTPUT_ALLOC, NNFW_PREPARE_CONFIG_PROFILE, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_quantization_type()

NNFW_STATUS nnfw_session::set_quantization_type ( NNFW_QUANTIZE_TYPE  qtype)

Definition at line 1966 of file nnfw_session.cc.

1967{
1969 try
1970 {
1971 if (isStateInitialized() || isStateRunning())
1972 {
1973 std::cerr << "invalid state" << std::endl;
1974 return NNFW_STATUS_INVALID_STATE;
1975 }
1976
1978 switch (qtype)
1979 {
1982 break;
1985 break;
1988 break;
1991 break;
1992 default:
1994 }
1995 _quant_manager->quantizeType(odc_qtype);
1996 }
1997 catch (const std::exception &e)
1998 {
1999 std::cerr << "Error during nnfw_session::set_quantization_type : " << e.what() << std::endl;
2000 return NNFW_STATUS_ERROR;
2001 }
2002
2003 return NNFW_STATUS_NO_ERROR;
2004}
@ ODC_QTYPE_WO_I8_SYM
@ ODC_QTYPE_WO_I16_SYM
@ NNFW_QUANTIZE_TYPE_WO_I16_SYM
@ NNFW_QUANTIZE_TYPE_U8_ASYM
@ NNFW_QUANTIZE_TYPE_I16_SYM
@ NNFW_QUANTIZE_TYPE_WO_I8_SYM

References NNFW_QUANTIZE_TYPE_I16_SYM, NNFW_QUANTIZE_TYPE_U8_ASYM, NNFW_QUANTIZE_TYPE_WO_I16_SYM, NNFW_QUANTIZE_TYPE_WO_I8_SYM, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, onert::odc::ODC_QTYPE_I16_SYM, onert::odc::ODC_QTYPE_NOT_SET, onert::odc::ODC_QTYPE_U8_ASYM, onert::odc::ODC_QTYPE_WO_I16_SYM, and onert::odc::ODC_QTYPE_WO_I8_SYM.

◆ set_quantized_model_path()

NNFW_STATUS nnfw_session::set_quantized_model_path ( const char *  path)

Definition at line 2006 of file nnfw_session.cc.

2007{
2008 try
2009 {
2010 if (isStateInitialized() || isStateRunning())
2011 {
2012 std::cerr << "invalid state" << std::endl;
2013 return NNFW_STATUS_INVALID_STATE;
2014 }
2015
2016 _quant_manager->exportModelPath(std::string(path));
2017 }
2018 catch (const std::exception &e)
2019 {
2020 std::cerr << "Error during nnfw_session::set_quantized_model_path : " << e.what() << std::endl;
2021 return NNFW_STATUS_ERROR;
2022 }
2023
2024 return NNFW_STATUS_NO_ERROR;
2025}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, and NNFW_STATUS_NO_ERROR.

◆ set_signature_for_tensorinfo()

NNFW_STATUS nnfw_session::set_signature_for_tensorinfo ( const char *  signature)

Definition at line 1012 of file nnfw_session.cc.

1013{
1014 if (!signature)
1015 return NNFW_STATUS_UNEXPECTED_NULL;
1016
1017 if (!isStateModelLoaded())
1018 {
1019 std::cerr << "Error during nnfw_session::set_signature_for_tensorinfo : invalid state"
1020 << std::endl;
1021 return NNFW_STATUS_INVALID_STATE;
1022 }
1023
1024 for (const auto &[subg_idx, sig_str] : _signature_map)
1025 {
1026 if (sig_str == std::string(signature))
1027 {
1028 _selected_signature = subg_idx;
1029
1030 return NNFW_STATUS_NO_ERROR;
1031 }
1032 }
1033
1034 std::cerr << "Error during nnfw_session::set_signature_for_tensorinfo : cannot find signature \""
1035 << signature << "\"" << std::endl;
1036 return NNFW_STATUS_ERROR;
1037}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ set_signature_run()

NNFW_STATUS nnfw_session::set_signature_run ( const char *  signature)

Definition at line 1039 of file nnfw_session.cc.

1040{
1041 if (!signature)
1042 return NNFW_STATUS_UNEXPECTED_NULL;
1043
1044 if (!isStatePreparedOrFinishedRun())
1045 {
1046 std::cerr << "Error during nnfw_session::set_signature_run : invalid state" << std::endl;
1047 return NNFW_STATUS_INVALID_STATE;
1048 }
1049
1050 for (const auto &[subg_idx, sig_str] : _signature_map)
1051 {
1052 if (sig_str == std::string(signature))
1053 {
1054 _execution =
1055 std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors, subg_idx);
1056 return NNFW_STATUS_NO_ERROR;
1057 }
1058 }
1059
1060 std::cerr << "Error during nnfw_session::set_signature_run : cannot find signature" << std::endl;
1061 return NNFW_STATUS_ERROR;
1062}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ set_workspace()

NNFW_STATUS nnfw_session::set_workspace ( const char *  dir)

Definition at line 997 of file nnfw_session.cc.

998{
999 // TODO Check dir read & write permission
1000
1001 if (!dir)
1002 return NNFW_STATUS_UNEXPECTED_NULL;
1003
1004 if (!isStateInitialized())
1005 return NNFW_STATUS_INVALID_STATE;
1006
1007 _coptions->workspace_dir = std::string(dir);
1008
1009 return NNFW_STATUS_NO_ERROR;
1010}

References NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, and NNFW_STATUS_UNEXPECTED_NULL.

◆ train_expected_tensorinfo() [1/2]

NNFW_STATUS nnfw_session::train_expected_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 1615 of file nnfw_session.cc.

1616{
1617 if (!isStatePreparedOrFinishedTraining())
1618 {
1619 std::cerr << "Error during nnfw_session::train_expected_tensorinfo : invalid state"
1620 << std::endl;
1621 return NNFW_STATUS_INVALID_STATE;
1622 }
1623
1624 // Check index is valid: [0, getExpectedSize())
1625
1626 // NYI
1627 (void)index;
1628 (void)ti;
1629 return NNFW_STATUS_ERROR;
1630}

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ train_expected_tensorinfo() [2/2]

NNFW_STATUS nnfw_session::train_expected_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

◆ train_export_checkpoint() [1/2]

NNFW_STATUS nnfw_session::train_export_checkpoint ( const char *  path)

Definition at line 324 of file onert-micro.cpp.

325{
326 _train_interpreter->saveCheckpoint(_config, path);
327 return NNFW_STATUS_NO_ERROR;
328}
OMStatus saveCheckpoint(const OMConfig &config, const char *save_path)

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::saveCheckpoint().

◆ train_export_checkpoint() [2/2]

NNFW_STATUS nnfw_session::train_export_checkpoint ( const char *  path)

◆ train_export_circle() [1/2]

NNFW_STATUS nnfw_session::train_export_circle ( const char *  path)

Definition at line 318 of file onert-micro.cpp.

319{
320 _train_interpreter->saveModel(_config, path);
321 return NNFW_STATUS_NO_ERROR;
322}
OMStatus saveModel(const OMConfig &config, const char *save_path)

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::saveModel().

◆ train_export_circle() [2/2]

NNFW_STATUS nnfw_session::train_export_circle ( const char *  path)

◆ train_export_circleplus()

NNFW_STATUS nnfw_session::train_export_circleplus ( const char *  path)

Definition at line 1853 of file nnfw_session.cc.

1854{
1855 if (path == nullptr)
1856 {
1857 std::cerr << "Error during nnfw_session::train_export_circleplus : path is null" << std::endl;
1858 return NNFW_STATUS_UNEXPECTED_NULL;
1859 }
1860
1861 if (!isStatePreparedOrFinishedTraining())
1862 {
1863 std::cerr << "Error during nnfw_session::train_export_circleplus : invalid state" << std::endl;
1864 return NNFW_STATUS_INVALID_STATE;
1865 }
1866
1867 try
1868 {
1869 onert::exporter::CircleExporter exporter(_model_path.string(), std::string{path});
1870 exporter.updateWeight(_execution);
1871 exporter.updateMetadata(_train_info);
1872 }
1873 catch (const std::exception &e)
1874 {
1875 std::cerr << "Error during nnfw_session::train_export_circleplus : " << e.what() << std::endl;
1876 return NNFW_STATUS_ERROR;
1877 }
1878
1879 return NNFW_STATUS_NO_ERROR;
1880}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, onert::exporter::CircleExporter::updateMetadata(), and onert::exporter::CircleExporter::updateWeight().

◆ train_get_loss() [1/2]

NNFW_STATUS nnfw_session::train_get_loss ( uint32_t  index,
float *  loss 
)

Definition at line 370 of file onert-micro.cpp.

371{
373 switch (_config.training_context.loss)
374 {
377 break;
378 default:
380 break;
381 }
382
383 _train_interpreter->evaluateMetric(_config, m, reinterpret_cast<void *>(loss),
386}
OMStatus evaluateMetric(const OMConfig &config, OMMetrics metric, void *metric_val, uint32_t test_size)
@ CROSS_ENTROPY
Definition OMConfig.h:54
@ CROSS_ENTROPY_METRICS
Definition OMConfig.h:42
OMTrainingContext training_context
Definition OMConfig.h:107

References onert_micro::OMTrainingContext::batch_size, onert_micro::CROSS_ENTROPY, onert_micro::CROSS_ENTROPY_METRICS, onert_micro::OMTrainingInterpreter::evaluateMetric(), onert_micro::OMTrainingContext::loss, m, NNFW_STATUS_NO_ERROR, and onert_micro::OMConfig::training_context.

◆ train_get_loss() [2/2]

NNFW_STATUS nnfw_session::train_get_loss ( uint32_t  index,
float *  loss 
)

◆ train_get_traininfo()

NNFW_STATUS nnfw_session::train_get_traininfo ( nnfw_train_info info)

Definition at line 1338 of file nnfw_session.cc.

1339{
1340 if (isStateInitialized())
1341 {
1342 // There is no _train_info in INITIALIZED, since _train_info is set when a model loaded
1343 std::cerr << "Error during nnfw_session::train_get_traininfo : invalid state";
1345 }
1346
1347 if (info == nullptr)
1348 {
1349 std::cerr << "Error during nnfw_session::train_get_traininfo : info is nullptr" << std::endl;
1351 }
1352
1353 // after model loaded, it ensures that _train_info is not nullptr
1354 assert(_train_info != nullptr);
1355
1356 auto convertLossCode = [](const onert::ir::train::LossCode &code) -> NNFW_TRAIN_LOSS {
1357 switch (code)
1358 {
1365 default:
1366 throw std::runtime_error{"fail to convert ir::train::LossCode"};
1367 }
1368 };
1369
1370 auto convertLossReduction =
1372 switch (type)
1373 {
1380 default:
1381 throw std::runtime_error{"fail to convert from ir::train::LossReductionType"};
1382 break;
1383 }
1384 };
1385
1386 auto convertOptimizerCode =
1388 switch (code)
1389 {
1396 default:
1397 throw std::runtime_error{"fail to convert from ir::train::OptimizerCode"};
1398 }
1399 };
1400
1401 const auto &loss = _train_info->lossInfo();
1402 const auto &optim = _train_info->optimizerInfo();
1403
1404 try
1405 {
1406 info->learning_rate = optim.learning_rate;
1407 info->batch_size = _train_info->batchSize();
1408 info->loss_info.loss = convertLossCode(loss.loss_code);
1409 info->loss_info.reduction_type = convertLossReduction(loss.reduction_type);
1410 info->opt = convertOptimizerCode(optim.optim_code);
1411
1412 if (_train_info->getTrainableOps().size() > 0)
1413 {
1414 const uint32_t first_trainable_idx = _train_info->getTrainableOps().cbegin()->value();
1415 const uint32_t last_trainable_idx = _train_info->getTrainableOps().crbegin()->value();
1416 const uint32_t ops_size = primary_subgraph()->operations().size();
1417 const uint32_t trainable_indexes_range = last_trainable_idx - first_trainable_idx + 1;
1418
1419 // check if trainable ops set contains continuous indexes on the back of the set
1420 if (last_trainable_idx == ops_size - 1 &&
1421 trainable_indexes_range == _train_info->getTrainableOps().size())
1422 {
1423 // check if all ops are trainable
1424 if (0 == first_trainable_idx)
1425 {
1426 info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_ALL;
1427 }
1428 else
1429 {
1430 info->num_of_trainable_ops = trainable_indexes_range;
1431 }
1432 }
1433 else
1434 {
1435 info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_INCORRECT_STATE;
1436 std::cerr << "conversion from set of trainable ops to num_of_trainable_ops is impossible"
1437 << std::endl;
1439 }
1440 }
1441 else
1442 {
1443 // no layer will be trained
1444 info->num_of_trainable_ops = NNFW_TRAIN_TRAINABLE_NONE;
1445 }
1446 }
1447 catch (const std::exception &e)
1448 {
1449 std::cerr << "Error during nnfw_session::train_get_traininfo" << e.what() << std::endl;
1450 return NNFW_STATUS_ERROR;
1451 }
1452
1453 return NNFW_STATUS_NO_ERROR;
1454}
size_t size() const
Return the number of objects that the manager contains.
Code * code(const SessionID &sess)
Definition Session.cpp:54
type
Definition infer.py:18
@ NNFW_TRAIN_TRAINABLE_NONE
@ NNFW_TRAIN_TRAINABLE_ALL
@ NNFW_TRAIN_TRAINABLE_INCORRECT_STATE
NNFW_TRAIN_LOSS_REDUCTION
@ NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED
@ NNFW_TRAIN_LOSS_REDUCTION_SUM
@ NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE
NNFW_TRAIN_OPTIMIZER
@ NNFW_TRAIN_OPTIMIZER_ADAM
@ NNFW_TRAIN_OPTIMIZER_SGD
@ NNFW_TRAIN_OPTIMIZER_UNDEFINED
NNFW_TRAIN_LOSS
@ NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR
@ NNFW_TRAIN_LOSS_UNDEFINED
@ NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY
virtual const Operations & operations() const =0

References onert::ir::train::Adam, onert::ir::train::CategoricalCrossentropy, info, onert::ir::train::MeanSquaredError, NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY, NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR, NNFW_TRAIN_LOSS_REDUCTION_SUM, NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE, NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED, NNFW_TRAIN_LOSS_UNDEFINED, NNFW_TRAIN_OPTIMIZER_ADAM, NNFW_TRAIN_OPTIMIZER_SGD, NNFW_TRAIN_OPTIMIZER_UNDEFINED, NNFW_TRAIN_TRAINABLE_ALL, NNFW_TRAIN_TRAINABLE_INCORRECT_STATE, NNFW_TRAIN_TRAINABLE_NONE, onert::ir::IGraph::operations(), onert::ir::train::SGD, onert::util::ObjectManager< Index, Object >::size(), onert::ir::train::Sum, onert::ir::train::SumOverBatchSize, and onert::ir::train::Undefined.

◆ train_import_checkpoint() [1/2]

NNFW_STATUS nnfw_session::train_import_checkpoint ( const char *  path)

Definition at line 330 of file onert-micro.cpp.

331{
332 _train_interpreter->loadCheckpoint(_config, path);
334}
OMStatus loadCheckpoint(OMConfig &config, const char *load_path)

References onert_micro::OMTrainingInterpreter::loadCheckpoint(), and NNFW_STATUS_NO_ERROR.

◆ train_import_checkpoint() [2/2]

NNFW_STATUS nnfw_session::train_import_checkpoint ( const char *  path)

◆ train_input_tensorinfo() [1/2]

NNFW_STATUS nnfw_session::train_input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

Definition at line 1599 of file nnfw_session.cc.

1600{
1601 if (!isStatePreparedOrFinishedTraining())
1602 {
1603 std::cerr << "Error during nnfw_session::train_input_tensorinfo : invalid state" << std::endl;
1605 }
1606
1607 // Check index is valid: [0, getInputSize())
1608
1609 // NYI
1610 (void)index;
1611 (void)ti;
1612 return NNFW_STATUS_ERROR;
1613}

References NNFW_STATUS_ERROR, and NNFW_STATUS_INVALID_STATE.

◆ train_input_tensorinfo() [2/2]

NNFW_STATUS nnfw_session::train_input_tensorinfo ( uint32_t  index,
nnfw_tensorinfo ti 
)

◆ train_prepare() [1/2]

NNFW_STATUS nnfw_session::train_prepare ( )

Definition at line 284 of file onert-micro.cpp.

285{
286 // TODO: Implement remaining jobs if inference_interpreter is introduced
287 // maybe interpreter initialization ?
289}

References NNFW_STATUS_NO_ERROR.

◆ train_prepare() [2/2]

NNFW_STATUS nnfw_session::train_prepare ( )

◆ train_run() [1/2]

NNFW_STATUS nnfw_session::train_run ( bool  update_weights)

Definition at line 291 of file onert-micro.cpp.

292{
293 if (update_weights)
294 {
295 // TODO: micro support update_weights ???
296 // Here we use this flag to distinguish inference and training in the training interpreter
297 _train_interpreter->trainSingleStep(_config);
300 }
301 else
302 {
303 // TODO: support multiple input/output
304 assert(outputbuf != nullptr);
305 _train_interpreter->allocateInputs();
306 float *allocated_input_data = (float *)_train_interpreter->getInputDataAt(0);
307 float *user_input_data = (float *)_train_interpreter->getInputData(0);
308 memcpy(allocated_input_data, user_input_data,
309 sizeof(float) * _train_interpreter->getInputSizeAt(0));
310 _train_interpreter->run(_config);
311 float *calculated_ptr = (float *)_train_interpreter->getOutputDataAt(0);
312 memcpy(outputbuf, calculated_ptr, sizeof(float) * _train_interpreter->getOutputSizeAt(0));
313 _train_interpreter->reset();
314 }
316}
uint32_t getOutputSizeAt(uint32_t position)
OMStatus run(const OMConfig &config)
uint32_t getInputSizeAt(uint32_t position)
OMStatus trainSingleStep(OMConfig &config)

References onert_micro::OMTrainingInterpreter::allocateInputs(), onert_micro::OMTrainingContext::batch_size, onert_micro::OMTrainingInterpreter::getInputData(), onert_micro::OMTrainingInterpreter::getInputDataAt(), onert_micro::OMTrainingInterpreter::getInputSizeAt(), onert_micro::OMTrainingInterpreter::getOutputDataAt(), onert_micro::OMTrainingInterpreter::getOutputSizeAt(), NNFW_STATUS_NO_ERROR, onert_micro::OMTrainingContext::num_epoch, onert_micro::OMTrainingContext::num_step, onert_micro::OMTrainingInterpreter::reset(), onert_micro::OMTrainingInterpreter::run(), onert_micro::OMConfig::training_context, and onert_micro::OMTrainingInterpreter::trainSingleStep().

◆ train_run() [2/2]

NNFW_STATUS nnfw_session::train_run ( bool  update_weights)

◆ train_set_expected() [1/2]

NNFW_STATUS nnfw_session::train_set_expected ( uint32_t  index,
const void *  expected,
const nnfw_tensorinfo expected_tensorinfo 
)

Definition at line 1676 of file nnfw_session.cc.

1678{
1679 if (expected == nullptr)
1680 {
1681 std::cerr << "Error during nnfw_session::train_set_expected : expected buffer is null"
1682 << std::endl;
1684 }
1685
1686 if (!isStatePreparedOrFinishedTraining())
1687 {
1688 std::cerr << "Error during nnfw_session::train_set_expected : invalid state" << std::endl;
1690 }
1691
1692 if (index >= getOutputSize())
1693 {
1694 std::cerr << "Error during nnfw_session::train_set_expected : index is out of range"
1695 << std::endl;
1696 return NNFW_STATUS_ERROR;
1697 }
1698
1699 try
1700 {
1701 const auto ind = onert::ir::IOIndex{index};
1702 auto size = _execution->outputInfo(ind).total_size();
1703 if (expected_tensorinfo && getBufSize(expected_tensorinfo) != size)
1704 {
1705 std::cerr << "Error during nnfw_session::train_set_expected : invalid tensorinfo"
1706 << std::endl;
1707 return NNFW_STATUS_ERROR;
1708 }
1709
1710 // NOTE Find the loss input index
1711 // Input is added as many as the number of outputs.
1712 // The loss index is calculated from the value obtained by subtracting the
1713 // total output(added loss input) from the total input size.
1714 auto input_index = getInputSize() - getOutputSize() + index;
1715 auto input_ind = onert::ir::IOIndex(input_index);
1716 _execution->setInput(input_ind, expected, size);
1717 }
1718 catch (const std::exception &e)
1719 {
1720 std::cerr << "Error during nnfw_session::train_set_expected : " << e.what() << std::endl;
1721 return NNFW_STATUS_ERROR;
1722 }
1723
1724 return NNFW_STATUS_NO_ERROR;
1725}

References NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.

◆ train_set_expected() [2/2]

NNFW_STATUS nnfw_session::train_set_expected ( uint32_t  index,
void *  expected 
)

Definition at line 344 of file onert-micro.cpp.

345{
346 _train_interpreter->setTarget((uint8_t *)expected, index);
348}
void setTarget(uint8_t *data, uint32_t target_index)

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::setTarget().

◆ train_set_input() [1/2]

NNFW_STATUS nnfw_session::train_set_input ( uint32_t  index,
const void *  input,
const nnfw_tensorinfo input_tensorinfo 
)

Definition at line 1632 of file nnfw_session.cc.

1634{
1635 if (input == nullptr)
1636 {
1637 std::cerr << "Error during nnfw_session::train_set_input : input buffer is null" << std::endl;
1639 }
1640
1641 if (!isStatePreparedOrFinishedTraining())
1642 {
1643 std::cerr << "Error during nnfw_session::train_set_input : invalid state" << std::endl;
1645 }
1646
1647 if (index >= getInputSize())
1648 {
1649 std::cerr << "Error during nnfw_session::train_set_input : index is out of range" << std::endl;
1650 return NNFW_STATUS_ERROR;
1651 }
1652
1653 try
1654 {
1655 auto ind = onert::ir::IOIndex(index);
1656 auto size = _execution->inputInfo(ind).total_size();
1657 if (input_tensorinfo && getBufSize(input_tensorinfo) != size)
1658 {
1659 std::cerr
1660 << "Error during nnfw_session::train_set_input : not supporeted to change tensorinfo"
1661 << std::endl;
1662 return NNFW_STATUS_ERROR;
1663 }
1664
1665 _execution->setInput(ind, input, size);
1666 }
1667 catch (const std::exception &e)
1668 {
1669 std::cerr << "Error during nnfw_session::train_set_input : " << e.what() << std::endl;
1670 return NNFW_STATUS_ERROR;
1671 }
1672
1673 return NNFW_STATUS_NO_ERROR;
1674}
NNFW_STATUS input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)

References input_tensorinfo(), NNFW_STATUS_ERROR, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_NO_ERROR, NNFW_STATUS_UNEXPECTED_NULL, and size.

◆ train_set_input() [2/2]

NNFW_STATUS nnfw_session::train_set_input ( uint32_t  index,
void *  input 
)

Definition at line 337 of file onert-micro.cpp.

338{
339 _train_interpreter->setInput((uint8_t *)input, index);
341}
void setInput(uint8_t *data, uint32_t input_index)

References NNFW_STATUS_NO_ERROR, and onert_micro::OMTrainingInterpreter::setInput().

◆ train_set_output() [1/2]

NNFW_STATUS nnfw_session::train_set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

Definition at line 350 of file onert-micro.cpp.

352{
353 outputbuf = (uint8_t *)buffer;
355}

References NNFW_STATUS_NO_ERROR.

◆ train_set_output() [2/2]

NNFW_STATUS nnfw_session::train_set_output ( uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

◆ train_set_traininfo() [1/2]

NNFW_STATUS nnfw_session::train_set_traininfo ( const nnfw_train_info info)

Definition at line 357 of file onert-micro.cpp.

358{
359 _config.training_context.learning_rate = info->learning_rate;
360 _config.training_context.batch_size = info->batch_size;
363 _config.training_context.beta = info->adam_opt.beta;
364 _config.training_context.beta_squares = info->adam_opt.beta2;
365 _config.training_context.beta = info->adam_opt.epsilon;
366 _config.training_context.num_of_train_layers = info->num_trainble_ops;
368}
OMTrainOptimizer optimizer
Definition OMConfig.h:78

References onert_micro::ADAM, onert_micro::OMTrainingContext::batch_size, onert_micro::OMTrainingContext::beta, onert_micro::OMTrainingContext::beta_squares, info, onert_micro::OMTrainingContext::learning_rate, NNFW_STATUS_NO_ERROR, NNFW_TRAIN_OPTIMIZER_ADAM, onert_micro::OMTrainingContext::num_of_train_layers, onert_micro::OMTrainingContext::optimizer, onert_micro::SGD, and onert_micro::OMConfig::training_context.

◆ train_set_traininfo() [2/2]

NNFW_STATUS nnfw_session::train_set_traininfo ( const nnfw_train_info info)

The documentation for this struct was generated from the following files: