// Maximum buffer sizes (in bytes) used by the C API for copied-out strings.
// NOTE(review): the numerals fused to the start of each line look like
// extraction artifacts (original file line numbers), not real tokens — confirm
// against the pristine source before building.
44#define MAX_BACKEND_NAME_LENGTH 32
45#define MAX_OP_NAME_LENGTH 64
46#define MAX_PATH_LENGTH 1024
47#define MAX_TENSOR_NAME_LENGTH 64
// Scans the first `length` bytes of `str` for a '\0', i.e. checks that the
// caller-supplied buffer is NUL-terminated within bounds.
// NOTE(review): fragment — the braces and the return statements (true on
// finding '\0', presumably false otherwise) are not visible in this chunk.
53bool null_terminating(
const char *str, uint32_t length)
55 for (uint32_t i = 0; i < length; i++)
57 if (*(str + i) ==
'\0')
79 uint32_t *index,
bool is_input)
81 if (!tensorname || !index)
86 std::cerr <<
"nnpackage path is too long" << std::endl;
90 auto ind_found = is_input ? graph.getInputIndex(tensorname) : graph.getOutputIndex(tensorname);
92 if (ind_found.undefined())
99 *index = ind_found.value();
// Returns `value` with leading/trailing spaces and tabs removed, as an owned
// std::string. Uses find_first_not_of/find_last_not_of over the view, so no
// allocation happens until the final substr copy.
// NOTE(review): fragment — the all-whitespace branch (begin == npos, which
// should return an empty string) is cut out of this chunk.
104std::string trim(std::string_view value)
106 constexpr std::string_view whitespace =
" \t";
108 auto begin = value.find_first_not_of(whitespace);
109 if (
begin == std::string_view::npos)
112 auto end = value.find_last_not_of(whitespace);
113 return std::string(value.substr(
begin, end -
begin + 1));
118 std::ifstream ifs(cfgfile);
122 while (std::getline(ifs, line))
124 auto cmtpos = line.find(
'#');
125 if (cmtpos != std::string::npos)
127 line = line.substr(0, cmtpos);
129 std::istringstream isline(line);
131 if (std::getline(isline, key,
'='))
134 if (std::getline(isline, value))
137 keyValues[key] = trim(value);
// Fragment of a switch over onert::ir::DataType — presumably the body of a
// DataType -> NNFW_TYPE conversion helper; the return statement between each
// case label is missing from this chunk (TODO confirm against full source).
// Unhandled types (UINT32, QUANT_INT8_SYMM fall through together) raise below.
152 case DataType::FLOAT32:
154 case DataType::INT32:
156 case DataType::QUANT_UINT8_ASYMM:
158 case DataType::BOOL8:
160 case DataType::UINT8:
162 case DataType::INT64:
164 case DataType::QUANT_INT8_ASYMM:
166 case DataType::QUANT_INT16_SYMM:
168 case DataType::UINT32:
169 case DataType::QUANT_INT8_SYMM:
// Types with no public NNFW_TYPE counterpart are rejected loudly.
171 throw std::runtime_error(
"Error: Model has type that runtime API does not support.");
178 ti->
rank = shape.rank();
179 for (
int j = 0; j < ti->
rank; ++j)
181 ti->
dims[j] = shape.dim(j);
183 ti->
dtype = datatype_to_nnfw_dtype(dtype);
// Loads a model file into an onert IR Model, dispatching on the string
// `model_type` ("tflite" or "circle"); the per-type loader calls between the
// two `if`s are missing from this fragment. On any exception the error is
// logged to stderr and an empty unique_ptr is returned (no throw to caller).
186std::unique_ptr<onert::ir::Model> loadModel(
const std::string filename,
187 const std::string model_type)
191 if (model_type ==
"tflite")
193 if (model_type ==
"circle")
198 catch (
const std::exception &e)
200 std::cerr <<
"Fail to load model: " << e.what() <<
'\n';
// Failure path: return a null Model so callers can test for nullptr.
203 return std::unique_ptr<onert::ir::Model>(
nullptr);
// Builds a TrainingInfo for `model`: if the model carries training metadata
// (looked up by `tinfo_name`, declared outside this fragment) it is extracted
// and — in lines not visible here — presumably parsed into the result;
// otherwise a default-constructed TrainingInfo is returned.
206std::unique_ptr<onert::ir::train::TrainingInfo>
207loadTrainingInfo(
const std::shared_ptr<onert::ir::Model> &model)
210 if (model->exists_metadata(tinfo_name))
212 const auto buffer = model->extract_metadata(tinfo_name);
215 return std::make_unique<onert::ir::train::TrainingInfo>();
220 static int elmsize[] = {
232 for (int32_t i = 0; i <
info->rank; ++i)
234 assert(
info->dims[i] >= 0);
237 return elmsize[
info->dtype] * n;
// Default constructor: every member starts null/empty except compiler options
// (populated from global config) and the quantize/codegen managers, which are
// eagerly allocated. NOTE(review): fragment — the init-list continues past
// `_model_path{}` and the constructor body is not visible here.
241nnfw_session::nnfw_session()
242 : _nnpkg{nullptr}, _coptions{
onert::compiler::CompilerOptions::fromGlobalConfig()},
243 _compiler_artifact{nullptr}, _execution{nullptr}, _kernel_registry{nullptr},
244 _train_info{nullptr}, _quant_manager{
std::make_unique<
onert::odc::QuantizeManager>()},
245 _codegen_manager{
std::make_unique<
onert::odc::CodegenManager>()}, _model_path{},
253 if (session ==
nullptr)
257 auto new_session = std::unique_ptr<nnfw_session>(
new nnfw_session());
258 new_session->_kernel_registry = std::make_shared<onert::api::CustomKernelRegistry>();
259 *
session = new_session.release();
261 catch (
const std::bad_alloc &e)
263 std::cerr <<
"Error during session creation" << std::endl;
267 catch (
const std::exception &e)
269 std::cerr <<
"Error during session initialization : " << e.what() << std::endl;
280 if (!isStateInitialized())
293 _nnpkg = std::make_unique<onert::ir::NNPkg>(std::move(model));
294 _train_info = loadTrainingInfo(_nnpkg->primary_model());
295 _state = State::MODEL_LOADED;
297 catch (
const std::exception &e)
299 std::cerr <<
"Error during model loading : " << e.what() << std::endl;
307 if (!isStateInitialized())
312 std::cerr <<
"Path is null." << std::endl;
318 std::cerr <<
"Path is too long" << std::endl;
324 std::filesystem::path filename{path};
325 if (!std::filesystem::is_directory(filename) && filename.has_extension())
327 std::string model_type = filename.extension().string().substr(1);
328 return loadModelFile(filename, model_type);
331 const auto &package_dir = filename;
334 if (!std::filesystem::is_directory(package_dir))
336 std::cerr <<
"invalid path: " << package_dir << std::endl;
340 const auto manifest_file_name = package_dir /
"metadata/MANIFEST";
341 std::ifstream mfs(manifest_file_name);
347 const Json::Value &models = root[
"models"];
348 const Json::Value &model_types = root[
"model-types"];
349 const Json::Value &configs = root[
"configs"];
351 if (!configs.empty() && !configs[0].empty())
353 const auto filepath = package_dir /
"metadata" / configs[0].asString();
356 if (loadConfigure(filepath.string(), keyValues))
361 _nnpkg = std::make_unique<onert::ir::NNPkg>();
362 auto num_models = models.size();
365 std::cerr <<
"Invalid model size - " << std::to_string(num_models) << std::endl;
371 if (num_models > 1 && _coptions->manual_scheduler_options.index_to_backend.size() != 0)
373 std::cerr <<
"Cannot set backend to operator index for multiple models" << std::endl;
377 for (uint16_t i = 0; i < num_models; ++i)
379 const auto model_file_path = package_dir / models[i].asString();
380 const auto model_type = model_types[i].asString();
381 auto model = loadModel(model_file_path.string(), model_type);
382 if (model ==
nullptr)
384 _model_path = model_file_path;
385 model->bindKernelBuilder(_kernel_registry->getBuilder());
389 _train_info = loadTrainingInfo(_nnpkg->primary_model());
391 auto toIODesc = [](std::string str) {
393 if (indices.size() != 3)
395 std::cerr <<
"IODesc should be 3-tuple." << std::endl;
398 auto model_idx =
static_cast<uint32_t
>(std::stoi(indices.at(0)));
399 auto subgraph_idx =
static_cast<uint32_t
>(std::stoi(indices.at(1)));
400 auto operand_idx =
static_cast<uint32_t
>(std::stoi(indices.at(2)));
404 const Json::Value &pkg_inputs = root[
"pkg-inputs"];
405 for (uint32_t i = 0; i < pkg_inputs.size(); ++i)
406 _nnpkg->addInput(toIODesc(pkg_inputs[i].asString()));
407 const Json::Value &pkg_outputs = root[
"pkg-outputs"];
408 for (uint32_t i = 0; i < pkg_outputs.size(); ++i)
409 _nnpkg->addOutput(toIODesc(pkg_outputs[i].asString()));
411 const Json::Value &fromtos = root[
"model-connect"];
412 for (uint32_t i = 0; i < fromtos.size(); ++i)
414 const Json::Value &tos = fromtos[i][
"to"];
415 for (uint32_t j = 0; j < tos.size(); ++j)
416 _nnpkg->addEdge(toIODesc(fromtos[i][
"from"].asString()), toIODesc(tos[j].asString()));
420 _state = State::MODEL_LOADED;
422 catch (
const std::exception &e)
424 std::cerr <<
"Error during model loading : " << e.what() << std::endl;
// Body fragment of nnfw_session::prepare (signature not visible): rejects any
// state other than MODEL_LOADED with a message that distinguishes "called
// before load" from "called twice", then compiles the loaded package and
// creates the Execution. Missing lines presumably build `compiler` from
// _nnpkg/_coptions — TODO confirm. Ends by advancing to State::PREPARED.
433 if (!isStateModelLoaded())
435 std::cerr <<
"Error during model prepare : ";
436 if (isStateInitialized())
438 std::cerr <<
"prepare should be run once";
442 std::cerr <<
"invalid state";
444 std::cerr << std::endl;
452 _compiler_artifact = compiler->compile();
453 _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
455 catch (
const std::exception &e)
457 std::cerr <<
"Error during model prepare : " << e.what() << std::endl;
461 _state = State::PREPARED;
// Body fragment of nnfw_session::run (signature not visible): requires
// PREPARED or FINISHED_RUN state, executes synchronously, and reports errors
// to stderr. Two catch sites are visible — the first one's handler type is
// cut from this chunk (likely a project-specific exception — TODO confirm);
// the second is the generic std::exception fallback. Success ends in
// State::FINISHED_RUN.
467 if (!isStatePreparedOrFinishedRun())
469 std::cerr <<
"Error during nnfw_session::run : "
470 <<
"run should be run after prepare" << std::endl;
476 _execution->execute();
481 std::cerr <<
"Error during nnfw_session::run : " << e.
what() << std::endl;
484 catch (
const std::exception &e)
486 std::cerr <<
"Error during nnfw_session::run : " << e.
what() << std::endl;
490 _state = State::FINISHED_RUN;
// Body fragment of nnfw_session::run_async (signature not visible): same
// state precondition as run(), but kicks off execution without waiting
// (startExecute) and moves the session to RUNNING; completion is observed
// via run_await below.
496 if (!isStatePreparedOrFinishedRun())
498 std::cerr <<
"Error during nnfw_session::run_async : "
499 <<
"run_async should be run after prepare" << std::endl;
503 _execution->startExecute();
505 _state = State::RUNNING;
// Body fragment of nnfw_session::run_await (signature not visible): only
// legal while RUNNING (i.e. after run_async); blocks until the in-flight
// execution finishes, then transitions to FINISHED_RUN.
511 if (!isStateRunning())
513 std::cerr <<
"Error during nnfw_session::run_await : "
514 <<
"run_await should be run after run_async" << std::endl;
518 _execution->waitFinish();
520 _state = State::FINISHED_RUN;
// Body fragment of nnfw_session::set_input (signature not visible): validates
// state (prepared or finished-run) and the buffer/length pair — a null buffer
// is only acceptable with length 0. The actual _execution->setInput call is
// cut from this chunk (TODO confirm). Errors are logged, not thrown.
526 if (!isStatePreparedOrFinishedRun())
528 std::cerr <<
"Error during nnfw_session::set_input : invalid state" << std::endl;
532 if (!buffer && length != 0)
535 <<
"Error during nnfw_session::set_input : given buffer is NULL but the length is not 0"
544 catch (
const std::exception &e)
546 std::cerr <<
"Error during nnfw_session::set_input : " << e.what() << std::endl;
// Body fragment of nnfw_session::set_output (signature not visible): mirror
// of set_input for output buffers — same state check and null-buffer/length
// validation; the _execution->setOutput call is cut from this chunk
// (TODO confirm). Errors are logged, not thrown.
554 if (!isStatePreparedOrFinishedRun())
556 std::cerr <<
"Error during nnfw_session::set_output : invalid state" << std::endl;
560 if (!buffer && length != 0)
563 <<
"Error during nnfw_session::set_output : given buffer is NULL but the length is not 0"
572 catch (
const std::exception &e)
574 std::cerr <<
"Error during nnfw_session::set_output : " << e.what() << std::endl;
582 if (isStateInitialized())
587 if (number ==
nullptr)
589 std::cerr <<
"Error during nnfw_session::input_size, number is null pointer." << std::endl;
592 *number = getInputSize();
594 catch (
const std::exception &e)
596 std::cerr <<
"Error during nnfw_session::input_size : " << e.what() << std::endl;
604 if (isStateInitialized())
609 if (number ==
nullptr)
611 std::cerr <<
"Error during nnfw_session::output_size, number is null pointer." << std::endl;
614 *number = getOutputSize();
616 catch (
const std::exception &e)
618 std::cerr <<
"Error during nnfw_session::output_size" << e.what() << std::endl;
626 if (!isStateModelLoaded())
628 std::cerr <<
"Error during nnfw_session::set_input_layout : "
629 <<
"run should be run before prepare" << std::endl;
638 std::cerr <<
"Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
642 if (_selected_signature.
valid())
645 std::cerr <<
"Error during nnfw_session::set_input_layout : "
646 <<
"set_input_layout after signature selection is not supported yet" << std::endl;
653 catch (
const std::exception &e)
655 std::cerr <<
"Error during nnfw_session::set_input_layout : " << e.what() << std::endl;
663 if (!isStateModelLoaded())
665 std::cerr <<
"Error during nnfw_session::set_output_layout : "
666 <<
"run should be run before prepare" << std::endl;
675 std::cerr <<
"Error during nnfw_session::set_output_layout, not supported layout"
680 if (_selected_signature.
valid())
683 std::cerr <<
"Error during nnfw_session::set_output_layout : "
684 <<
"set_output_layout after signature selection is not supported yet" << std::endl;
691 catch (
const std::exception &e)
693 std::cerr <<
"Error during nnfw_session::set_output_layout : " << e.what() << std::endl;
701 if (!isStateModelLoaded())
703 std::cerr <<
"Error during nnfw_session::set_input_type : "
704 <<
"set_input_type should be called before prepare" << std::endl;
712 std::cerr <<
"Error during nnfw_session::set_input_type, not supported type" << std::endl;
716 if (_selected_signature.
valid())
719 std::cerr <<
"Error during nnfw_session::set_input_type : "
720 <<
"set_input_type after signature selection is not supported yet" << std::endl;
727 catch (
const std::exception &e)
729 std::cerr <<
"Error during nnfw_session::set_input_type : " << e.what() << std::endl;
738 if (!isStateModelLoaded())
740 std::cerr <<
"Error during nnfw_session::set_output_type : "
741 <<
"set_output_type should be called before prepare" << std::endl;
749 std::cerr <<
"Error during nnfw_session::set_output_type, not supported type" << std::endl;
753 if (_selected_signature.
valid())
756 std::cerr <<
"Error during nnfw_session::set_output_type : "
757 <<
"set_output_type after signature selection is not supported yet" << std::endl;
764 catch (
const std::exception &e)
766 std::cerr <<
"Error during nnfw_session::set_output_type : " << e.what() << std::endl;
777 if (isStateInitialized())
779 std::cerr <<
"Error during set_input_tensorinfo : should be run after load_model"
786 std::cerr <<
"Error during nnfw_session::set_input_tensorinfo : tensorinfo is null"
793 std::cerr <<
"unsupported rank: " << ti->
rank << std::endl;
797 for (int32_t i = 0; i < ti->
rank; ++i)
799 if (ti->
dims[i] <= 0)
801 std::cerr <<
"dim must be positive integer but was " << ti->
dims[i] << std::endl;
808 for (int32_t i = 0; i < ti->
rank; i++)
809 new_shape.dim(i) = ti->
dims[i];
812 if (!isStatePreparedOrFinishedRun())
815 _selected_signature.
valid() ? _nnpkg->changeInputShape(_selected_signature, index, new_shape)
816 : _nnpkg->changeInputShape(input_index, new_shape);
819 _execution->changeInputShape(input_index, new_shape);
826 if (isStateInitialized())
833 std::cerr <<
"Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer."
838 if (index >= getInputSize())
840 std::cerr <<
"Error during nnfw_session::input_tensorinfo, index is out of range."
846 if (isStateModelLoaded())
848 const auto &
info = _selected_signature.
valid() ? _nnpkg->inputInfo(_selected_signature, index)
849 : _nnpkg->inputInfo(input_index);
850 fillTensorInfo(ti,
info.shape(),
info.typeInfo().type());
854 const auto &
info = _execution->inputInfo(input_index);
855 fillTensorInfo(ti,
info.shape(),
info.typeInfo().type());
858 catch (
const std::exception &e)
860 std::cerr <<
"Error during nnfw_session::input_tensorinfo : " << e.what() << std::endl;
868 if (isStateInitialized())
873 std::cerr <<
"Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer."
880 if (index >= getOutputSize())
882 std::cerr <<
"Error during nnfw_session::output_tensorinfo, index is out of range."
888 if (isStateModelLoaded())
890 const auto &
info = _selected_signature.
valid()
891 ? _nnpkg->outputInfo(_selected_signature, index)
892 : _nnpkg->outputInfo(output_index);
893 fillTensorInfo(ti,
info.shape(),
info.typeInfo().type());
897 auto info = _execution->outputInfo(output_index);
898 fillTensorInfo(ti,
info.shape(),
info.typeInfo().type());
901 catch (
const std::exception &e)
903 std::cerr <<
"Error during nnfw_session::output_tensorinfo : " << e.what() << std::endl;
913 _kernel_registry->registerKernel(
id, eval_func);
921 std::cerr <<
"Error during nnfw_session::get_output : tensor info is null" << std::endl;
925 if (out_buffer ==
nullptr)
927 std::cerr <<
"Error during nnfw_session::get_output : output buffer is null" << std::endl;
931 if (!isStateFinishedRun())
933 std::cerr <<
"Error during nnfw_session::get_output : invalid state" << std::endl;
939 if (index >= getOutputSize())
941 std::cerr <<
"Error during nnfw_session::get_output, index " << index
942 <<
" is out of range. (output count: " << getOutputSize() <<
")" << std::endl;
946 if (!_coptions->internal_output_alloc)
948 std::cerr <<
"Error during nnfw_session::get_output: "
949 <<
"internal output allocation is not enabled. "
950 <<
"Call nnfw_set_prepare_config(session, "
951 "NNFW_PREPARE_CONFIG_ENABLE_INTERNAL_OUTPUT_ALLOC, \"true\") "
952 <<
"before nnfw_prepare()." << std::endl;
957 const auto &
info = _execution->outputInfo(io_index);
958 const auto &shape =
info.shape();
959 const auto &dtype =
info.typeInfo().type();
960 fillTensorInfo(ti, shape, dtype);
962 *out_buffer = _execution->outputBuffer(io_index);
964 catch (
const std::exception &e)
966 std::cerr <<
"Error during nnfw_session::get_output : " << e.what() << std::endl;
975 if (!isStateModelLoaded())
989 catch (
const std::exception &e)
991 std::cerr <<
"Error during nnfw_session::set_available_backends : " << e.what() << std::endl;
1004 if (!isStateInitialized())
1007 _coptions->workspace_dir = std::string(dir);
1017 if (!isStateModelLoaded())
1019 std::cerr <<
"Error during nnfw_session::set_signature_for_tensorinfo : invalid state"
1024 for (
const auto &[subg_idx, sig_str] : _signature_map)
1026 if (sig_str == std::string(signature))
1028 _selected_signature = subg_idx;
1034 std::cerr <<
"Error during nnfw_session::set_signature_for_tensorinfo : cannot find signature \""
1035 << signature <<
"\"" << std::endl;
1044 if (!isStatePreparedOrFinishedRun())
1046 std::cerr <<
"Error during nnfw_session::set_signature_run : invalid state" << std::endl;
1050 for (
const auto &[subg_idx, sig_str] : _signature_map)
1052 if (sig_str == std::string(signature))
1055 std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors, subg_idx);
1060 std::cerr <<
"Error during nnfw_session::set_signature_run : cannot find signature" << std::endl;
1066 std::cerr << msg << std::endl;
1072 if (!isStateModelLoaded())
1080 const std::string skey = key;
1082 if (skey == config::GRAPH_DOT_DUMP)
1084 _coptions->graph_dump_level = toInt(value);
1086 else if (skey == config::EXECUTOR)
1088 _coptions->executor = value;
1090 else if (skey == config::OP_BACKEND_ALLOPS)
1092 _coptions->manual_scheduler_options.backend_for_all = value;
1094 else if (skey == config::USE_SCHEDULER)
1096 _coptions->he_scheduler = toBool(value);
1098 else if (skey == config::PROFILING_MODE)
1100 _coptions->he_profiling_mode = toBool(value);
1102 else if (skey == config::ENABLE_LOG || skey == config::NUM_THREADS)
1105 keyValues[skey] = std::string(value);
1108 if (skey == config::ENABLE_LOG)
1122 if (_nnpkg !=
nullptr)
1124 assert(_execution ==
nullptr);
1125 return _nnpkg->primary_model()->primary_subgraph().get();
1129 assert(_execution !=
nullptr);
1131 return &_execution->primary_subgraph();
// Number of model inputs. Throws if no model is loaded yet; before
// compilation the count comes from the NNPkg, afterwards from the Execution.
1135uint32_t nnfw_session::getInputSize()
1137 if (isStateInitialized())
1138 throw std::runtime_error{
"Model is not loaded yet"};
1140 if (isStateModelLoaded())
1141 return _nnpkg->inputSize();
// Compiled states: _nnpkg has been consumed, ask the Execution instead.
1144 return _execution->inputSize();
// Number of model outputs; same source-of-truth rules as getInputSize():
// throws before load, NNPkg before compile, Execution after.
1147uint32_t nnfw_session::getOutputSize()
1149 if (isStateInitialized())
1150 throw std::runtime_error{
"Model is not loaded yet"};
1152 if (isStateModelLoaded())
1153 return _nnpkg->outputSize();
1156 return _execution->outputSize();
1159NNFW_STATUS nnfw_session::loadModelFile(
const std::string &model_file_path,
1160 const std::string &model_type)
1163 if (model ==
nullptr)
1166 _signature_map =
model->signatureMap();
1168 _nnpkg = std::make_unique<onert::ir::NNPkg>(std::move(model));
1169 _model_path = std::filesystem::path(model_file_path);
1170 _compiler_artifact.reset();
1172 _train_info = loadTrainingInfo(_nnpkg->primary_model());
1173 _state = State::MODEL_LOADED;
1180 if (!isStateModelLoaded())
1186 auto check_boundary = [](
size_t dest_size, std::string &src) {
1187 if (dest_size < src.length() + 1 )
1189 std::cerr <<
"buffer is small to copy config value." << std::endl;
1195 const std::string skey = key;
1197 if (skey == onert::util::config::BACKENDS)
1199 if (_coptions->backend_list.size() == 0)
1203 nnfw::misc::join(_coptions->backend_list.begin(), _coptions->backend_list.end(),
";");
1205 if (!check_boundary(value_size, str))
1208 strncpy(value, str.c_str(), value_size);
1210 else if (skey == onert::util::config::EXECUTOR)
1212 if (!check_boundary(value_size, _coptions->executor))
1215 strncpy(value, _coptions->executor.c_str(), _coptions->executor.length());
// True iff the session is freshly created (no model, no execution). The
// asserts document the state invariant; the return statements are cut from
// this fragment.
1225bool nnfw_session::isStateInitialized()
1227 if (_state == State::INITIALIZED)
1229 assert(_nnpkg ==
nullptr);
1230 assert(_execution ==
nullptr);
// True iff a model is loaded but not yet compiled: NNPkg present, Execution
// absent. Return statements are cut from this fragment.
1239bool nnfw_session::isStateModelLoaded()
1241 if (_state == State::MODEL_LOADED)
1243 assert(_nnpkg !=
nullptr);
1244 assert(_execution ==
nullptr);
// True iff prepare() has completed: the NNPkg has been handed to the compiler
// (now null) and an Execution exists. Return statements are cut from this
// fragment.
1253bool nnfw_session::isStatePrepared()
1255 if (_state == State::PREPARED)
1257 assert(_nnpkg ==
nullptr);
1258 assert(_execution !=
nullptr);
// True iff an async execution started by run_async() is still in flight.
// Same invariants as PREPARED; return statements cut from this fragment.
1267bool nnfw_session::isStateRunning()
1269 if (_state == State::RUNNING)
1271 assert(_nnpkg ==
nullptr);
1272 assert(_execution !=
nullptr);
// True iff at least one inference has completed (run()/run_await() done).
// Same invariants as PREPARED; return statements cut from this fragment.
1278bool nnfw_session::isStateFinishedRun()
1280 if (_state == State::FINISHED_RUN)
1282 assert(_nnpkg ==
nullptr);
1283 assert(_execution !=
nullptr);
// Convenience predicate: the two states in which I/O buffers may be bound
// and run() may be (re)invoked.
1292bool nnfw_session::isStatePreparedOrFinishedRun()
1294 return isStatePrepared() || isStateFinishedRun();
// Body fragment (signature not visible): resolves a named INPUT tensor to its
// index via the shared helper — `true` selects the input lookup path.
1299 return getTensorIndexImpl(*primary_subgraph(), tensorname, index,
true);
// Body fragment (signature not visible): resolves a named OUTPUT tensor to
// its index — `false` selects the output lookup path in the shared helper.
1304 return getTensorIndexImpl(*primary_subgraph(), tensorname, index,
false);
1309 if (backend_settings == NULL)
1312 if (!isStateModelLoaded())
1317 if (_nnpkg->model_count() > 1)
1319 std::cerr <<
"Not supported multiple model" << std::endl;
1326 auto &ms_options = _coptions->manual_scheduler_options;
1327 ms_options.setBackendMap(std::string{backend_settings});
1329 catch (
const std::exception &e)
1331 std::cerr <<
"Error during nnfw_session::set_backends_per_operation" << e.what() << std::endl;
1340 if (isStateInitialized())
1343 std::cerr <<
"Error during nnfw_session::train_get_traininfo : invalid state";
1347 if (
info ==
nullptr)
1349 std::cerr <<
"Error during nnfw_session::train_get_traininfo : info is nullptr" << std::endl;
1354 assert(_train_info !=
nullptr);
1366 throw std::runtime_error{
"fail to convert ir::train::LossCode"};
1370 auto convertLossReduction =
1381 throw std::runtime_error{
"fail to convert from ir::train::LossReductionType"};
1386 auto convertOptimizerCode =
1397 throw std::runtime_error{
"fail to convert from ir::train::OptimizerCode"};
1401 const auto &loss = _train_info->lossInfo();
1402 const auto &optim = _train_info->optimizerInfo();
1406 info->learning_rate = optim.learning_rate;
1407 info->batch_size = _train_info->batchSize();
1408 info->loss_info.loss = convertLossCode(loss.loss_code);
1409 info->loss_info.reduction_type = convertLossReduction(loss.reduction_type);
1410 info->opt = convertOptimizerCode(optim.optim_code);
1412 if (_train_info->getTrainableOps().size() > 0)
1414 const uint32_t first_trainable_idx = _train_info->getTrainableOps().cbegin()->value();
1415 const uint32_t last_trainable_idx = _train_info->getTrainableOps().crbegin()->value();
1416 const uint32_t ops_size = primary_subgraph()->
operations().
size();
1417 const uint32_t trainable_indexes_range = last_trainable_idx - first_trainable_idx + 1;
1420 if (last_trainable_idx == ops_size - 1 &&
1421 trainable_indexes_range == _train_info->getTrainableOps().size())
1424 if (0 == first_trainable_idx)
1430 info->num_of_trainable_ops = trainable_indexes_range;
1436 std::cerr <<
"conversion from set of trainable ops to num_of_trainable_ops is impossible"
1447 catch (
const std::exception &e)
1449 std::cerr <<
"Error during nnfw_session::train_get_traininfo" << e.what() << std::endl;
1458 if (not isStateModelLoaded())
1460 std::cerr <<
"Error during nnfw_session::train_set_traininfo : invalid state" << std::endl;
1464 if (
info ==
nullptr)
1466 std::cerr <<
"nnfw_session::train_set_traininfo : info is nullptr" << std::endl;
1471 assert(_train_info !=
nullptr);
1473 auto convertLossType = [](
const int &
type) {
1479 throw std::runtime_error(
"not supported loss type");
1488 throw std::runtime_error(
"not supported loss reduction type");
1491 auto convertOptType = [](
const int &
type) {
1497 throw std::runtime_error(
"not supported optimizer type");
1503 loss_info.
loss_code = convertLossType(
info->loss_info.loss);
1510 _train_info->setBatchSize(
info->batch_size);
1511 _train_info->setLossInfo(loss_info);
1512 _train_info->setOptimizerInfo(opt_info);
1514 if (
info->num_of_trainable_ops < -1)
1516 std::cerr <<
"Error during nnfw_session::train_set_traininfo: provided num_of_trainable_ops "
1517 "has incorrect value: "
1518 <<
info->num_of_trainable_ops << std::endl;
1522 const uint32_t ops_size = primary_subgraph()->
operations().
size();
1523 std::set<onert::ir::OperationIndex> trainable_ops;
1527 for (uint32_t idx = 0; idx < ops_size; ++idx)
1529 trainable_ops.emplace(idx);
1534 if (
static_cast<uint32_t
>(
info->num_of_trainable_ops) > ops_size)
1537 <<
"Error during nnfw_session::train_set_traininfo: provided num_of_trainable_ops="
1538 <<
info->num_of_trainable_ops <<
" is out of operators range equals: " << ops_size
1542 for (uint32_t i = 1; i <= static_cast<uint32_t>(
info->num_of_trainable_ops); ++i)
1544 trainable_ops.emplace(ops_size - i);
1548 _train_info->setTrainableOps(trainable_ops);
1550 catch (
const std::exception &e)
1552 std::cerr <<
"Error during nnfw_session::train_set_traininfo : " << e.what() << std::endl;
1562 if (!isStateModelLoaded())
1564 std::cerr <<
"Error during model prepare training: ";
1565 if (_state == State::PREPARED_TRAINING)
1566 std::cerr <<
"prepare should be run once";
1568 std::cerr <<
"invalid state";
1569 std::cerr << std::endl;
1574 assert(_train_info !=
nullptr);
1578 if (not _train_info->isValid())
1579 throw std::runtime_error{
"training info is not valid"};
1582 _train_info->trainingStep() = 0;
1585 std::move(_nnpkg), _coptions.get(), _train_info.get());
1586 _compiler_artifact = compiler->compile();
1587 _execution = std::make_unique<onert::exec::Execution>(_compiler_artifact->_executors);
1589 catch (
const std::exception &e)
1591 std::cerr <<
"Error during nnfw_session::train_prepare : " << e.what() << std::endl;
1595 _state = State::PREPARED_TRAINING;
1601 if (!isStatePreparedOrFinishedTraining())
1603 std::cerr <<
"Error during nnfw_session::train_input_tensorinfo : invalid state" << std::endl;
1617 if (!isStatePreparedOrFinishedTraining())
1619 std::cerr <<
"Error during nnfw_session::train_expected_tensorinfo : invalid state"
1635 if (input ==
nullptr)
1637 std::cerr <<
"Error during nnfw_session::train_set_input : input buffer is null" << std::endl;
1641 if (!isStatePreparedOrFinishedTraining())
1643 std::cerr <<
"Error during nnfw_session::train_set_input : invalid state" << std::endl;
1647 if (index >= getInputSize())
1649 std::cerr <<
"Error during nnfw_session::train_set_input : index is out of range" << std::endl;
1656 auto size = _execution->inputInfo(ind).total_size();
1660 <<
"Error during nnfw_session::train_set_input : not supporeted to change tensorinfo"
1665 _execution->setInput(ind, input,
size);
1667 catch (
const std::exception &e)
1669 std::cerr <<
"Error during nnfw_session::train_set_input : " << e.what() << std::endl;
1679 if (expected ==
nullptr)
1681 std::cerr <<
"Error during nnfw_session::train_set_expected : expected buffer is null"
1686 if (!isStatePreparedOrFinishedTraining())
1688 std::cerr <<
"Error during nnfw_session::train_set_expected : invalid state" << std::endl;
1692 if (index >= getOutputSize())
1694 std::cerr <<
"Error during nnfw_session::train_set_expected : index is out of range"
1702 auto size = _execution->outputInfo(ind).total_size();
1703 if (expected_tensorinfo && getBufSize(expected_tensorinfo) !=
size)
1705 std::cerr <<
"Error during nnfw_session::train_set_expected : invalid tensorinfo"
1714 auto input_index = getInputSize() - getOutputSize() + index;
1716 _execution->setInput(input_ind, expected,
size);
1718 catch (
const std::exception &e)
1720 std::cerr <<
"Error during nnfw_session::train_set_expected : " << e.what() << std::endl;
1730 if (!isStatePreparedOrFinishedTraining())
1732 std::cerr <<
"Error during nnfw_session::train_set_output : invalid state" << std::endl;
1736 if (!buffer && length != 0)
1738 std::cerr <<
"Error during nnfw_session::train_set_output : given buffer is NULL but the "
1748 catch (
const std::exception &e)
1750 std::cerr <<
"Error during nnfw_session::train_set_output : " << e.what() << std::endl;
// Body fragment of nnfw_session::train_run (signature not visible): requires
// a prepared/finished training state. One path performs a training step
// (incrementing the persisted step counter), another runs plain inference via
// execute(); the condition selecting between them (likely an update_weights
// flag — TODO confirm) is cut from this chunk. Ends in FINISHED_TRAINING.
1758 if (!isStatePreparedOrFinishedTraining())
1760 std::cerr <<
"Error during nnfw_session::train_run : invalid state" << std::endl;
// Training path: step counter is post-incremented so the first step is 0.
1768 auto &training_step = _train_info->trainingStep();
1769 _execution->train(training_step++);
// Inference-only path (no weight update).
1772 _execution->execute();
1777 std::cerr <<
"Error during nnfw_session::train_run : " << e.
what() << std::endl;
1780 catch (
const std::exception &e)
1782 std::cerr <<
"Error during nnfw_session::train_run : " << e.what() << std::endl;
1786 _state = State::FINISHED_TRAINING;
1792 if (loss ==
nullptr)
1794 std::cerr <<
"Error during nnfw_session::train_get_loss : loss is null" << std::endl;
1798 if (!isStateFinishedTraining())
1800 std::cerr <<
"Error during nnfw_session::train_get_loss : invalid state" << std::endl;
1804 if (index >= getOutputSize())
1806 std::cerr <<
"Error during nnfw_session::train_get_loss : index is out of range" << std::endl;
1813 *loss = _execution->getLoss(ind);
1815 catch (
const std::exception &e)
1817 std::cerr <<
"Error during nnfw_session::train_get_loss : " << e.what() << std::endl;
1826 if (path ==
nullptr)
1828 std::cerr <<
"Error during nnfw_session::train_export_circle : path is null" << std::endl;
1833 if (!isStateFinishedTraining())
1835 std::cerr <<
"Error during nnfw_session::train_export_circle : invalid state" << std::endl;
1842 exporter.updateWeight(_execution);
1844 catch (
const std::exception &e)
1846 std::cerr <<
"Error during nnfw_session::train_export_circle : " << e.what() << std::endl;
1855 if (path ==
nullptr)
1857 std::cerr <<
"Error during nnfw_session::train_export_circleplus : path is null" << std::endl;
1861 if (!isStatePreparedOrFinishedTraining())
1863 std::cerr <<
"Error during nnfw_session::train_export_circleplus : invalid state" << std::endl;
1873 catch (
const std::exception &e)
1875 std::cerr <<
"Error during nnfw_session::train_export_circleplus : " << e.what() << std::endl;
1884 if (path ==
nullptr)
1886 std::cerr <<
"Error during nnfw_session::train_import_checkpoint : path is null" << std::endl;
1890 if (!isStatePreparedOrFinishedTraining())
1892 std::cerr <<
"Error during nnfw_session::train_import_checkpoint : invalid state" << std::endl;
1900 catch (
const std::exception &e)
1902 std::cerr <<
"Error during nnfw_session::train_import_checkpoint : " << e.what() << std::endl;
1911 if (path ==
nullptr)
1913 std::cerr <<
"Error during nnfw_session::train_export_checkpoint : path is null" << std::endl;
1918 if (!isStateFinishedTraining())
1920 std::cerr <<
"Error during nnfw_session::train_export_checkpoint : invalid state" << std::endl;
1928 catch (
const std::exception &e)
1930 std::cerr <<
"Error during nnfw_session::train_export_checkpoint : " << e.what() << std::endl;
1937bool nnfw_session::isStatePreparedTraining()
1939 if (_state == State::PREPARED_TRAINING)
1941 assert(_nnpkg ==
nullptr);
1942 assert(_execution !=
nullptr);
1949bool nnfw_session::isStateFinishedTraining()
1951 if (_state == State::FINISHED_TRAINING)
1953 assert(_nnpkg ==
nullptr);
1954 assert(_execution !=
nullptr);
// Convenience predicate: the two states in which training I/O may be bound
// and train_run() may be (re)invoked.
1961bool nnfw_session::isStatePreparedOrFinishedTraining()
1963 return isStatePreparedTraining() || isStateFinishedTraining();
1971 if (isStateInitialized() || isStateRunning())
1973 std::cerr <<
"invalid state" << std::endl;
1995 _quant_manager->quantizeType(odc_qtype);
1997 catch (
const std::exception &e)
1999 std::cerr <<
"Error during nnfw_session::set_quantization_type : " << e.what() << std::endl;
2010 if (isStateInitialized() || isStateRunning())
2012 std::cerr <<
"invalid state" << std::endl;
2016 _quant_manager->exportModelPath(std::string(path));
2018 catch (
const std::exception &e)
2020 std::cerr <<
"Error during nnfw_session::set_quantized_model_path : " << e.what() << std::endl;
// Body fragment of nnfw_session::quantize (signature not visible): rejects
// uninitialized/running sessions, quantizes the model at _model_path through
// the QuantizeManager, and on success reloads the quantized artifact as a
// circle model (replacing the session's current model). The check on
// `result` between lines 2037 and 2043 is cut from this chunk.
2031 if (isStateInitialized() || isStateRunning())
2033 std::cerr <<
"invalid state" << std::endl;
2037 auto result = _quant_manager->quantize(_model_path.string());
2043 return loadModelFile(_quant_manager->exportModelPath(),
"circle");
2045 catch (
const std::exception &e)
2047 std::cerr <<
"Error during nnfw_session::quantize : " << e.what() << std::endl;
2056 if (isStateInitialized() || isStateRunning())
2058 std::cerr <<
"invalid state" << std::endl;
2062 assert(_codegen_manager !=
nullptr);
2063 _codegen_manager->exportModelPath(std::string(path));
2065 catch (
const std::exception &e)
2067 std::cerr <<
"Error during nnfw_session::set_codegen_model_path : " << e.what() << std::endl;
2078 if (isStateInitialized() || isStateRunning())
2080 std::cerr <<
"Error during nnfw_session::codegen : Invalid state" << std::endl;
2084 std::string target_str{target};
2085 if (target_str.empty() || target_str.size() < 5 ||
2086 target_str.substr(target_str.size() - 4) !=
"-gen")
2088 std::cerr <<
"Error during nnfw_session::codegen : Invalid target" << std::endl;
2108 std::cerr <<
"Error during nnfw_session::codegen : Invalid preference" << std::endl;
2112 assert(_codegen_manager !=
nullptr);
2113 auto export_model_path = std::filesystem::path(_codegen_manager->exportModelPath());
2114 const auto model_type = target_str.substr(0, target_str.size() - 4);
2117 if (export_model_path.empty())
2121 export_model_path = _model_path.replace_extension(model_type);
2122 _codegen_manager->exportModelPath(export_model_path.string());
2125 _codegen_manager->codegen(_model_path, target, codegen_pref);
2129 return loadModelFile(export_model_path, model_type);
2131 catch (
const std::exception &e)
2133 std::cerr <<
"Error during nnfw_session::compile : " << e.what() << std::endl;
2140 if (!isStateModelLoaded())
2142 std::cerr <<
"Error during nnfw_session::set_prepare_config : Invalid state" << std::endl;
2149 _coptions->he_profiling_mode =
true;
2152 _coptions->internal_output_alloc =
true;
2163 if (!isStateModelLoaded())
2165 std::cerr <<
"Error during nnfw_session::reset_prepare_config : Invalid state" << std::endl;
2169 _coptions->he_profiling_mode =
false;
2176 if (!isStatePreparedOrFinishedRun())
2178 std::cerr <<
"Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
2185 if (_coptions->workspace_dir.empty())
2187 _execution->executionOptions().dump_minmax =
true;
2190 if (_coptions->workspace_dir.empty())
2192 _execution->executionOptions().trace =
true;
2195 _execution->executionOptions().profile =
true;
2206 if (!isStatePreparedOrFinishedRun())
2208 std::cerr <<
"Error during nnfw_session::set_execution_config : Invalid state" << std::endl;
2212 _execution->executionOptions().dump_minmax =
false;
2213 _execution->executionOptions().trace =
false;
2214 _execution->executionOptions().profile =
false;
2221 if (isStateInitialized() || isStateRunning())
2223 std::cerr <<
"invalid state" << std::endl;
2227 if (_quant_manager->setMinMaxRecordsThreshold(minmax_records_count))
2235 if (isStateRunning())
2237 std::cerr <<
"invalid state" << std::endl;
2241 if (_quant_manager->deleteMinMaxFile())
2251 if (!isStatePreparedOrFinishedRun())
2253 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : "
2254 <<
"run should be after preparation" << std::endl;
2259 std::string target_str{target};
2260 if (_quant_manager->exportModelPath().empty() || _codegen_manager->exportModelPath().empty() ||
2261 target_str.empty() || target_str.substr(target_str.size() - 4) !=
"-gen")
2263 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : "
2264 <<
"quantization and code generation parameters should be set" << std::endl;
2270 std::ifstream file_quantized_model(_quant_manager->exportModelPath());
2271 std::ifstream file_compiled_model(_codegen_manager->exportModelPath());
2273 if (!file_quantized_model.good() && !file_compiled_model.good())
2278 auto saved_options = _execution->executionOptions();
2280 _execution->executionOptions().dump_minmax =
true;
2284 _execution->execute();
2289 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : " << e.
what()
2293 catch (
const std::exception &e)
2295 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : " << e.
what()
2300 _state = State::FINISHED_RUN;
2303 _execution->executionOptions().dump_minmax = saved_options.dump_minmax;
2306 if (_quant_manager->readyForQuantize())
2310 if (isStateInitialized() || isStateRunning())
2312 std::cerr <<
"invalid state" << std::endl;
2316 auto result = _quant_manager->quantize(_model_path);
2321 result = _quant_manager->deleteMinMaxFile();
2325 catch (
const std::exception &e)
2328 <<
"Error during nnfw_session::run_with_auto_compilation in quantize operation: "
2329 << e.what() << std::endl;
2341 _execution->executionOptions().dump_minmax =
false;
2344 if (_autoCompilationState == nnfw_session::AutoCompilationState::INITIAL_STATE)
2346 auto dotidx = _codegen_manager->exportModelPath().rfind(
'.');
2347 if (dotidx == std::string::npos)
2349 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : Invalid compiled "
2350 "model path. Please use a "
2351 "path that includes the extension."
2356 std::string compiled_model_type =
2357 _codegen_manager->exportModelPath().substr(dotidx + 1);
2359 dotidx = _quant_manager->exportModelPath().rfind(
'.');
2360 if (dotidx == std::string::npos)
2362 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : Invalid quantized "
2363 "model path. Please use a "
2364 "path that includes the extension."
2368 std::string quantized_model_type =
2369 _quant_manager->exportModelPath().substr(dotidx + 1);
2375 std::vector<const void *> _input_buffers;
2376 std::vector<void *> _output_buffers;
2384 _coptions->input_type.insert_or_assign(input_index,
TypeInfo(DataType::FLOAT32));
2388 _coptions->output_type.insert_or_assign(output_index,
TypeInfo(DataType::FLOAT32));
2391 if (file_compiled_model.good())
2394 status = loadModelFile(_codegen_manager->exportModelPath(), compiled_model_type);
2397 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2406 _model_path = _quant_manager->exportModelPath();
2409 status =
codegen(target, pref);
2412 _autoCompilationState = nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED;
2418 if (_autoCompilationState != nnfw_session::AutoCompilationState::COMPILED_MODEL_LOADED)
2421 status = loadModelFile(_quant_manager->exportModelPath(), quantized_model_type);
2425 _autoCompilationState = nnfw_session::AutoCompilationState::QUANTIZED_MODEL_LOADED;
2433 _execution->restoreContext(ctx_backup);
2437 if (!isStatePreparedOrFinishedRun())
2439 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : "
2440 <<
"run should be run after prepare" << std::endl;
2446 _execution->execute();
2451 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : " << e.
what()
2455 catch (
const std::exception &e)
2457 std::cerr <<
"Error during nnfw_session::run_with_auto_compilation : " << e.
what()
2462 _state = State::FINISHED_RUN;
const char * what() const noexcept override
static CompilerFactory & get()
std::unique_ptr< ICompiler > create(std::unique_ptr< ir::NNPkg > nnpkg, CompilerOptions *copts, const ir::train::TrainingInfo *training_info=nullptr)
Create ICompiler instance. Ownership of nnpkg is moved to ICompiler instance.
void updateMetadata(const std::unique_ptr< onert::ir::train::TrainingInfo > &training_info)
void updateWeight(const std::unique_ptr< onert::exec::Execution > &exec)
bool valid() const
Check whether the value is valid or not.
static uint16_t max()
Return max index value.
size_t size() const
Return the number of objects that the manager contains.
volatile const char info[]
SessionID session(const coco::Module *m)
std::unique_ptr< mir::Graph > loadModel(std::string predict_net, std::string init_net, const std::vector< std::vector< int > > &input_shapes)
std::string join(InputIt first, InputIt last, const std::string &concat)
std::vector< std::string > split(const std::string &s, char delim)
nnfw::cker::train::LossReductionType convertLossReductionType(ir::train::LossReductionType type)
convert loss reduction type
void exportCheckpoint(const std::string &filename, const std::unique_ptr< ir::train::TrainingInfo > &train_info, const std::unique_ptr< exec::Execution > &exec)
@ CategoricalCrossentropy
std::tuple< ModelIndex, SubgraphIndex, IOIndex > IODesc
::onert::util::Index< uint32_t, IOIndexTag > IOIndex
::onert::util::Index< uint16_t, SubgraphIndexTag > SubgraphIndex
void loadCheckpoint(const std::string &filename, const std::unique_ptr< ir::train::TrainingInfo > &train_info, const std::unique_ptr< exec::Execution > &exec)
std::unique_ptr< ir::train::TrainingInfo > loadTrainingInfo(const uint8_t *buffer, const size_t size)
std::unique_ptr< ir::Model > loadCircleModel(const std::string &filename)
const char *const TRAININFO_METADATA_NAME
std::unique_ptr< ir::Model > loadTFLiteModel(const std::string &filename)
std::unique_ptr< ir::Model > loadModel(const std::string &filename, const std::string &type)
Create custom loader and load model from file.
@ CODEGEN_PREF_PERFORMANCE_FIRST
@ CODEGEN_PREF_MEMORY_FIRST
@ CODEGEN_PREF_COMPILE_TIME_FIRST
std::unordered_map< std::string, std::string > CfgKeyValues
void setConfigKeyValues(const CfgKeyValues &keyValues)
@ NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED
@ NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED
@ NNFW_TYPE_TENSOR_QUANT8_ASYMM
NNFW_LAYOUT
Data format of a tensor.
@ NNFW_LAYOUT_CHANNELS_LAST
@ NNFW_LAYOUT_CHANNELS_FIRST
void(* nnfw_custom_eval)(nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size)
NNFW_CODEGEN_PREF
Preference for target-dependent code generation.
@ NNFW_CODEGEN_PREF_DEFAULT
@ NNFW_CODEGEN_PREF_MEMORY_FIRST
@ NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST
@ NNFW_CODEGEN_PREF_PERFORMANCE_FIRST
@ NNFW_TRAIN_TRAINABLE_NONE
@ NNFW_TRAIN_TRAINABLE_ALL
@ NNFW_TRAIN_TRAINABLE_INCORRECT_STATE
NNFW_QUANTIZE_TYPE
Convert between training mode and inference mode.
@ NNFW_QUANTIZE_TYPE_WO_I16_SYM
@ NNFW_QUANTIZE_TYPE_U8_ASYM
@ NNFW_QUANTIZE_TYPE_I16_SYM
@ NNFW_QUANTIZE_TYPE_WO_I8_SYM
NNFW_RUN_CONFIG
Configuration key for execution.
@ NNFW_RUN_CONFIG_PROFILE
@ NNFW_RUN_CONFIG_DUMP_MINMAX
NNFW_PREPARE_CONFIG
Configuration key for prepare (compile and schedule)
@ NNFW_PREPARE_CONFIG_PROFILE
@ NNFW_ENABLE_INTERNAL_OUTPUT_ALLOC
#define MAX_TENSOR_NAME_LENGTH
#define MAX_BACKEND_NAME_LENGTH
NNFW_STATUS
Result values returned from a call to an API function.
@ NNFW_STATUS_INVALID_STATE
@ NNFW_STATUS_UNEXPECTED_NULL
@ NNFW_STATUS_DEPRECATED_API
@ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE
@ NNFW_STATUS_OUT_OF_MEMORY
NNFW_TRAIN_LOSS_REDUCTION
@ NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED
@ NNFW_TRAIN_LOSS_REDUCTION_SUM
@ NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE
@ NNFW_TRAIN_OPTIMIZER_ADAM
@ NNFW_TRAIN_OPTIMIZER_SGD
@ NNFW_TRAIN_OPTIMIZER_UNDEFINED
@ NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR
@ NNFW_TRAIN_LOSS_UNDEFINED
@ NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY
@ NNFW_TYPE_TENSOR_FLOAT32
#define NNFW_MAX_RANK
Maximum rank expressible with nnfw.
#define UPDATE_VERBOSE_CONFIG()
This file contains helper functions for std::string.
NNFW_STATUS train_prepare()
NNFW_STATUS set_input_type(uint32_t index, NNFW_TYPE type)
NNFW_STATUS train_set_output(uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
NNFW_STATUS train_get_traininfo(nnfw_train_info *info)
NNFW_STATUS set_config(const char *key, const char *value)
NNFW_STATUS train_set_expected(uint32_t index, void *expected)
NNFW_STATUS delete_odc_minmax_file()
NNFW_STATUS set_odc_param_minmax_records_count(int minmax_records_count)
NNFW_STATUS train_run(bool update_weights)
NNFW_STATUS set_signature_for_tensorinfo(const char *signature)
NNFW_STATUS input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
NNFW_STATUS output_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
NNFW_STATUS set_input_tensorinfo(uint32_t index, const nnfw_tensorinfo *ti)
NNFW_STATUS input_tensorindex(const char *tensorname, uint32_t *index)
NNFW_STATUS train_export_checkpoint(const char *path)
NNFW_STATUS set_output(uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
NNFW_STATUS train_import_checkpoint(const char *path)
NNFW_STATUS set_workspace(const char *dir)
static NNFW_STATUS deprecated(const char *msg)
NNFW_STATUS train_expected_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
NNFW_STATUS reset_prepare_config()
NNFW_STATUS set_codegen_model_path(const char *path)
NNFW_STATUS train_set_input(uint32_t index, void *input)
NNFW_STATUS reset_execute_config()
NNFW_STATUS set_input(uint32_t index, NNFW_TYPE type, const void *buffer, size_t length)
NNFW_STATUS output_size(uint32_t *number)
NNFW_STATUS train_get_loss(uint32_t index, float *loss)
NNFW_STATUS load_model_from_path(const char *path)
NNFW_STATUS set_backends_per_operation(const char *backend_settings)
Set backends with string-encoded mapping from operation index to backend type (cpu,...
NNFW_STATUS set_quantization_type(NNFW_QUANTIZE_TYPE qtype)
NNFW_STATUS input_size(uint32_t *number)
NNFW_STATUS set_output_type(uint32_t index, NNFW_TYPE type)
NNFW_STATUS set_prepare_config(const NNFW_PREPARE_CONFIG key, const char *value)
NNFW_STATUS set_available_backends(const char *backends)
NNFW_STATUS get_config(const char *key, char *value, size_t value_size)
NNFW_STATUS codegen(const char *target, NNFW_CODEGEN_PREF pref)
NNFW_STATUS set_execute_config(const NNFW_RUN_CONFIG key, const char *value)
NNFW_STATUS run_with_auto_compilation(const char *target, NNFW_CODEGEN_PREF pref)
NNFW_STATUS set_output_layout(uint32_t index, NNFW_LAYOUT layout)
NNFW_STATUS train_input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti)
NNFW_STATUS register_custom_operation(const std::string &id, nnfw_custom_eval eval_func)
NNFW_STATUS get_output(uint32_t index, nnfw_tensorinfo *out_info, const void **out_buffer)
NNFW_STATUS train_export_circle(const char *path)
NNFW_STATUS output_tensorindex(const char *tensorname, uint32_t *index)
NNFW_STATUS set_quantized_model_path(const char *path)
static NNFW_STATUS create(nnfw_session **session)
Factory method. It creates and initialize nnfw_session.
NNFW_STATUS set_signature_run(const char *signature)
NNFW_STATUS load_circle_from_buffer(uint8_t *buffer, size_t size)
NNFW_STATUS train_export_circleplus(const char *path)
NNFW_STATUS set_input_layout(uint32_t index, NNFW_LAYOUT layout)
NNFW_STATUS train_set_traininfo(const nnfw_train_info *info)
tensor info describes the type and shape of tensors
int32_t dims[NNFW_MAX_RANK]
Training information to prepare training.
virtual const Operations & operations() const =0
LossReductionType reduction_type