ONE - On-device Neural Engine
onert::loader::BaseLoader< LoaderDomain > Class Template Reference [abstract]

#include <BaseLoader.h>

Collaboration diagram for onert::loader::BaseLoader< LoaderDomain >:

Public Member Functions

 BaseLoader ()
 Construct a new Loader object.
 
std::unique_ptr< ir::Model > loadFromFile (const std::string &file_path)
 Load a model from file.
 
std::unique_ptr< ir::Model > loadFromBuffer (uint8_t *buffer, size_t size)
 Load a model from a buffer.
 

Protected Types

using Verifier = typename LoaderDomain::Verifier
 
using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType
 
using Buffer = typename LoaderDomain::Buffer
 
using BuiltinOperator = typename LoaderDomain::BuiltinOperator
 
using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat
 
using Metadata = typename LoaderDomain::Metadata
 
using Model = typename LoaderDomain::Model
 
using Operator = typename LoaderDomain::Operator
 
using Padding = typename LoaderDomain::Padding
 
using Pool2DOptions = typename LoaderDomain::Pool2DOptions
 
using SubGraph = typename LoaderDomain::SubGraph
 
using Tensor = typename LoaderDomain::Tensor
 
using TensorType = typename LoaderDomain::TensorType
 
using DimensionType = typename LoaderDomain::DimensionType
 
using SparseIndexVector = typename LoaderDomain::SparseIndexVector
 

Protected Member Functions

bool isOptionalInputTensor (std::int32_t idx)
 
virtual bool allowOptionalInputTensor (BuiltinOperator)=0
 
 ~BaseLoader ()=default
 
std::unique_ptr< ir::Model > loadModel ()
 
ir::Activation convertActivation (ActivationFunctionType type)
 
virtual ir::DataType tensorTypeToDataType (const TensorType type)
 
ir::OperandIndex tensorIdxToOperandIdx (int32_t tensorIdx)
 
flexbuffers::Map getCustomOpAttrMap (const Operator *op)
 
ir::OperandIndex loadOperand (const Tensor *tensor, ir::Graph &subg)
 
void loadQuantization (const Tensor *tensor, ir::TypeInfo &typeInfo)
 
void loadSparsity (const Tensor *tensor, ir::TypeInfo &typeInfo)
 
void loadOperationIO (const Operator *op, ir::OperandIndexSequence &inputs, ir::OperandIndexSequence &outputs)
 
void loadOperation (const Operator *op, ir::Graph &subg)
 
template<typename Param , typename OptionsType >
void loadStridesAndPaddings (Param &param, const OptionsType *options)
 
template<typename Param >
void loadPool2DOptions (Param &param, const Pool2DOptions *options)
 
BuiltinOperator getBuiltinOperator (const Operator *op)
 
void verifySubgraphIndex (int subg_index)
 

Protected Attributes

uint8_t * _base
 
int32_t _pagesize
 
int _fd
 
std::string _file_path
 
const Model * _domain_model
 
std::vector< ir::OperandIndex > _tensor_to_operand
 
std::unordered_map< ir::OperandIndex, std::string > _tensor_names
 
std::unique_ptr< Verifier > _verifier
 
bool _use_mmaped_data = false
 
std::unordered_map< uint32_t, std::shared_ptr< ir::Data > > _buf_to_data
 

Detailed Description

template<typename LoaderDomain>
class onert::loader::BaseLoader< LoaderDomain >

Definition at line 40 of file BaseLoader.h.
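
BaseLoader implements the schema-independent part of model loading; the LoaderDomain template parameter supplies the flatbuffer-generated types (see the Protected Types above) plus the verification and root-table accessors used by loadModel(). The following declarations-only sketch shows the expected shape of a domain; every name here is illustrative, and the real domains come from the generated TFLite/Circle schema headers.

// Illustrative interface only; real domains are the generated schema namespaces.
#include <cstdint>

struct ExampleLoaderDomain
{
  // Schema-generated types that BaseLoader aliases (see Protected Types).
  struct Verifier;
  struct Buffer;
  struct Metadata;
  struct Model;
  struct Operator;
  struct Pool2DOptions;
  struct SubGraph;
  struct Tensor;
  enum class ActivationFunctionType : int8_t {};
  enum class BuiltinOperator : int32_t {};
  enum class CustomOptionsFormat : int8_t {};
  enum class Padding : int8_t {};
  enum class TensorType : int8_t {};
  enum class DimensionType : int8_t {};
  enum class SparseIndexVector : uint8_t {};

  // Entry points loadModel() calls on the domain.
  static bool VerifyModelBuffer(Verifier &verifier);
  static const Model *GetModel(const uint8_t *base);
  // The domain's schema namespace is also expected to provide free functions
  // such as EnumNameBuiltinOperator(), used in error messages.
};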

Member Typedef Documentation

◆ ActivationFunctionType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::ActivationFunctionType = typename LoaderDomain::ActivationFunctionType
protected

Definition at line 44 of file BaseLoader.h.

◆ Buffer

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Buffer = typename LoaderDomain::Buffer
protected

Definition at line 45 of file BaseLoader.h.

◆ BuiltinOperator

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::BuiltinOperator = typename LoaderDomain::BuiltinOperator
protected

Definition at line 46 of file BaseLoader.h.

◆ CustomOptionsFormat

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat
protected

Definition at line 47 of file BaseLoader.h.

◆ DimensionType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::DimensionType = typename LoaderDomain::DimensionType
protected

Definition at line 56 of file BaseLoader.h.

◆ Metadata

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Metadata = typename LoaderDomain::Metadata
protected

Definition at line 48 of file BaseLoader.h.

◆ Model

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Model = typename LoaderDomain::Model
protected

Definition at line 49 of file BaseLoader.h.

◆ Operator

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Operator = typename LoaderDomain::Operator
protected

Definition at line 50 of file BaseLoader.h.

◆ Padding

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Padding = typename LoaderDomain::Padding
protected

Definition at line 51 of file BaseLoader.h.

◆ Pool2DOptions

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Pool2DOptions = typename LoaderDomain::Pool2DOptions
protected

Definition at line 52 of file BaseLoader.h.

◆ SparseIndexVector

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::SparseIndexVector = typename LoaderDomain::SparseIndexVector
protected

Definition at line 57 of file BaseLoader.h.

◆ SubGraph

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::SubGraph = typename LoaderDomain::SubGraph
protected

Definition at line 53 of file BaseLoader.h.

◆ Tensor

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Tensor = typename LoaderDomain::Tensor
protected

Definition at line 54 of file BaseLoader.h.

◆ TensorType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::TensorType = typename LoaderDomain::TensorType
protected

Definition at line 55 of file BaseLoader.h.

◆ Verifier

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Verifier = typename LoaderDomain::Verifier
protected

Definition at line 43 of file BaseLoader.h.

Constructor & Destructor Documentation

◆ BaseLoader()

template<typename LoaderDomain >
onert::loader::BaseLoader< LoaderDomain >::BaseLoader ( )
inline explicit

Construct a new Loader object.

Definition at line 67 of file BaseLoader.h.

67 : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _domain_model{nullptr}
68 {
69 _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA);
70 }

References onert::loader::BaseLoader< LoaderDomain >::_use_mmaped_data, and onert::util::getConfigBool().
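
The constructor only records whether constant data should stay mmap-backed, by querying the USE_MMAPED_DATA configuration key. A hypothetical way to enable it, assuming the key is picked up from the environment (onert's usual config source); adjust to however your build wires configuration:

#include <cstdlib>

int main()
{
  // Read later by util::getConfigBool(util::config::USE_MMAPED_DATA).
  setenv("USE_MMAPED_DATA", "1", /*overwrite=*/1);
  // ... construct a concrete loader and call loadFromFile() afterwards ...
  return 0;
}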

◆ ~BaseLoader()

template<typename LoaderDomain >
onert::loader::BaseLoader< LoaderDomain >::~BaseLoader ( )
protected default

Member Function Documentation

◆ allowOptionalInputTensor()

template<typename LoaderDomain >
virtual bool onert::loader::BaseLoader< LoaderDomain >::allowOptionalInputTensor ( BuiltinOperator  )
protected pure virtual
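
Each concrete loader decides which operators may use the optional-input sentinel (tensor index -1); the comment in loadOperationIO() notes that only FULLY_CONNECTED (and Circle's BCQ_FULLY_CONNECTED) are expected to do so. A sketch of a derived-class override, assuming the schema provides the FULLY_CONNECTED enumerator shown:

#include "BaseLoader.h" // hypothetical include path

template <typename LoaderDomain>
class MyLoader : public onert::loader::BaseLoader<LoaderDomain>
{
protected:
  using BuiltinOperator = typename LoaderDomain::BuiltinOperator;

  bool allowOptionalInputTensor(BuiltinOperator op) override
  {
    switch (op)
    {
      case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
        return true; // e.g. the bias input may be omitted (index -1)
      default:
        return false;
    }
  }
};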

◆ convertActivation()

template<typename LoaderDomain >
ir::Activation onert::loader::BaseLoader< LoaderDomain >::convertActivation ( ActivationFunctionType  type)
protected

◆ getBuiltinOperator()

template<typename LoaderDomain >
BuiltinOperator onert::loader::BaseLoader< LoaderDomain >::getBuiltinOperator ( const Operator *  op)
inline protected

Definition at line 112 of file BaseLoader.h.

113 {
114 auto const builtin_opcode = _domain_model->operator_codes()->Get(op->opcode_index());
115 auto builtin_op = builtin_opcode->builtin_code();
116 if (builtin_op < BuiltinOperator::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
117 builtin_op = static_cast<BuiltinOperator>(builtin_opcode->deprecated_builtin_code());
118
119 return builtin_op;
120 }

References onert::loader::BaseLoader< LoaderDomain >::_domain_model.

◆ getCustomOpAttrMap()

template<typename LoaderDomain >
flexbuffers::Map onert::loader::BaseLoader< LoaderDomain >::getCustomOpAttrMap ( const Operator *  op)
protected

◆ isOptionalInputTensor()

template<typename LoaderDomain >
bool onert::loader::BaseLoader< LoaderDomain >::isOptionalInputTensor ( std::int32_t  idx)
inline protected

Definition at line 60 of file BaseLoader.h.

60{ return idx == -1; }

◆ loadFromBuffer()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadFromBuffer ( uint8_t *  buffer,
size_t  size 
)

Load a model from a buffer.

Parameters
[in]  buffer  buffer pointer
[in]  size  buffer size
Returns
Loaded model object

◆ loadFromFile()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadFromFile ( const std::string &  file_path)

Load a model from file.

Parameters
[in]  file_path  model file path
Returns
Loaded model object
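
A hypothetical call site (ConcreteLoader stands in for a fully derived loader such as the MyLoader sketch above):

#include <memory>
#include <string>

namespace onert::ir { class Model; } // forward declaration for this sketch only

template <typename ConcreteLoader>
std::unique_ptr<onert::ir::Model> loadExample(const std::string &path)
{
  ConcreteLoader loader;            // reads the USE_MMAPED_DATA config on construction
  return loader.loadFromFile(path); // opens and verifies the file, then builds the IR model
}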

◆ loadModel()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadModel ( )
protected

Definition at line 1695 of file BaseLoader.h.

1696{
1697 LoaderDomain::VerifyModelBuffer(*_verifier.get());
1698 _domain_model = LoaderDomain::GetModel(_base);
1699
1700 auto model = std::make_unique<ir::Model>();
1701 // Version unused
1702 // const auto version = _model->version();
1703 // Description unused
1704
1705 // Load Metadata
1706 auto const metadata_list = _domain_model->metadata();
1707 if (metadata_list != nullptr)
1708 {
1709 for (uint32_t i = 0; i < metadata_list->size(); ++i)
1710 {
1711 const auto metadata = metadata_list->Get(i);
1712 if (metadata->name() == nullptr)
1713 continue; // metadata should have name
1714
1715 std::unique_ptr<const ir::Data> data = loadMetadata(metadata->buffer());
1716 model->add_metadata(metadata->name()->str(), std::move(data));
1717 }
1718 }
1719
1720 // Load signature map
1721 auto const signature_table = _domain_model->signature_defs();
1722 if (signature_table != nullptr)
1723 {
1724 for (uint32_t i = 0; i < signature_table->size(); ++i)
1725 {
1726 const auto signature = signature_table->Get(i);
1727 if (signature == nullptr)
1728 continue;
1729 const auto signature_key = signature->signature_key();
1730 if (signature_key == nullptr)
1731 continue; // signature should have key
1732 const auto subgraph_index = static_cast<uint16_t>(signature->subgraph_index());
1733
1734 model->addSignatureMap(ir::SubgraphIndex{subgraph_index}, signature_key->str());
1735 }
1736 }
1737
1738 // const auto *description = _model->description();
1739 // Load subgraphs and map operations on subgraph
1740 const auto subgraphs = _domain_model->subgraphs();
1741 if (subgraphs->size() - 1 > ir::SubgraphIndex::max())
1742 throw std::runtime_error{"The number of subgraphs cannot exceed " +
1743 std::to_string(ir::SubgraphIndex::max() + 1)};
1744 for (uint16_t subgraph_index = 0; subgraph_index < subgraphs->size(); ++subgraph_index)
1745 {
1746 auto subg = loadSubgraph((*_domain_model->subgraphs())[subgraph_index]);
1747 // NOTE: Used () instead of {}, which does not check narrowing.
1748 // It is okay since overflow is checked by the above if-statement.
1749 model->push(ir::SubgraphIndex(subgraph_index), std::move(subg));
1750 }
1751 return model;
1752}

References onert::util::Index< uint16_t, SubgraphIndexTag >::max().

◆ loadOperand()

template<typename LoaderDomain >
ir::OperandIndex onert::loader::BaseLoader< LoaderDomain >::loadOperand ( const Tensor *  tensor,
ir::Graph &  subg 
)
protected

Definition at line 362 of file BaseLoader.h.

363{
364 ir::Shape shape;
365 // Shape
366 const auto *tensor_shape =
367 tensor->shape_signature() ? tensor->shape_signature() : tensor->shape();
368 if (tensor_shape != nullptr)
369 {
370 for (const auto &dim : *tensor_shape)
371 {
372 shape.append(dim);
373 }
374 }
375
376 // Note for tensor->shape_signature()
377 // We don't handle shape signature
378 // How we handle:
379 // If shape_signature[k] == -1, we will use tensor->shape()[k] == 1
380 // If the app wants to change the input shape, nnfw_apply_input_tensorinfo()
381 // can be used.
382
383 // TypeInfo
384 ir::TypeInfo type_info(tensorTypeToDataType(tensor->type()));
385 // Ignore quantize param if datatype does not requires
386 if (ir::requireQuantParam(type_info.type()))
387 loadQuantization(tensor, type_info);
388 loadSparsity(tensor, type_info);
389
390 // Create operand
391 const auto operand_index = subg.addOperand(shape, type_info);
392
393 // Constant tensors are indicated by non-empty data.
394 const auto *data = _domain_model->buffers()->Get(tensor->buffer())->data();
395 if (data != nullptr)
396 {
397 using std::ptrdiff_t;
398 std::shared_ptr<ir::Data> data_obj;
399
400 if (_fd == -1) // Model is from memory
401 {
402 data_obj = std::make_shared<ir::ExternalData>(data->data(), data->size());
403 }
404 else // Model is loaded(mmap'd) from a file
405 {
406 size_t data_size = data->size();
407 ptrdiff_t unaligned_offset_start = data->data() - _base;
408 ptrdiff_t offset_end = unaligned_offset_start + data_size;
409
410 // Calculated aligned offset from base address of mapped region
411 // munmap accepts memory address which is a multiple of the pagesize
412 ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize;
413 size_t mmap_size = offset_end - aligned_offset_start;
414
415 uint32_t buf_idx = tensor->buffer();
416 auto buffer_found = _buf_to_data.find(buf_idx);
417
418 if (buffer_found != _buf_to_data.end())
419 {
420 // Another tensor points this buffer and its matching Data(either CachedData or MMapedData)
421 // was already created. Let's reuse the Data
422 data_obj = buffer_found->second;
423 }
424 else if (_use_mmaped_data)
425 {
426 data_obj = std::make_shared<ir::MMapedData>(_fd, aligned_offset_start, mmap_size,
427 unaligned_offset_start, data_size);
428 _buf_to_data[buf_idx] = data_obj;
429 }
430 else
431 {
432 size_t offset = unaligned_offset_start - aligned_offset_start;
433 uint8_t *mmap_base = static_cast<uint8_t *>(
434 mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, _fd, aligned_offset_start));
435
436 data_obj = std::make_shared<ir::CachedData>(mmap_base + offset, data_size);
437 _buf_to_data[buf_idx] = data_obj;
438
439 munmap(mmap_base, mmap_size);
440 }
441 }
442 subg.setOperandValue(operand_index, std::move(data_obj));
443 }
444
445 _tensor_names.emplace(operand_index, tensor->name()->str());
446
447 // Variable
448 if (tensor->is_variable())
449 {
450 if (data != nullptr)
451 throw std::runtime_error("Variable tensor with buffer is not supported!");
452
453 subg.operands().at(operand_index).info().setAsVariable();
454 }
455
456 return operand_index;
457}

References onert::ir::Graph::addOperand(), onert::util::ObjectManager< Index, Object >::at(), onert::ir::Graph::operands(), onert::ir::requireQuantParam(), onert::ir::Graph::setOperandValue(), and onert::ir::TypeInfo::type().
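
The file-backed branch above has to respect mmap()'s requirement that the file offset be page-aligned: the tensor's byte offset is rounded down to a page boundary and the mapping is enlarged to cover the data. A standalone illustration of that arithmetic (not code from BaseLoader itself):

#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

// Maps the page-aligned window covering [unaligned_offset, unaligned_offset + data_size).
// The tensor data then starts out_delta bytes into the returned mapping.
void *map_tensor_data(int fd, std::ptrdiff_t unaligned_offset, std::size_t data_size,
                      std::ptrdiff_t &out_delta, std::size_t &out_mmap_size)
{
  const long pagesize = sysconf(_SC_PAGESIZE);
  const std::ptrdiff_t aligned_offset = (unaligned_offset / pagesize) * pagesize;
  out_delta = unaligned_offset - aligned_offset;
  out_mmap_size = static_cast<std::size_t>(out_delta) + data_size;
  return mmap(nullptr, out_mmap_size, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
}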

◆ loadOperation()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadOperation ( const Operator *  op,
ir::Graph &  subg 
)
protected

Definition at line 1388 of file BaseLoader.h.

1389{
1390 auto const builtin_op = getBuiltinOperator(op);
1391
1392 switch (builtin_op)
1393 {
1394 case BuiltinOperator::BuiltinOperator_ADD_N:
1395 loadOperationTo<ir::operation::AddN>(op, subg);
1396 return;
1397 case BuiltinOperator::BuiltinOperator_CONV_2D:
1398 loadConv2D(op, subg);
1399 return;
1400 case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
1401 loadPool2D(op, subg, ir::operation::Pool2D::PoolType::AVG);
1402 return;
1403 case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
1404 loadDepthwiseConv2D(op, subg);
1405 return;
1406 case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV:
1407 loadTransposeConv(op, subg);
1408 return;
1409 case BuiltinOperator::BuiltinOperator_RESHAPE:
1410 loadReshape(op, subg);
1411 return;
1412 case BuiltinOperator::BuiltinOperator_SOFTMAX:
1413 loadSoftmax(op, subg);
1414 return;
1415 case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
1416 loadPool2D(op, subg, ir::operation::Pool2D::PoolType::MAX);
1417 return;
1418 case BuiltinOperator::BuiltinOperator_CONCATENATION:
1419 loadConcatenation(op, subg);
1420 return;
1421 case BuiltinOperator::BuiltinOperator_FLOOR:
1422 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::FLOOR);
1423 return;
1424 case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
1425 loadFC(op, subg);
1426 return;
1427 case BuiltinOperator::BuiltinOperator_ADD:
1428 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::ADD);
1429 return;
1430 case BuiltinOperator::BuiltinOperator_SUB:
1431 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::SUB);
1432 return;
1433 case BuiltinOperator::BuiltinOperator_MUL:
1434 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::MUL);
1435 return;
1436 case BuiltinOperator::BuiltinOperator_DIV:
1437 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::DIV);
1438 return;
1439 case BuiltinOperator::BuiltinOperator_PACK:
1440 loadPack(op, subg);
1441 return;
1442 case BuiltinOperator::BuiltinOperator_ELU:
1443 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::ELU);
1444 return;
1445 case BuiltinOperator::BuiltinOperator_RELU:
1446 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU,
1447 ir::operation::ElementwiseActivation::infinity, 0.f);
1448 return;
1449 case BuiltinOperator::BuiltinOperator_RELU_N1_TO_1:
1450 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 1.f,
1451 -1.f);
1452 return;
1453 case BuiltinOperator::BuiltinOperator_RELU6:
1454 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 6.f,
1455 0.f);
1456 return;
1457 case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
1458 loadResizeBilinear(op, subg);
1459 return;
1460 case BuiltinOperator::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
1461 loadResizeNearestNeighbor(op, subg);
1462 return;
1463 case BuiltinOperator::BuiltinOperator_RSQRT:
1464 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
1465 return;
1466 case BuiltinOperator::BuiltinOperator_SELECT:
1467 case BuiltinOperator::BuiltinOperator_SELECT_V2:
1468 loadOperationTo<ir::operation::Select>(op, subg);
1469 return;
1470 case BuiltinOperator::BuiltinOperator_SQRT:
1471 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
1472 return;
1473 case BuiltinOperator::BuiltinOperator_SQUARE:
1474 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQUARE);
1475 return;
1476 case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
1477 loadOperationTo<ir::operation::SquaredDifference>(op, subg);
1478 return;
1479 case BuiltinOperator::BuiltinOperator_TANH:
1480 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
1481 1.f);
1482 return;
1483 case BuiltinOperator::BuiltinOperator_TRANSPOSE:
1484 loadOperationTo<ir::operation::Transpose>(op, subg);
1485 return;
1486 case BuiltinOperator::BuiltinOperator_MEAN:
1487 loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
1488 return;
1489 case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
1490 loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
1491 return;
1492 case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
1493 loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
1494 return;
1495 case BuiltinOperator::BuiltinOperator_REDUCE_ALL:
1496 loadReduce(op, subg, ir::operation::Reduce::ReduceType::ALL);
1497 return;
1498 case BuiltinOperator::BuiltinOperator_REVERSE_V2:
1499 loadOperationTo<ir::operation::Reverse>(op, subg);
1500 return;
1501 case BuiltinOperator::BuiltinOperator_PAD:
1502 case BuiltinOperator::BuiltinOperator_PADV2:
1503 loadOperationTo<ir::operation::Pad>(op, subg);
1504 return;
1505 case BuiltinOperator::BuiltinOperator_LOGISTIC:
1506 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
1507 return;
1508 case BuiltinOperator::BuiltinOperator_EXP:
1509 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
1510 return;
1511 case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
1512 loadOperationTo<ir::operation::ExpandDims>(op, subg);
1513 return;
1514 case BuiltinOperator::BuiltinOperator_GATHER:
1515 loadGather(op, subg);
1516 return;
1517 case BuiltinOperator::BuiltinOperator_GELU:
1518 loadGELU(op, subg);
1519 return;
1520 case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
1521 loadOperationTo<ir::operation::SpaceToBatchND>(op, subg);
1522 return;
1523 case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
1524 loadOperationTo<ir::operation::BatchToSpaceND>(op, subg);
1525 return;
1526 case BuiltinOperator::BuiltinOperator_SUM:
1527 loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
1528 return;
1529 case BuiltinOperator::BuiltinOperator_CUSTOM:
1530 loadCustom(op, subg);
1531 return;
1532 case BuiltinOperator::BuiltinOperator_SQUEEZE:
1533 loadSqueeze(op, subg);
1534 return;
1535 case BuiltinOperator::BuiltinOperator_PRELU:
1536 loadOperationTo<ir::operation::PReLU>(op, subg);
1537 return;
1538 case BuiltinOperator::BuiltinOperator_SPLIT:
1539 loadSplit(op, subg);
1540 return;
1541 case BuiltinOperator::BuiltinOperator_SPLIT_V:
1542 loadSplitV(op, subg);
1543 return;
1544 case BuiltinOperator::BuiltinOperator_SLICE:
1545 loadOperationTo<ir::operation::Slice>(op, subg);
1546 return;
1547 case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
1548 loadStridedSlice(op, subg);
1549 return;
1550 case BuiltinOperator::BuiltinOperator_UNPACK:
1551 loadUnpack(op, subg);
1552 return;
1553 case BuiltinOperator::BuiltinOperator_FLOOR_DIV:
1554 loadElementwiseBinary(op, subg,
1555 ir::operation::ElementwiseBinary::ElementwiseBinaryType::FLOOR_DIV);
1556 return;
1557 case BuiltinOperator::BuiltinOperator_FLOOR_MOD:
1558 loadElementwiseBinary(op, subg,
1559 ir::operation::ElementwiseBinary::ElementwiseBinaryType::FLOOR_MOD);
1560 return;
1561 case BuiltinOperator::BuiltinOperator_MINIMUM:
1562 loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
1563 return;
1564 case BuiltinOperator::BuiltinOperator_MAXIMUM:
1565 loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
1566 return;
1567 case BuiltinOperator::BuiltinOperator_CAST:
1568 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
1569 return;
1570 case BuiltinOperator::BuiltinOperator_EQUAL:
1571 case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
1572 case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
1573 case BuiltinOperator::BuiltinOperator_GREATER:
1574 case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
1575 case BuiltinOperator::BuiltinOperator_LESS:
1576 loadComparison(op, subg);
1577 return;
1578 case BuiltinOperator::BuiltinOperator_ONE_HOT:
1579 loadOneHot(op, subg);
1580 return;
1581 case BuiltinOperator::BuiltinOperator_ABS:
1582 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ABS);
1583 return;
1584 case BuiltinOperator::BuiltinOperator_COS:
1585 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::COS);
1586 return;
1587 case BuiltinOperator::BuiltinOperator_SIN:
1588 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
1589 return;
1590 case BuiltinOperator::BuiltinOperator_SHAPE:
1591 loadOperationTo<ir::operation::Shape>(op, subg);
1592 return;
1593 case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
1594 loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
1595 return;
1596 case BuiltinOperator::BuiltinOperator_IF:
1597 loadIf(op, subg);
1598 return;
1599 case BuiltinOperator::BuiltinOperator_WHILE:
1600 loadWhile(op, subg);
1601 return;
1602 case BuiltinOperator::BuiltinOperator_NEG:
1603 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::NEG);
1604 return;
1605 case BuiltinOperator::BuiltinOperator_ARG_MAX:
1606 loadArgMinMax(op, subg, true);
1607 return;
1608 case BuiltinOperator::BuiltinOperator_ARG_MIN:
1609 loadArgMinMax(op, subg, false);
1610 return;
1611 case BuiltinOperator::BuiltinOperator_LOG:
1612 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOG);
1613 return;
1614 case BuiltinOperator::BuiltinOperator_ROUND:
1615 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
1616 return;
1617 case BuiltinOperator::BuiltinOperator_POW:
1618 loadOperationTo<ir::operation::Pow>(op, subg);
1619 return;
1620 case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
1621 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
1622 return;
1623 case BuiltinOperator::BuiltinOperator_LOGICAL_AND:
1624 loadElementwiseBinary(op, subg,
1625 ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
1626 return;
1627 case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
1628 loadElementwiseBinary(op, subg,
1629 ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
1630 return;
1631 case BuiltinOperator::BuiltinOperator_FILL:
1632 loadOperationTo<ir::operation::Fill>(op, subg);
1633 return;
1634 case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
1635 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
1636 return;
1637 case BuiltinOperator::BuiltinOperator_TILE:
1638 loadOperationTo<ir::operation::Tile>(op, subg);
1639 return;
1640 case BuiltinOperator::BuiltinOperator_RANGE:
1641 loadOperationTo<ir::operation::Range>(op, subg);
1642 return;
1643 // case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
1644 // Handled on each loader: different option name
1645 // Circle: adjoint_lhs, adjoint_rhs
1646 // TFLite: adj_x, adj_y
1647 case BuiltinOperator::BuiltinOperator_BROADCAST_TO:
1648 loadOperationTo<ir::operation::BroadcastTo>(op, subg);
1649 return;
1650 case BuiltinOperator::BuiltinOperator_LOG_SOFTMAX:
1651 loadLogSoftmax(op, subg);
1652 return;
1653 case BuiltinOperator::BuiltinOperator_QUANTIZE:
1654 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::QUANTIZE);
1655 return;
1656 case BuiltinOperator::BuiltinOperator_DEQUANTIZE:
1657 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::DEQUANTIZE);
1658 return;
1659 case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH:
1660 loadSpaceToDepth(op, subg);
1661 return;
1662 case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
1663 loadOperationTo<ir::operation::L2Normalization>(op, subg);
1664 break;
1665 case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
1666 loadLeakyRelu(op, subg);
1667 return;
1668 case BuiltinOperator::BuiltinOperator_RANK:
1669 loadOperationTo<ir::operation::Rank>(op, subg);
1670 return;
1671 case BuiltinOperator::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
1672 loadUnidirectionalSequenceLSTM(op, subg);
1673 return;
1674 case BuiltinOperator::BuiltinOperator_DEPTH_TO_SPACE:
1675 loadDepthToSpace(op, subg);
1676 return;
1677 case BuiltinOperator::BuiltinOperator_EMBEDDING_LOOKUP:
1678 loadOperationTo<ir::operation::EmbeddingLookup>(op, subg);
1679 return;
1680 case BuiltinOperator::BuiltinOperator_HASHTABLE_LOOKUP:
1681 loadOperationTo<ir::operation::HashtableLookup>(op, subg);
1682 return;
1683 case BuiltinOperator::BuiltinOperator_DYNAMIC_UPDATE_SLICE:
1684 loadOperationTo<ir::operation::DynamicUpdateSlice>(op, subg);
1685 return;
1686 case BuiltinOperator::BuiltinOperator_TOPK_V2:
1687 loadTopKV2(op, subg);
1688 return;
1689 default:
1690 throw std::runtime_error(
1691 std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
1692 }
1693}

References onert::ir::operation::ElementwiseUnary::ABS, onert::ir::operation::BinaryArithmetic::ADD, onert::ir::operation::Reduce::ALL, onert::ir::operation::Reduce::ANY, onert::ir::operation::Pool2D::AVG, onert::ir::operation::ElementwiseUnary::CAST, onert::ir::operation::ElementwiseUnary::COS, onert::ir::operation::ElementwiseUnary::DEQUANTIZE, onert::ir::operation::BinaryArithmetic::DIV, onert::ir::operation::ElementwiseActivation::ELU, onert::ir::operation::ElementwiseUnary::EXP, onert::ir::operation::ElementwiseUnary::FLOOR, onert::ir::operation::ElementwiseBinary::FLOOR_DIV, onert::ir::operation::ElementwiseBinary::FLOOR_MOD, onert::ir::operation::ElementwiseActivation::infinity, onert::ir::operation::ElementwiseUnary::LOG, onert::ir::operation::ElementwiseBinary::LOGICAL_AND, onert::ir::operation::ElementwiseUnary::LOGICAL_NOT, onert::ir::operation::ElementwiseBinary::LOGICAL_OR, onert::ir::operation::ElementwiseActivation::LOGISTIC, onert::ir::operation::ElementwiseBinary::MAX, onert::ir::operation::Pool2D::MAX, onert::ir::operation::Reduce::MAX, onert::ir::operation::Reduce::MEAN, onert::ir::operation::ElementwiseBinary::MIN, onert::ir::operation::BinaryArithmetic::MUL, onert::ir::operation::ElementwiseUnary::NEG, onert::ir::operation::Reduce::PROD, onert::ir::operation::ElementwiseUnary::QUANTIZE, onert::ir::operation::ElementwiseActivation::RELU, onert::ir::operation::ElementwiseUnary::ROUND, onert::ir::operation::ElementwiseUnary::RSQRT, onert::ir::operation::ElementwiseUnary::SIN, onert::ir::operation::ElementwiseUnary::SQRT, onert::ir::operation::ElementwiseUnary::SQUARE, onert::ir::operation::BinaryArithmetic::SUB, onert::ir::operation::Reduce::SUM, onert::ir::operation::ElementwiseActivation::TANH, and onert::ir::operation::ElementwiseUnary::ZEROS_LIKE.

◆ loadOperationIO()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadOperationIO ( const Operator *  op,
ir::OperandIndexSequence &  inputs,
ir::OperandIndexSequence &  outputs 
)
protected

Definition at line 612 of file BaseLoader.h.

614{
615 for (const std::int32_t idx : *op->inputs())
616 {
617 // Optional tensors are not supported yet except for FULLY_CONNECTED and BCQ_FULLY_CONNECTED
618 auto check_optional_input = [&]() {
619 auto builtin_code = getBuiltinOperator(op);
620 if (isOptionalInputTensor(idx) && !allowOptionalInputTensor(builtin_code))
621 throw std::runtime_error(
622 std::string("loader doesn't support optional input tensor yet for ")
623 .append(EnumNameBuiltinOperator(builtin_code)));
624 };
625 check_optional_input();
626 inputs.append(tensorIdxToOperandIdx(idx));
627 }
628
629 for (const std::int32_t idx : *op->outputs())
630 {
631 outputs.append(tensorIdxToOperandIdx(idx));
632 }
633}

References onert::ir::OperandIndexSequence::append().

◆ loadPool2DOptions()

template<typename LoaderDomain >
template<typename Param >
void onert::loader::BaseLoader< LoaderDomain >::loadPool2DOptions ( Param &  param,
const Pool2DOptions *  options 
)
protected

Definition at line 659 of file BaseLoader.h.

660{
661 // Strides and Paddings
662 if (options->stride_h() <= 0 || options->stride_w() <= 0)
663 throw std::runtime_error{"Invalid stride vertical or horizontal - both must be bigger than 0"};
664 loadStridesAndPaddings(param, options);
665 // Filter width and height
666 // Strides
667 if (options->filter_width() <= 0 || options->filter_height() <= 0)
668 throw std::runtime_error{"Invalid filter width or height - both must be bigger than 0"};
669 param.kw = options->filter_width();
670 param.kh = options->filter_height();
671 // Activation
672 param.activation = convertActivation(options->fused_activation_function());
673}

◆ loadQuantization()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadQuantization ( const Tensor *  tensor,
ir::TypeInfo &  typeInfo 
)
protected

Definition at line 460 of file BaseLoader.h.

461{
462 auto q_params = tensor->quantization();
463
464 // Type validation
465 // INT16 should be symmetric quantized
466 if (tensor->type() == TensorType::TensorType_INT16)
467 {
468 if (q_params->zero_point() != nullptr && q_params->zero_point()->size() != 0)
469 {
470 auto zero_points = q_params->zero_point();
471 for (uint32_t i = 0; i < zero_points->size(); i++)
472 {
473 if (zero_points->Get(i) != 0)
474 throw std::runtime_error(
475 "Quantization param: int16 should be symmetric, but zero_point is not zero.");
476 }
477 }
478 }
479
480 if (q_params == nullptr || q_params->scale() == nullptr || q_params->scale()->size() == 0)
481 {
482 typeInfo.quantization(0., 0);
483 return;
484 }
485 if (q_params->zero_point() == nullptr)
486 {
487 throw std::runtime_error("Quantization params: scale is not null, but zero_point is null.");
488 }
489 const size_t num_scales = q_params->scale()->size();
490 if (num_scales != q_params->zero_point()->size())
491 {
492 throw std::runtime_error("Quantization params: scale size != zero_point size");
493 }
494 std::vector<float> scales;
495 std::vector<int32_t> zero_points;
496 scales.resize(num_scales);
497 zero_points.resize(num_scales);
498 for (size_t i = 0; i < num_scales; ++i)
499 {
500 scales[i] = q_params->scale()->Get(i);
501 // zero_point is defined as long (i64) in schema while TypeInfo's zero_point is int32_t.
502 // int64_t is used instead of long because long is 4 byte in most 32bit architecture.
503 int64_t zero_point = q_params->zero_point()->Get(i);
504 if (zero_point < std::numeric_limits<int32_t>::min() ||
505 zero_point > std::numeric_limits<int32_t>::max())
506 throw std::runtime_error("Zero_point is out of int32 range.");
507 zero_points[i] = static_cast<int32_t>(zero_point);
508 }
509 auto details = q_params->details_as_CustomQuantization();
510 if (details != nullptr)
511 throw std::runtime_error("Custom Quantization is not supported");
512 typeInfo.quantization(std::move(scales), std::move(zero_points));
513}

References onert::ir::TypeInfo::quantization().
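
The zero-point handling above narrows from the schema's int64 storage to the int32_t kept in ir::TypeInfo. The same checked narrowing in isolation:

#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>

std::vector<int32_t> narrowZeroPoints(const std::vector<int64_t> &zero_points)
{
  std::vector<int32_t> out;
  out.reserve(zero_points.size());
  for (int64_t zp : zero_points)
  {
    if (zp < std::numeric_limits<int32_t>::min() || zp > std::numeric_limits<int32_t>::max())
      throw std::runtime_error("Zero_point is out of int32 range.");
    out.push_back(static_cast<int32_t>(zp)); // safe after the range check
  }
  return out;
}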

◆ loadSparsity()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadSparsity ( const Tensor *  tensor,
ir::TypeInfo &  typeInfo 
)
protected

Definition at line 516 of file BaseLoader.h.

517{
518 auto src_sparsity = tensor->sparsity();
519 if (src_sparsity != nullptr)
520 {
521 std::vector<uint16_t> w1_segments;
522 std::vector<uint16_t> w1_indices;
523 // check traversal_order
524 if (src_sparsity->traversal_order())
525 {
526 const int traversal_order_size = src_sparsity->traversal_order()->size();
527 for (int i = 0; i < traversal_order_size; ++i)
528 {
529 if (i != src_sparsity->traversal_order()->Get(i))
530 throw std::runtime_error("traversal_order [0, 1, ..., n-1] is only supported.");
531 }
532 }
533 // check block_map
534 int block_rank = 0;
535 if (src_sparsity->block_map())
536 {
537 block_rank = src_sparsity->block_map()->size();
538 for (int i = 0; i < block_rank; ++i)
539 {
540 if (i != src_sparsity->block_map()->Get(i))
541 throw std::runtime_error("block_map [0, 1, ..., n-1] is only supported.");
542 }
543 }
544 // load metadata
545 const auto dim_metadata_size = src_sparsity->dim_metadata()->size();
546 const auto dense_rank = tensor->shape() ? tensor->shape()->size() : 0;
547 if (dense_rank + block_rank != dim_metadata_size)
548 throw std::runtime_error("sparsity dim_metadata length is wrong.");
549 bool random_sparsity = dim_metadata_size == 2 && block_rank == 0;
550 bool block2D_sparsity = dim_metadata_size == 4 && block_rank == 2;
551 if (!random_sparsity && !block2D_sparsity)
552 throw std::runtime_error(
553 "sparsity is supported only for 2D tensor with random or 16x1 block sparsity.");
554
555 const auto *src_metadata = src_sparsity->dim_metadata()->Get(0);
556 if (src_metadata->format() != DimensionType::DimensionType_DENSE)
557 throw std::runtime_error("sparse tensor dim[0] is not DENSE");
558 src_metadata = src_sparsity->dim_metadata()->Get(1);
559 if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR)
560 throw std::runtime_error("sparse tensor dim[0] is not SPARSE_CSR");
561 auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() {
562 if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr)
563 return false;
564 bool status = true;
565 /* `onert` internally uses uint16 type regardless of the value of
566 the array_segments_type and array_indices_type */
567 switch (src_metadata->array_segments_type())
568 {
569 case SparseIndexVector::SparseIndexVector_Int32Vector:
570 throw std::runtime_error("sparse tensor with int32 segment type is not supported");
571 case SparseIndexVector::SparseIndexVector_Uint16Vector:
572 status = Copy(src_metadata->array_segments_as_Uint16Vector(), w1_segments);
573 break;
574 case SparseIndexVector::SparseIndexVector_Uint8Vector:
575 status = Copy(src_metadata->array_segments_as_Uint8Vector(), w1_segments);
576 break;
577 default:
578 return false;
579 }
580 if (status != true)
581 return false;
582 switch (src_metadata->array_indices_type())
583 {
584 case SparseIndexVector::SparseIndexVector_Int32Vector:
585 throw std::runtime_error("sparse tensor with int32 indices type is not supported");
586 case SparseIndexVector::SparseIndexVector_Uint16Vector:
587 return Copy(src_metadata->array_indices_as_Uint16Vector(), w1_indices);
588 case SparseIndexVector::SparseIndexVector_Uint8Vector:
589 return Copy(src_metadata->array_indices_as_Uint8Vector(), w1_indices);
590 default:
591 break;
592 }
593 return false;
594 };
595 if (ParseSparseIndexVector() == false)
596 throw std::runtime_error("Error during parsing sparsity index information");
597 // Get block size
598 std::vector<int32_t> block_size;
599 for (int i = 0; i < block_rank; ++i)
600 {
601 auto block_metadata = src_sparsity->dim_metadata()->Get(dense_rank + i);
602 if (block_metadata->format() != DimensionType::DimensionType_DENSE)
603 throw std::runtime_error("block dimension must be DENSE.");
604 block_size.push_back(block_metadata->dense_size());
605 }
606 typeInfo.sparsity(std::make_shared<ir::Sparsity>(std::move(w1_segments), std::move(w1_indices),
607 std::move(block_size)));
608 }
609}

References onert::loader::Copy(), and onert::ir::TypeInfo::sparsity().
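
The w1_segments/w1_indices pair stored into ir::Sparsity is a CSR encoding of the sparse (second) dimension: segments[r]..segments[r+1] delimit the non-zeros of row r, and indices[k] is each non-zero's position along that dimension. A minimal sketch of how such a row is conventionally densified (it illustrates the general CSR convention, not onert's kernels):

#include <cstdint>
#include <vector>

std::vector<float> densifyRow(uint16_t row, int num_cols,
                              const std::vector<uint16_t> &segments, // size: rows + 1
                              const std::vector<uint16_t> &indices,
                              const std::vector<float> &values)
{
  std::vector<float> dense(num_cols, 0.0f);
  for (uint16_t k = segments[row]; k < segments[row + 1]; ++k)
    dense[indices[k]] = values[k];
  return dense;
}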

◆ loadStridesAndPaddings()

template<typename LoaderDomain >
template<typename Param , typename OptionsType >
void onert::loader::BaseLoader< LoaderDomain >::loadStridesAndPaddings ( Param &  param,
const OptionsType *  options 
)
protected

Definition at line 637 of file BaseLoader.h.

638{
639 // Strides
640 param.stride.vertical = options->stride_h();
641 param.stride.horizontal = options->stride_w();
642 // Paddings
643 switch (options->padding())
644 {
645 case Padding::Padding_SAME:
646 param.padding.type = ir::PaddingType::SAME;
647 break;
648 case Padding::Padding_VALID:
649 param.padding.type = ir::PaddingType::VALID;
650 break;
651 default:
652 throw std::runtime_error{"Invalid padding type"};
653 }
654 // param paddings indexes unused
655}

References onert::ir::SAME, and onert::ir::VALID.
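
Only the padding mode is taken from the options here; the explicit pad amounts follow from the SAME/VALID semantics elsewhere in the runtime. For reference, the conventional output-extent formulas behind the two modes (standard TFLite-style definitions, not code from BaseLoader):

#include <algorithm>

int outExtentSame(int in, int stride) { return (in + stride - 1) / stride; } // ceil(in / stride)

int outExtentValid(int in, int filter, int stride)
{
  return std::max(0, (in - filter + stride) / stride); // ceil((in - filter + 1) / stride)
}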

◆ tensorIdxToOperandIdx()

template<typename LoaderDomain >
ir::OperandIndex onert::loader::BaseLoader< LoaderDomain >::tensorIdxToOperandIdx ( int32_t  tensorIdx)
protected

◆ tensorTypeToDataType()

template<typename LoaderDomain >
virtual ir::DataType onert::loader::BaseLoader< LoaderDomain >::tensorTypeToDataType ( const TensorType  type)
protected virtual

◆ verifySubgraphIndex()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::verifySubgraphIndex ( int  subg_index)
inline protected

Definition at line 122 of file BaseLoader.h.

123 {
124 const auto num_subgraphs = _domain_model->subgraphs()->size();
125 if (subg_index < 0 || subg_index >= static_cast<int32_t>(num_subgraphs))
126 throw std::runtime_error{std::string{"Invalid subgraph index - "} +
127 std::to_string(subg_index)};
128 }

References onert::loader::BaseLoader< LoaderDomain >::_domain_model.

Field Documentation

◆ _base

template<typename LoaderDomain >
uint8_t* onert::loader::BaseLoader< LoaderDomain >::_base
protected

Definition at line 183 of file BaseLoader.h.

◆ _buf_to_data

template<typename LoaderDomain >
std::unordered_map<uint32_t , std::shared_ptr<ir::Data> > onert::loader::BaseLoader< LoaderDomain >::_buf_to_data
protected

Definition at line 199 of file BaseLoader.h.

◆ _domain_model

template<typename LoaderDomain >
const Model* onert::loader::BaseLoader< LoaderDomain >::_domain_model
protected

◆ _fd

template<typename LoaderDomain >
int onert::loader::BaseLoader< LoaderDomain >::_fd
protected

Definition at line 187 of file BaseLoader.h.

◆ _file_path

template<typename LoaderDomain >
std::string onert::loader::BaseLoader< LoaderDomain >::_file_path
protected

Definition at line 188 of file BaseLoader.h.

◆ _pagesize

template<typename LoaderDomain >
int32_t onert::loader::BaseLoader< LoaderDomain >::_pagesize
protected

Definition at line 185 of file BaseLoader.h.

◆ _tensor_names

template<typename LoaderDomain >
std::unordered_map<ir::OperandIndex, std::string> onert::loader::BaseLoader< LoaderDomain >::_tensor_names
protected

Definition at line 192 of file BaseLoader.h.

◆ _tensor_to_operand

template<typename LoaderDomain >
std::vector<ir::OperandIndex> onert::loader::BaseLoader< LoaderDomain >::_tensor_to_operand
protected

Definition at line 191 of file BaseLoader.h.

◆ _use_mmaped_data

template<typename LoaderDomain >
bool onert::loader::BaseLoader< LoaderDomain >::_use_mmaped_data = false
protected

Definition at line 196 of file BaseLoader.h.

Referenced by onert::loader::BaseLoader< LoaderDomain >::BaseLoader().

◆ _verifier

template<typename LoaderDomain >
std::unique_ptr<Verifier> onert::loader::BaseLoader< LoaderDomain >::_verifier
protected

Definition at line 194 of file BaseLoader.h.


The documentation for this class was generated from the following file:
BaseLoader.h