ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::loader::BaseLoader< LoaderDomain > Class Template Reference (abstract)

#include <BaseLoader.h>

Collaboration diagram for onert::loader::BaseLoader< LoaderDomain >:

Public Member Functions

 BaseLoader ()
 Construct a new Loader object.
 
std::unique_ptr< ir::Model > loadFromFile (const std::string &file_path)
 Load a model from file.
 
std::unique_ptr< ir::Model > loadFromBuffer (uint8_t *buffer, size_t size)
 Load a model from a buffer.
 

Protected Types

using Verifier = typename LoaderDomain::Verifier
 
using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType
 
using Buffer = typename LoaderDomain::Buffer
 
using BuiltinOperator = typename LoaderDomain::BuiltinOperator
 
using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat
 
using Metadata = typename LoaderDomain::Metadata
 
using Model = typename LoaderDomain::Model
 
using Operator = typename LoaderDomain::Operator
 
using Padding = typename LoaderDomain::Padding
 
using Pool2DOptions = typename LoaderDomain::Pool2DOptions
 
using SubGraph = typename LoaderDomain::SubGraph
 
using Tensor = typename LoaderDomain::Tensor
 
using TensorType = typename LoaderDomain::TensorType
 
using DimensionType = typename LoaderDomain::DimensionType
 
using SparseIndexVector = typename LoaderDomain::SparseIndexVector
 

Protected Member Functions

bool isOptionalInputTensor (std::int32_t idx)
 
virtual bool allowOptionalInputTensor (BuiltinOperator)=0
 
 ~BaseLoader ()=default
 
std::unique_ptr< ir::Model > loadModel ()
 
ir::Activation convertActivation (ActivationFunctionType type)
 
virtual ir::DataType tensorTypeToDataType (const TensorType type)
 
ir::OperandIndex tensorIdxToOperandIdx (int32_t tensorIdx)
 
flexbuffers::Map getCustomOpAttrMap (const Operator *op)
 
ir::OperandIndex loadOperand (const Tensor *tensor, ir::Graph &subg)
 
void loadQuantization (const Tensor *tensor, ir::TypeInfo &typeInfo)
 
void loadSparsity (const Tensor *tensor, ir::TypeInfo &typeInfo)
 
void loadOperationIO (const Operator *op, ir::OperandIndexSequence &inputs, ir::OperandIndexSequence &outputs)
 
template<typename OpIR , typename... Args>
const OpIR * loadOperationTo (const Operator *op, ir::Graph &subg, Args &&...args)
 
void loadOperation (const Operator *op, ir::Graph &subg)
 
template<typename Param , typename OptionsType >
void loadStridesAndPaddings (Param &param, const OptionsType *options)
 
template<typename Param >
void loadPool2DOptions (Param &param, const Pool2DOptions *options)
 
BuiltinOperator getBuiltinOperator (const Operator *op)
 
void verifySubgraphIndex (int subg_index)
 

Protected Attributes

uint8_t * _base
 
int32_t _pagesize
 
int _fd
 
std::string _file_path
 
const Model * _domain_model
 
std::vector< ir::OperandIndex > _tensor_to_operand
 
std::unordered_map< ir::OperandIndex, std::string > _tensor_names
 
std::unique_ptr< Verifier > _verifier
 
bool _use_mmaped_data = false
 
std::unordered_map< uint32_t, std::shared_ptr< ir::Data > > _buf_to_data
 

Detailed Description

template<typename LoaderDomain>
class onert::loader::BaseLoader< LoaderDomain >

Definition at line 40 of file BaseLoader.h.

Member Typedef Documentation

◆ ActivationFunctionType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::ActivationFunctionType = typename LoaderDomain::ActivationFunctionType
protected

Definition at line 44 of file BaseLoader.h.

◆ Buffer

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Buffer = typename LoaderDomain::Buffer
protected

Definition at line 45 of file BaseLoader.h.

◆ BuiltinOperator

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::BuiltinOperator = typename LoaderDomain::BuiltinOperator
protected

Definition at line 46 of file BaseLoader.h.

◆ CustomOptionsFormat

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat
protected

Definition at line 47 of file BaseLoader.h.

◆ DimensionType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::DimensionType = typename LoaderDomain::DimensionType
protected

Definition at line 56 of file BaseLoader.h.

◆ Metadata

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Metadata = typename LoaderDomain::Metadata
protected

Definition at line 48 of file BaseLoader.h.

◆ Model

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Model = typename LoaderDomain::Model
protected

Definition at line 49 of file BaseLoader.h.

◆ Operator

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Operator = typename LoaderDomain::Operator
protected

Definition at line 50 of file BaseLoader.h.

◆ Padding

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Padding = typename LoaderDomain::Padding
protected

Definition at line 51 of file BaseLoader.h.

◆ Pool2DOptions

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Pool2DOptions = typename LoaderDomain::Pool2DOptions
protected

Definition at line 52 of file BaseLoader.h.

◆ SparseIndexVector

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::SparseIndexVector = typename LoaderDomain::SparseIndexVector
protected

Definition at line 57 of file BaseLoader.h.

◆ SubGraph

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::SubGraph = typename LoaderDomain::SubGraph
protected

Definition at line 53 of file BaseLoader.h.

◆ Tensor

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Tensor = typename LoaderDomain::Tensor
protected

Definition at line 54 of file BaseLoader.h.

◆ TensorType

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::TensorType = typename LoaderDomain::TensorType
protected

Definition at line 55 of file BaseLoader.h.

◆ Verifier

template<typename LoaderDomain >
using onert::loader::BaseLoader< LoaderDomain >::Verifier = typename LoaderDomain::Verifier
protected

Definition at line 43 of file BaseLoader.h.

Constructor & Destructor Documentation

◆ BaseLoader()

template<typename LoaderDomain >
onert::loader::BaseLoader< LoaderDomain >::BaseLoader ( )
inline, explicit

Construct a new Loader object.

Definition at line 67 of file BaseLoader.h.

67 : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _domain_model{nullptr}
68 {
69 _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA);
70 }
const Model * _domain_model
Definition BaseLoader.h:196
bool getConfigBool(const std::string &key)

References onert::loader::BaseLoader< LoaderDomain >::_use_mmaped_data, and onert::util::getConfigBool().

◆ ~BaseLoader()

template<typename LoaderDomain >
onert::loader::BaseLoader< LoaderDomain >::~BaseLoader ( )
protected, default

Member Function Documentation

◆ allowOptionalInputTensor()

template<typename LoaderDomain >
virtual bool onert::loader::BaseLoader< LoaderDomain >::allowOptionalInputTensor ( BuiltinOperator  )
protected, pure virtual

◆ convertActivation()

template<typename LoaderDomain >
ir::Activation onert::loader::BaseLoader< LoaderDomain >::convertActivation ( ActivationFunctionType  type)
protected

◆ getBuiltinOperator()

template<typename LoaderDomain >
BuiltinOperator onert::loader::BaseLoader< LoaderDomain >::getBuiltinOperator ( const Operator * op)
inline, protected

Definition at line 114 of file BaseLoader.h.

115 {
116 // Enforce explicit bounds validation for opcode_index before every operator-code lookup
117 if (op->opcode_index() < 0 ||
118 static_cast<size_t>(op->opcode_index()) >= _domain_model->operator_codes()->size())
119 {
120 throw std::runtime_error("Invalid opcode_index: " + std::to_string(op->opcode_index()));
121 }
122
123 auto const builtin_opcode = _domain_model->operator_codes()->Get(op->opcode_index());
124 auto builtin_op = builtin_opcode->builtin_code();
125 if (builtin_op < BuiltinOperator::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
126 builtin_op = static_cast<BuiltinOperator>(builtin_opcode->deprecated_builtin_code());
127
128 return builtin_op;
129 }
typename LoaderDomain::BuiltinOperator BuiltinOperator
Definition BaseLoader.h:46

References onert::loader::BaseLoader< LoaderDomain >::_domain_model.

◆ getCustomOpAttrMap()

template<typename LoaderDomain >
flexbuffers::Map onert::loader::BaseLoader< LoaderDomain >::getCustomOpAttrMap ( const Operator * op)
protected

◆ isOptionalInputTensor()

template<typename LoaderDomain >
bool onert::loader::BaseLoader< LoaderDomain >::isOptionalInputTensor ( std::int32_t  idx)
inline, protected

Definition at line 60 of file BaseLoader.h.

60{ return idx == -1; }

◆ loadFromBuffer()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadFromBuffer ( uint8_t *  buffer,
size_t  size 
)

Load a model from a buffer.

Parameters
[in] buffer	buffer pointer
[in] size	buffer size
Returns
Loaded model object

◆ loadFromFile()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadFromFile ( const std::string &  file_path)

Load a model from file.

Parameters
[in] file_path	model file path
Returns
Loaded model object

◆ loadModel()

template<typename LoaderDomain >
std::unique_ptr< ir::Model > onert::loader::BaseLoader< LoaderDomain >::loadModel ( )
protected

Definition at line 1702 of file BaseLoader.h.

1703{
1704 LoaderDomain::VerifyModelBuffer(*_verifier.get());
1705 _domain_model = LoaderDomain::GetModel(_base);
1706
1707 auto model = std::make_unique<ir::Model>();
1708 // Version unused
1709 // const auto version = _model->version();
1710 // Description unused
1711
1712 // Load Metadata
1713 auto const metadata_list = _domain_model->metadata();
1714 if (metadata_list != nullptr)
1715 {
1716 for (uint32_t i = 0; i < metadata_list->size(); ++i)
1717 {
1718 const auto metadata = metadata_list->Get(i);
1719 if (metadata->name() == nullptr)
1720 continue; // metadata should have name
1721
1722 std::unique_ptr<const ir::Data> data = loadMetadata(metadata->buffer());
1723 model->add_metadata(metadata->name()->str(), std::move(data));
1724 }
1725 }
1726
1727 // Load signature map
1728 auto const signature_table = _domain_model->signature_defs();
1729 if (signature_table != nullptr)
1730 {
1731 for (uint32_t i = 0; i < signature_table->size(); ++i)
1732 {
1733 const auto signature = signature_table->Get(i);
1734 if (signature == nullptr)
1735 continue;
1736 const auto signature_key = signature->signature_key();
1737 if (signature_key == nullptr)
1738 continue; // signature should have key
1739 const auto subgraph_index = static_cast<uint16_t>(signature->subgraph_index());
1740
1741 model->addSignatureMap(ir::SubgraphIndex{subgraph_index}, signature_key->str());
1742 }
1743 }
1744
1745 // const auto *description = _model->description();
1746 // Load subgraphs and map operations on subgraph
1747 const auto subgraphs = _domain_model->subgraphs();
1748 if (subgraphs->size() - 1 > ir::SubgraphIndex::max())
1749 throw std::runtime_error{"The number of subgraphs cannot exceed " +
1750 std::to_string(ir::SubgraphIndex::max() + 1)};
1751 for (uint16_t subgraph_index = 0; subgraph_index < subgraphs->size(); ++subgraph_index)
1752 {
1753 auto subg = loadSubgraph((*_domain_model->subgraphs())[subgraph_index]);
1754 // NOTE: Used () instead of {}, which does not check narrowing.
1755 // It is okay since overflow is checked the above if-statement.
1756 model->push(ir::SubgraphIndex(subgraph_index), std::move(subg));
1757 }
1758 return model;
1759}
std::unique_ptr< Verifier > _verifier
Definition BaseLoader.h:201
static uint16_t max()
Return max index value.
Definition Index.h:146
::onert::util::Index< uint16_t, SubgraphIndexTag > SubgraphIndex
Definition Index.h:39

References onert::util::Index< uint16_t, SubgraphIndexTag >::max().

◆ loadOperand()

template<typename LoaderDomain >
ir::OperandIndex onert::loader::BaseLoader< LoaderDomain >::loadOperand ( const Tensor * tensor,
ir::Graph & subg 
)
protected

Definition at line 369 of file BaseLoader.h.

370{
371 ir::Shape shape;
372 // Shape
373 const auto *tensor_shape =
374 tensor->shape_signature() ? tensor->shape_signature() : tensor->shape();
375 if (tensor_shape != nullptr)
376 {
377 for (const auto &dim : *tensor_shape)
378 {
379 shape.append(dim);
380 }
381 }
382
383 // Note for tensor->shape_signature()
384 // We don't handle shape signature
385 // How we handle:
386 // If shape_signature[k] == -1, we will use tensor->shape()[k] == 1
387 // If app wants to change the input shape, call nnfw_apply_input_tensorinfo() can
388 // be used.
389
390 // TypeInfo
391 ir::TypeInfo type_info(tensorTypeToDataType(tensor->type()));
392 // Ignore quantize param if datatype does not requires
393 if (ir::requireQuantParam(type_info.type()))
394 loadQuantization(tensor, type_info);
395 loadSparsity(tensor, type_info);
396
397 // Create operand
398 const auto operand_index = subg.addOperand(shape, type_info);
399
400 // Constant tensors are indicated by non-empty data.
401 const auto *data = _domain_model->buffers()->Get(tensor->buffer())->data();
402 if (data != nullptr)
403 {
404 using std::ptrdiff_t;
405 std::shared_ptr<ir::Data> data_obj;
406
407 if (_fd == -1) // Model is from memory
408 {
409 data_obj = std::make_shared<ir::ExternalData>(data->data(), data->size());
410 }
411 else // Model is loaded(mmap'd) from a file
412 {
413 size_t data_size = data->size();
414 ptrdiff_t unaligned_offset_start = data->data() - _base;
415 ptrdiff_t offset_end = unaligned_offset_start + data_size;
416
417 // Calculated aligned offset from base address of mapped region
418 // munmap accepts memory address which is a multiple of the pagesize
419 ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize;
420 size_t mmap_size = offset_end - aligned_offset_start;
421
422 uint32_t buf_idx = tensor->buffer();
423 auto buffer_found = _buf_to_data.find(buf_idx);
424
425 if (buffer_found != _buf_to_data.end())
426 {
427 // Another tensor points this buffer and its matching Data(either CachedData or MMapedData)
428 // was already created. Let's reuse the Data
429 data_obj = buffer_found->second;
430 }
431 else if (_use_mmaped_data)
432 {
433 data_obj = std::make_shared<ir::MMapedData>(_fd, aligned_offset_start, mmap_size,
434 unaligned_offset_start, data_size);
435 _buf_to_data[buf_idx] = data_obj;
436 }
437 else
438 {
439 size_t offset = unaligned_offset_start - aligned_offset_start;
440 uint8_t *mmap_base = static_cast<uint8_t *>(
441 mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, _fd, aligned_offset_start));
442
443 data_obj = std::make_shared<ir::CachedData>(mmap_base + offset, data_size);
444 _buf_to_data[buf_idx] = data_obj;
445
446 munmap(mmap_base, mmap_size);
447 }
448 }
449 subg.setOperandValue(operand_index, std::move(data_obj));
450 }
451
452 _tensor_names.emplace(operand_index, tensor->name()->str());
453
454 // Variable
455 if (tensor->is_variable())
456 {
457 if (data != nullptr)
458 throw std::runtime_error("Variable tensor with buffer is not supported!");
459
460 subg.operands().at(operand_index).info().setAsVariable();
461 }
462
463 return operand_index;
464}
std::unordered_map< uint32_t, std::shared_ptr< ir::Data > > _buf_to_data
Definition BaseLoader.h:206
virtual ir::DataType tensorTypeToDataType(const TensorType type)
void loadQuantization(const Tensor *tensor, ir::TypeInfo &typeInfo)
Definition BaseLoader.h:467
void loadSparsity(const Tensor *tensor, ir::TypeInfo &typeInfo)
Definition BaseLoader.h:523
std::unordered_map< ir::OperandIndex, std::string > _tensor_names
Definition BaseLoader.h:199
__global uchar * offset(const Image *img, int x, int y)
Definition helpers.h:540
bool requireQuantParam(DataType data_type)
Definition DataType.cc:57

References onert::ir::Graph::addOperand(), onert::util::ObjectManager< Index, Object >::at(), offset(), onert::ir::Graph::operands(), onert::ir::requireQuantParam(), onert::ir::Graph::setOperandValue(), and onert::ir::TypeInfo::type().

◆ loadOperation()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadOperation ( const Operator * op,
ir::Graph & subg 
)
protected

Definition at line 1395 of file BaseLoader.h.

1396{
1397 auto const builtin_op = getBuiltinOperator(op);
1398
1399 switch (builtin_op)
1400 {
1401 case BuiltinOperator::BuiltinOperator_ADD_N:
1402 loadOperationTo<ir::operation::AddN>(op, subg);
1403 return;
1404 case BuiltinOperator::BuiltinOperator_CONV_2D:
1405 loadConv2D(op, subg);
1406 return;
1407 case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
1408 loadPool2D(op, subg, ir::operation::Pool2D::PoolType::AVG);
1409 return;
1410 case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
1411 loadDepthwiseConv2D(op, subg);
1412 return;
1413 case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV:
1414 loadTransposeConv(op, subg);
1415 return;
1416 case BuiltinOperator::BuiltinOperator_RESHAPE:
1417 loadReshape(op, subg);
1418 return;
1419 case BuiltinOperator::BuiltinOperator_SOFTMAX:
1420 loadSoftmax(op, subg);
1421 return;
1422 case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
1423 loadPool2D(op, subg, ir::operation::Pool2D::PoolType::MAX);
1424 return;
1425 case BuiltinOperator::BuiltinOperator_CONCATENATION:
1426 loadConcatenation(op, subg);
1427 return;
1428 case BuiltinOperator::BuiltinOperator_FLOOR:
1429 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::FLOOR);
1430 return;
1431 case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
1432 loadFC(op, subg);
1433 return;
1434 case BuiltinOperator::BuiltinOperator_ADD:
1435 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::ADD);
1436 return;
1437 case BuiltinOperator::BuiltinOperator_SUB:
1438 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::SUB);
1439 return;
1440 case BuiltinOperator::BuiltinOperator_MUL:
1441 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::MUL);
1442 return;
1443 case BuiltinOperator::BuiltinOperator_DIV:
1444 loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::DIV);
1445 return;
1446 case BuiltinOperator::BuiltinOperator_PACK:
1447 loadPack(op, subg);
1448 return;
1449 case BuiltinOperator::BuiltinOperator_ELU:
1450 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::ELU);
1451 return;
1452 case BuiltinOperator::BuiltinOperator_RELU:
1453 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU,
1454 ir::operation::ElementwiseActivation::infinity, 0.f);
1455 return;
1456 case BuiltinOperator::BuiltinOperator_RELU_N1_TO_1:
1457 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 1.f,
1458 -1.f);
1459 return;
1460 case BuiltinOperator::BuiltinOperator_RELU6:
1461 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 6.f,
1462 0.f);
1463 return;
1464 case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
1465 loadResizeBilinear(op, subg);
1466 return;
1467 case BuiltinOperator::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
1468 loadResizeNearestNeighbor(op, subg);
1469 return;
1470 case BuiltinOperator::BuiltinOperator_RSQRT:
1471 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
1472 return;
1473 case BuiltinOperator::BuiltinOperator_SELECT:
1474 case BuiltinOperator::BuiltinOperator_SELECT_V2:
1475 loadOperationTo<ir::operation::Select>(op, subg);
1476 return;
1477 case BuiltinOperator::BuiltinOperator_SQRT:
1478 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
1479 return;
1480 case BuiltinOperator::BuiltinOperator_SQUARE:
1481 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQUARE);
1482 return;
1483 case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
1484 loadOperationTo<ir::operation::SquaredDifference>(op, subg);
1485 return;
1486 case BuiltinOperator::BuiltinOperator_TANH:
1487 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
1488 1.f);
1489 return;
1490 case BuiltinOperator::BuiltinOperator_TRANSPOSE:
1491 loadOperationTo<ir::operation::Transpose>(op, subg);
1492 return;
1493 case BuiltinOperator::BuiltinOperator_MEAN:
1494 loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
1495 return;
1496 case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
1497 loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
1498 return;
1499 case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
1500 loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
1501 return;
1502 case BuiltinOperator::BuiltinOperator_REDUCE_ALL:
1503 loadReduce(op, subg, ir::operation::Reduce::ReduceType::ALL);
1504 return;
1505 case BuiltinOperator::BuiltinOperator_REVERSE_V2:
1506 loadOperationTo<ir::operation::Reverse>(op, subg);
1507 return;
1508 case BuiltinOperator::BuiltinOperator_PAD:
1509 case BuiltinOperator::BuiltinOperator_PADV2:
1510 loadOperationTo<ir::operation::Pad>(op, subg);
1511 return;
1512 case BuiltinOperator::BuiltinOperator_LOGISTIC:
1513 loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
1514 return;
1515 case BuiltinOperator::BuiltinOperator_EXP:
1516 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
1517 return;
1518 case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
1519 loadOperationTo<ir::operation::ExpandDims>(op, subg);
1520 return;
1521 case BuiltinOperator::BuiltinOperator_GATHER:
1522 loadGather(op, subg);
1523 return;
1524 case BuiltinOperator::BuiltinOperator_GELU:
1525 loadGELU(op, subg);
1526 return;
1527 case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
1528 loadOperationTo<ir::operation::SpaceToBatchND>(op, subg);
1529 return;
1530 case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
1531 loadOperationTo<ir::operation::BatchToSpaceND>(op, subg);
1532 return;
1533 case BuiltinOperator::BuiltinOperator_SUM:
1534 loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
1535 return;
1536 case BuiltinOperator::BuiltinOperator_CUSTOM:
1537 loadCustom(op, subg);
1538 return;
1539 case BuiltinOperator::BuiltinOperator_SQUEEZE:
1540 loadSqueeze(op, subg);
1541 return;
1542 case BuiltinOperator::BuiltinOperator_PRELU:
1543 loadOperationTo<ir::operation::PReLU>(op, subg);
1544 return;
1545 case BuiltinOperator::BuiltinOperator_SPLIT:
1546 loadSplit(op, subg);
1547 return;
1548 case BuiltinOperator::BuiltinOperator_SPLIT_V:
1549 loadSplitV(op, subg);
1550 return;
1551 case BuiltinOperator::BuiltinOperator_SLICE:
1552 loadOperationTo<ir::operation::Slice>(op, subg);
1553 return;
1554 case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
1555 loadStridedSlice(op, subg);
1556 return;
1557 case BuiltinOperator::BuiltinOperator_UNPACK:
1558 loadUnpack(op, subg);
1559 return;
1560 case BuiltinOperator::BuiltinOperator_FLOOR_DIV:
1561 loadElementwiseBinary(op, subg,
1562 ir::operation::ElementwiseBinary::ElementwiseBinaryType::FLOOR_DIV);
1563 return;
1564 case BuiltinOperator::BuiltinOperator_FLOOR_MOD:
1565 loadElementwiseBinary(op, subg,
1566 ir::operation::ElementwiseBinary::ElementwiseBinaryType::FLOOR_MOD);
1567 return;
1568 case BuiltinOperator::BuiltinOperator_MINIMUM:
1569 loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
1570 return;
1571 case BuiltinOperator::BuiltinOperator_MAXIMUM:
1572 loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
1573 return;
1574 case BuiltinOperator::BuiltinOperator_CAST:
1575 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
1576 return;
1577 case BuiltinOperator::BuiltinOperator_EQUAL:
1578 case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
1579 case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
1580 case BuiltinOperator::BuiltinOperator_GREATER:
1581 case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
1582 case BuiltinOperator::BuiltinOperator_LESS:
1583 loadComparison(op, subg);
1584 return;
1585 case BuiltinOperator::BuiltinOperator_ONE_HOT:
1586 loadOneHot(op, subg);
1587 return;
1588 case BuiltinOperator::BuiltinOperator_ABS:
1589 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ABS);
1590 return;
1591 case BuiltinOperator::BuiltinOperator_COS:
1592 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::COS);
1593 return;
1594 case BuiltinOperator::BuiltinOperator_SIN:
1595 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
1596 return;
1597 case BuiltinOperator::BuiltinOperator_SHAPE:
1598 loadOperationTo<ir::operation::Shape>(op, subg);
1599 return;
1600 case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
1601 loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
1602 return;
1603 case BuiltinOperator::BuiltinOperator_IF:
1604 loadIf(op, subg);
1605 return;
1606 case BuiltinOperator::BuiltinOperator_WHILE:
1607 loadWhile(op, subg);
1608 return;
1609 case BuiltinOperator::BuiltinOperator_NEG:
1610 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::NEG);
1611 return;
1612 case BuiltinOperator::BuiltinOperator_ARG_MAX:
1613 loadArgMinMax(op, subg, true);
1614 return;
1615 case BuiltinOperator::BuiltinOperator_ARG_MIN:
1616 loadArgMinMax(op, subg, false);
1617 return;
1618 case BuiltinOperator::BuiltinOperator_LOG:
1619 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOG);
1620 return;
1621 case BuiltinOperator::BuiltinOperator_ROUND:
1622 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
1623 return;
1624 case BuiltinOperator::BuiltinOperator_POW:
1625 loadOperationTo<ir::operation::Pow>(op, subg);
1626 return;
1627 case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
1628 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
1629 return;
1630 case BuiltinOperator::BuiltinOperator_LOGICAL_AND:
1631 loadElementwiseBinary(op, subg,
1632 ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
1633 return;
1634 case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
1635 loadElementwiseBinary(op, subg,
1636 ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
1637 return;
1638 case BuiltinOperator::BuiltinOperator_FILL:
1639 loadOperationTo<ir::operation::Fill>(op, subg);
1640 return;
1641 case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
1642 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
1643 return;
1644 case BuiltinOperator::BuiltinOperator_TILE:
1645 loadOperationTo<ir::operation::Tile>(op, subg);
1646 return;
1647 case BuiltinOperator::BuiltinOperator_RANGE:
1648 loadOperationTo<ir::operation::Range>(op, subg);
1649 return;
1650 // case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
1651 // Handled on each loader: different option name
1652 // Circle: adjoint_lhs, adjoint_rhs
1653 // TFLite: adj_x, adj_y
1654 case BuiltinOperator::BuiltinOperator_BROADCAST_TO:
1655 loadOperationTo<ir::operation::BroadcastTo>(op, subg);
1656 return;
1657 case BuiltinOperator::BuiltinOperator_LOG_SOFTMAX:
1658 loadLogSoftmax(op, subg);
1659 return;
1660 case BuiltinOperator::BuiltinOperator_QUANTIZE:
1661 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::QUANTIZE);
1662 return;
1663 case BuiltinOperator::BuiltinOperator_DEQUANTIZE:
1664 loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::DEQUANTIZE);
1665 return;
1666 case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH:
1667 loadSpaceToDepth(op, subg);
1668 return;
1669 case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
1670 loadOperationTo<ir::operation::L2Normalization>(op, subg);
1671 break;
1672 case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
1673 loadLeakyRelu(op, subg);
1674 return;
1675 case BuiltinOperator::BuiltinOperator_RANK:
1676 loadOperationTo<ir::operation::Rank>(op, subg);
1677 return;
1678 case BuiltinOperator::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
1679 loadUnidirectionalSequenceLSTM(op, subg);
1680 return;
1681 case BuiltinOperator::BuiltinOperator_DEPTH_TO_SPACE:
1682 loadDepthToSpace(op, subg);
1683 return;
1684 case BuiltinOperator::BuiltinOperator_EMBEDDING_LOOKUP:
1685 loadOperationTo<ir::operation::EmbeddingLookup>(op, subg);
1686 return;
1687 case BuiltinOperator::BuiltinOperator_HASHTABLE_LOOKUP:
1688 loadOperationTo<ir::operation::HashtableLookup>(op, subg);
1689 return;
1690 case BuiltinOperator::BuiltinOperator_DYNAMIC_UPDATE_SLICE:
1691 loadOperationTo<ir::operation::DynamicUpdateSlice>(op, subg);
1692 return;
1693 case BuiltinOperator::BuiltinOperator_TOPK_V2:
1694 loadTopKV2(op, subg);
1695 return;
1696 default:
1697 throw std::runtime_error(
1698 std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
1699 }
1700}
BuiltinOperator getBuiltinOperator(const Operator *op)
Definition BaseLoader.h:114

References onert::ir::operation::ElementwiseUnary::ABS, onert::ir::operation::BinaryArithmetic::ADD, onert::ir::operation::Reduce::ALL, onert::ir::operation::Reduce::ANY, onert::ir::operation::Pool2D::AVG, onert::ir::operation::ElementwiseUnary::CAST, onert::ir::operation::ElementwiseUnary::COS, onert::ir::operation::ElementwiseUnary::DEQUANTIZE, onert::ir::operation::BinaryArithmetic::DIV, onert::ir::operation::ElementwiseActivation::ELU, onert::ir::operation::ElementwiseUnary::EXP, onert::ir::operation::ElementwiseUnary::FLOOR, onert::ir::operation::ElementwiseBinary::FLOOR_DIV, onert::ir::operation::ElementwiseBinary::FLOOR_MOD, onert::ir::operation::ElementwiseActivation::infinity, onert::ir::operation::ElementwiseUnary::LOG, onert::ir::operation::ElementwiseBinary::LOGICAL_AND, onert::ir::operation::ElementwiseUnary::LOGICAL_NOT, onert::ir::operation::ElementwiseBinary::LOGICAL_OR, onert::ir::operation::ElementwiseActivation::LOGISTIC, onert::ir::operation::ElementwiseBinary::MAX, onert::ir::operation::Pool2D::MAX, onert::ir::operation::Reduce::MAX, onert::ir::operation::Reduce::MEAN, onert::ir::operation::ElementwiseBinary::MIN, onert::ir::operation::BinaryArithmetic::MUL, onert::ir::operation::ElementwiseUnary::NEG, onert::ir::operation::Reduce::PROD, onert::ir::operation::ElementwiseUnary::QUANTIZE, onert::ir::operation::ElementwiseActivation::RELU, onert::ir::operation::ElementwiseUnary::ROUND, onert::ir::operation::ElementwiseUnary::RSQRT, onert::ir::operation::ElementwiseUnary::SIN, onert::ir::operation::ElementwiseUnary::SQRT, onert::ir::operation::ElementwiseUnary::SQUARE, onert::ir::operation::BinaryArithmetic::SUB, onert::ir::operation::Reduce::SUM, onert::ir::operation::ElementwiseActivation::TANH, and onert::ir::operation::ElementwiseUnary::ZEROS_LIKE.

◆ loadOperationIO()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadOperationIO ( const Operator * op,
ir::OperandIndexSequence & inputs,
ir::OperandIndexSequence & outputs 
)
protected

Definition at line 619 of file BaseLoader.h.

621{
622 for (const std::int32_t idx : *op->inputs())
623 {
624 // Optional tensors are not supported yet except for FULLY_CONNECTED and BCQ_FULLY_CONNECTED
625 auto check_optional_input = [&]() {
626 auto builtin_code = getBuiltinOperator(op);
627 if (isOptionalInputTensor(idx) && !allowOptionalInputTensor(builtin_code))
628 throw std::runtime_error(
629 std::string("loader doesn't support optional input tensor yet for ")
630 .append(EnumNameBuiltinOperator(builtin_code)));
631 };
632 check_optional_input();
633 inputs.append(tensorIdxToOperandIdx(idx));
634 }
635
636 for (const std::int32_t idx : *op->outputs())
637 {
638 outputs.append(tensorIdxToOperandIdx(idx));
639 }
640}
bool isOptionalInputTensor(std::int32_t idx)
Definition BaseLoader.h:60
virtual bool allowOptionalInputTensor(BuiltinOperator)=0
ir::OperandIndex tensorIdxToOperandIdx(int32_t tensorIdx)

References onert::ir::OperandIndexSequence::append().

◆ loadOperationTo()

template<typename LoaderDomain >
template<typename OpIR , typename... Args>
const OpIR * onert::loader::BaseLoader< LoaderDomain >::loadOperationTo ( const Operator *  op,
ir::Graph &  subg,
Args &&...  args 
)
protected

Definition at line 684 of file BaseLoader.h.

686{
687 static_assert(sizeof...(args) <= 1, "You can't have more than 1 arguments!");
688 ir::OperandIndexSequence inputs;
689 ir::OperandIndexSequence outputs;
690
691 loadOperationIO(op, inputs, outputs);
692
693 std::unique_ptr<OpIR> new_op(new OpIR(inputs, outputs, std::forward<Args>(args)...));
694 auto ret = new_op.get();
695 subg.addOperation(std::move(new_op));
696
697 return ret;
698}
void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs, ir::OperandIndexSequence &outputs)
Definition BaseLoader.h:619

References onert::ir::Graph::addOperation().

◆ loadPool2DOptions()

template<typename LoaderDomain >
template<typename Param >
void onert::loader::BaseLoader< LoaderDomain >::loadPool2DOptions ( Param &  param,
const Pool2DOptions *  options 
)
protected

Definition at line 666 of file BaseLoader.h.

667{
668 // Strides and Paddings
669 if (options->stride_h() <= 0 || options->stride_w() <= 0)
670 throw std::runtime_error{"Invalid stride vertical or horizontal - both must be bigger than 0"};
671 loadStridesAndPaddings(param, options);
672 // Filter width and height
673 // Strides
674 if (options->filter_width() <= 0 || options->filter_height() <= 0)
675 throw std::runtime_error{"Invalid filter width or height - both must be bigger than 0"};
676 param.kw = options->filter_width();
677 param.kh = options->filter_height();
678 // Activation
679 param.activation = convertActivation(options->fused_activation_function());
680}
ir::Activation convertActivation(ActivationFunctionType type)
void loadStridesAndPaddings(Param &param, const OptionsType *options)
Definition BaseLoader.h:644

◆ loadQuantization()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadQuantization ( const Tensor *  tensor,
ir::TypeInfo &  typeInfo 
)
protected

Definition at line 467 of file BaseLoader.h.

468{
469 auto q_params = tensor->quantization();
470
471 // Type validation
472 // INT16 should be symmetric quantized
473 if (tensor->type() == TensorType::TensorType_INT16)
474 {
475 if (q_params->zero_point() != nullptr && q_params->zero_point()->size() != 0)
476 {
477 auto zero_points = q_params->zero_point();
478 for (uint32_t i = 0; i < zero_points->size(); i++)
479 {
480 if (zero_points->Get(i) != 0)
481 throw std::runtime_error(
482 "Quantization param: int16 should be symmetric, but zero_point is not zero.");
483 }
484 }
485 }
486
487 if (q_params == nullptr || q_params->scale() == nullptr || q_params->scale()->size() == 0)
488 {
489 typeInfo.quantization(0., 0);
490 return;
491 }
492 if (q_params->zero_point() == nullptr)
493 {
494 throw std::runtime_error("Quantization params: scale is not null, but zero_point is null.");
495 }
496 const size_t num_scales = q_params->scale()->size();
497 if (num_scales != q_params->zero_point()->size())
498 {
499 throw std::runtime_error("Quantization params: scale size != zero_point size");
500 }
501 std::vector<float> scales;
502 std::vector<int32_t> zero_points;
503 scales.resize(num_scales);
504 zero_points.resize(num_scales);
505 for (size_t i = 0; i < num_scales; ++i)
506 {
507 scales[i] = q_params->scale()->Get(i);
508 // zero_point is defined as long (i64) in schema while TypeInfo's zero_point is int32_t.
509 // int64_t is used instead of long because long is 4 byte in most 32bit architecture.
510 int64_t zero_point = q_params->zero_point()->Get(i);
511 if (zero_point < std::numeric_limits<int32_t>::min() ||
512 zero_point > std::numeric_limits<int32_t>::max())
513 throw std::runtime_error("Zero_point is out of int32 range.");
514 zero_points[i] = static_cast<int32_t>(zero_point);
515 }
516 auto details = q_params->details_as_CustomQuantization();
517 if (details != nullptr)
518 throw std::runtime_error("Custom Quantization is not supported");
519 typeInfo.quantization(std::move(scales), std::move(zero_points));
520}

References onert::ir::TypeInfo::quantization().

◆ loadSparsity()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::loadSparsity ( const Tensor *  tensor,
ir::TypeInfo &  typeInfo 
)
protected

Definition at line 523 of file BaseLoader.h.

524{
525 auto src_sparsity = tensor->sparsity();
526 if (src_sparsity != nullptr)
527 {
528 std::vector<uint16_t> w1_segments;
529 std::vector<uint16_t> w1_indices;
530 // check traversal_order
531 if (src_sparsity->traversal_order())
532 {
533 const int traversal_order_size = src_sparsity->traversal_order()->size();
534 for (int i = 0; i < traversal_order_size; ++i)
535 {
536 if (i != src_sparsity->traversal_order()->Get(i))
537 throw std::runtime_error("traversal_order [0, 1, ..., n-1] is only supported.");
538 }
539 }
540 // check block_map
541 int block_rank = 0;
542 if (src_sparsity->block_map())
543 {
544 block_rank = src_sparsity->block_map()->size();
545 for (int i = 0; i < block_rank; ++i)
546 {
547 if (i != src_sparsity->block_map()->Get(i))
548 throw std::runtime_error("block_map [0, 1, ..., n-1] is only supported.");
549 }
550 }
551 // load metadata
552 const auto dim_metadata_size = src_sparsity->dim_metadata()->size();
553 const auto dense_rank = tensor->shape() ? tensor->shape()->size() : 0;
554 if (dense_rank + block_rank != dim_metadata_size)
555 throw std::runtime_error("sparsity dim_metadata length is wrong.");
556 bool random_sparsity = dim_metadata_size == 2 && block_rank == 0;
557 bool block2D_sparsity = dim_metadata_size == 4 && block_rank == 2;
558 if (dim_metadata_size != !random_sparsity && !block2D_sparsity)
559 throw std::runtime_error(
560 "sparsity is supported only for 2D tensor with random or 16x1 block sparsity.");
561
562 const auto *src_metadata = src_sparsity->dim_metadata()->Get(0);
563 if (src_metadata->format() != DimensionType::DimensionType_DENSE)
564 throw std::runtime_error("sparse tensor dim[0] is not DENSE");
565 src_metadata = src_sparsity->dim_metadata()->Get(1);
566 if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR)
567 throw std::runtime_error("sparse tensor dim[0] is not SPARSE_CSR");
568 auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() {
569 if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr)
570 return false;
571 bool status = true;
572 /* `onert` inernally uses uint16 type regardless of the value of
573 the array_segments_type and array_indices_type */
574 switch (src_metadata->array_segments_type())
575 {
576 case SparseIndexVector::SparseIndexVector_Int32Vector:
577 throw std::runtime_error("sparse tensor with int32 segment type is not supported");
578 case SparseIndexVector::SparseIndexVector_Uint16Vector:
579 status = Copy(src_metadata->array_segments_as_Uint16Vector(), w1_segments);
580 break;
581 case SparseIndexVector::SparseIndexVector_Uint8Vector:
582 status = Copy(src_metadata->array_segments_as_Uint8Vector(), w1_segments);
583 break;
584 default:
585 return false;
586 }
587 if (status != true)
588 return false;
589 switch (src_metadata->array_indices_type())
590 {
591 case SparseIndexVector::SparseIndexVector_Int32Vector:
592 throw std::runtime_error("sparse tensor with int32 indices type is not supported");
593 case SparseIndexVector::SparseIndexVector_Uint16Vector:
594 return Copy(src_metadata->array_indices_as_Uint16Vector(), w1_indices);
595 case SparseIndexVector::SparseIndexVector_Uint8Vector:
596 return Copy(src_metadata->array_indices_as_Uint8Vector(), w1_indices);
597 default:
598 break;
599 }
600 return false;
601 };
602 if (ParseSparseIndexVector() == false)
603 throw std::runtime_error("Error during parsing sparsity index information");
604 // Get block size
605 std::vector<int32_t> block_size;
606 for (int i = 0; i < block_rank; ++i)
607 {
608 auto block_metadata = src_sparsity->dim_metadata()->Get(dense_rank + i);
609 if (block_metadata->format() != DimensionType::DimensionType_DENSE)
610 throw std::runtime_error("block dimension must be DENSE.");
611 block_size.push_back(block_metadata->dense_size());
612 }
613 typeInfo.sparsity(std::make_shared<ir::Sparsity>(std::move(w1_segments), std::move(w1_indices),
614 std::move(block_size)));
615 }
616}
bool Copy(const T *data_ptr, std::vector< uint16_t > &arr)
Definition BaseLoader.h:352

References onert::loader::Copy(), and onert::ir::TypeInfo::sparsity().

◆ loadStridesAndPaddings()

template<typename LoaderDomain >
template<typename Param , typename OptionsType >
void onert::loader::BaseLoader< LoaderDomain >::loadStridesAndPaddings ( Param &  param,
const OptionsType *  options 
)
protected

Definition at line 644 of file BaseLoader.h.

645{
646 // Strides
647 param.stride.vertical = options->stride_h();
648 param.stride.horizontal = options->stride_w();
649 // Paddings
650 switch (options->padding())
651 {
652 case Padding::Padding_SAME:
653 param.padding.type = ir::PaddingType::SAME;
654 break;
655 case Padding::Padding_VALID:
656 param.padding.type = ir::PaddingType::VALID;
657 break;
658 default:
659 throw std::runtime_error{"Invalid padding type"};
660 }
661 // param paddings indexes unused
662}

References onert::ir::SAME, and onert::ir::VALID.

◆ tensorIdxToOperandIdx()

template<typename LoaderDomain >
ir::OperandIndex onert::loader::BaseLoader< LoaderDomain >::tensorIdxToOperandIdx ( int32_t  tensorIdx)
protected

◆ tensorTypeToDataType()

template<typename LoaderDomain >
virtual ir::DataType onert::loader::BaseLoader< LoaderDomain >::tensorTypeToDataType ( const TensorType  type)
protectedvirtual

◆ verifySubgraphIndex()

template<typename LoaderDomain >
void onert::loader::BaseLoader< LoaderDomain >::verifySubgraphIndex ( int  subg_index)
inlineprotected

Definition at line 131 of file BaseLoader.h.

132 {
133 const auto num_subgraphs = _domain_model->subgraphs()->size();
134 if (subg_index < 0 || subg_index >= static_cast<int32_t>(num_subgraphs))
135 throw std::runtime_error{std::string{"Invalid subgraph index - "} +
136 std::to_string(subg_index)};
137 }

References onert::loader::BaseLoader< LoaderDomain >::_domain_model.

Field Documentation

◆ _base

template<typename LoaderDomain >
uint8_t* onert::loader::BaseLoader< LoaderDomain >::_base
protected

Definition at line 190 of file BaseLoader.h.

◆ _buf_to_data

template<typename LoaderDomain >
std::unordered_map<uint32_t , std::shared_ptr<ir::Data> > onert::loader::BaseLoader< LoaderDomain >::_buf_to_data
protected

Definition at line 206 of file BaseLoader.h.

◆ _domain_model

template<typename LoaderDomain >
const Model* onert::loader::BaseLoader< LoaderDomain >::_domain_model
protected

◆ _fd

template<typename LoaderDomain >
int onert::loader::BaseLoader< LoaderDomain >::_fd
protected

Definition at line 194 of file BaseLoader.h.

◆ _file_path

template<typename LoaderDomain >
std::string onert::loader::BaseLoader< LoaderDomain >::_file_path
protected

Definition at line 195 of file BaseLoader.h.

◆ _pagesize

template<typename LoaderDomain >
int32_t onert::loader::BaseLoader< LoaderDomain >::_pagesize
protected

Definition at line 192 of file BaseLoader.h.

◆ _tensor_names

template<typename LoaderDomain >
std::unordered_map<ir::OperandIndex, std::string> onert::loader::BaseLoader< LoaderDomain >::_tensor_names
protected

Definition at line 199 of file BaseLoader.h.

◆ _tensor_to_operand

template<typename LoaderDomain >
std::vector<ir::OperandIndex> onert::loader::BaseLoader< LoaderDomain >::_tensor_to_operand
protected

Definition at line 198 of file BaseLoader.h.

◆ _use_mmaped_data

template<typename LoaderDomain >
bool onert::loader::BaseLoader< LoaderDomain >::_use_mmaped_data = false
protected

Definition at line 203 of file BaseLoader.h.

Referenced by onert::loader::BaseLoader< LoaderDomain >::BaseLoader().

◆ _verifier

template<typename LoaderDomain >
std::unique_ptr<Verifier> onert::loader::BaseLoader< LoaderDomain >::_verifier
protected

Definition at line 201 of file BaseLoader.h.


The documentation for this class was generated from the following file: