ONE - On-device Neural Engine
onert::exec::DynamicShapeInferer Class Reference

Class to infer the shape of an output tensor at execution time and allocate memory for the output tensor if needed. More...

#include <DynamicShapeInferer.h>

Public Member Functions

 DynamicShapeInferer (const std::shared_ptr< backend::ITensorRegistry > &tensor_registry)
 
void visit (const ir::operation::ArgMinMax &op) override
 
void visit (const ir::operation::BatchMatMul &op) override
 
void visit (const ir::operation::BCQFullyConnected &op) override
 
void visit (const ir::operation::BCQGather &op) override
 
void visit (const ir::operation::BinaryArithmetic &op) override
 
void visit (const ir::operation::BroadcastTo &op) override
 
void visit (const ir::operation::Comparison &op) override
 
void visit (const ir::operation::Concat &op) override
 
void visit (const ir::operation::Conv2D &op) override
 
void visit (const ir::operation::ElementwiseActivation &op) override
 
void visit (const ir::operation::ElementwiseBinary &op) override
 
void visit (const ir::operation::ElementwiseUnary &op) override
 
void visit (const ir::operation::ExpandDims &op) override
 
void visit (const ir::operation::Fill &op) override
 
void visit (const ir::operation::FullyConnected &op) override
 
void visit (const ir::operation::FusedBatchNorm &op) override
 
void visit (const ir::operation::Gather &op) override
 
void visit (const ir::operation::L2Normalization &op) override
 
void visit (const ir::operation::LSTM &op) override
 
void visit (const ir::operation::MatrixBandPart &op) override
 
void visit (const ir::operation::DetectionPostProcess &op) override
 
void visit (const ir::operation::OneHot &op) override
 
void visit (const ir::operation::Pack &op) override
 
void visit (const ir::operation::Pad &op) override
 
void visit (const ir::operation::Permute &op) override
 
void visit (const ir::operation::Pool2D &op) override
 
void visit (const ir::operation::Pow &op) override
 
void visit (const ir::operation::Range &op) override
 
void visit (const ir::operation::Reduce &op) override
 
void visit (const ir::operation::Reshape &op) override
 
void visit (const ir::operation::ResizeBilinear &op) override
 
void visit (const ir::operation::Reverse &op) override
 
void visit (const ir::operation::Select &op) override
 
void visit (const ir::operation::Shape &op) override
 
void visit (const ir::operation::Slice &op) override
 
void visit (const ir::operation::Softmax &op) override
 
void visit (const ir::operation::SpaceToBatchND &op) override
 
void visit (const ir::operation::Split &op) override
 
void visit (const ir::operation::Squeeze &op) override
 
void visit (const ir::operation::StridedSlice &op) override
 
void visit (const ir::operation::SquaredDifference &op) override
 
void visit (const ir::operation::Tile &op) override
 
void visit (const ir::operation::Transpose &op) override
 
void visit (const ir::operation::Unpack &op) override
 
- Public Member Functions inherited from onert::ir::OperationVisitor
virtual ~OperationVisitor ()=default
 

Detailed Description

Class to infer the shape of an output tensor at execution time and allocate memory for the output tensor if needed.

Definition at line 34 of file DynamicShapeInferer.h.
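
A minimal usage sketch, assuming the standard visitor hook ir::Operation::accept(OperationVisitor &): an executor re-runs shape inference for an operation just before launching its kernel, so dynamic output shapes are resolved and output buffers reallocated. The preExecute function below is illustrative, not part of the onert API.

#include "DynamicShapeInferer.h"

#include <memory>

// Sketch only: re-infer output shapes of one operation before running it.
void preExecute(const onert::ir::Operation &op,
                const std::shared_ptr<onert::backend::ITensorRegistry> &registry)
{
  onert::exec::DynamicShapeInferer inferer{registry};
  op.accept(inferer); // dispatches to the matching visit() overload documented below
}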

Constructor & Destructor Documentation

◆ DynamicShapeInferer()

onert::exec::DynamicShapeInferer::DynamicShapeInferer ( const std::shared_ptr< backend::ITensorRegistry > &  tensor_registry)
inline

Definition at line 37 of file DynamicShapeInferer.h.

38 : _tensor_registry(tensor_registry)
39 {
40 // DO NOTHING
41 }

Member Function Documentation

◆ visit() [1/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ArgMinMax &op)
override

Definition at line 93 of file DynamicShapeInferer.cc.

94{
95 const auto input_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::INPUT)};
96 const auto input = _tensor_registry->getITensor(input_idx);
97
98 const auto axis_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::AXIS)};
99 const auto axis = _tensor_registry->getITensor(axis_idx);
100
101 auto output_ind = op.getOutputs().at(0);
102 auto output = _tensor_registry->getITensor(output_ind);
103
104 if (!input->is_dynamic() && !output->is_dynamic())
105 return;
106
107 auto input_shape = input->getShape();
108 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
109 const auto rank = input_shape.rank();
110 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
111
112 ir::Shape new_shape = shape_inference::inferArgMinMaxShape(input_shape, axis_value, rank);
113
114 output->applyShape(new_shape);
115 assert(output->buffer() != nullptr);
116}
ir::Shape inferArgMinMaxShape(const ir::Shape &input_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ArgMinMax::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferArgMinMaxShape(), and onert::ir::operation::ArgMinMax::INPUT.
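
The essential computation above is: normalize a possibly negative axis against the input rank, then drop that dimension. A standalone sketch of the same rule on plain std::vector (the real helper operates on ir::Shape):

#include <cassert>
#include <vector>

// Sketch of the ArgMinMax output-shape rule: remove the reduced axis.
std::vector<int> argMinMaxShape(std::vector<int> in, int axis)
{
  const int rank = static_cast<int>(in.size());
  if (axis < 0)
    axis += rank;                 // normalize negative axis
  assert(0 <= axis && axis < rank);
  in.erase(in.begin() + axis);    // e.g. {2,3,4}, axis=1 -> {2,4}
  return in;
}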

◆ visit() [2/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BatchMatMul &op)
override

Definition at line 118 of file DynamicShapeInferer.cc.

119{
120 const auto lhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::LHS);
121 const auto rhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::RHS);
122 auto lhs = _tensor_registry->getITensor(lhs_index);
123 auto rhs = _tensor_registry->getITensor(rhs_index);
124
125 if (!lhs->is_dynamic() && !rhs->is_dynamic())
126 return;
127
128 const auto output_index = op.getOutputs().at(0);
129 auto output = _tensor_registry->getITensor(output_index);
130
131 auto lhs_shape = lhs->getShape();
132 auto rhs_shape = rhs->getShape();
133 // TODO
134
135 auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param());
136 output->applyShape(new_shape);
137}
ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape, const ir::operation::BatchMatMul::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBatchMatMulShape(), onert::ir::operation::BatchMatMul::LHS, onert::ir::operation::BatchMatMul::param(), and onert::ir::operation::BatchMatMul::RHS.

◆ visit() [3/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQFullyConnected &op)
override

Definition at line 139 of file DynamicShapeInferer.cc.

140{
141 const auto input_idx{op.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
142 const auto &input = _tensor_registry->getITensor(input_idx);
143
144 const auto cluster_idx{
145 op.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_CLUSTERS)};
146 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
147 assert(cluster->is_constant());
148
149 if (!input->is_dynamic())
150 return;
151
152 auto input_shape = input->getShape();
153 auto cluster_shape = cluster->getShape();
154
155 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
156 assert(cluster_buf);
157
158 ir::Shape new_shape =
159 shape_inference::inferBCQFullyConnectedShape(input_shape, cluster_shape, cluster_buf);
160
161 auto output_ind = op.getOutputs().at(0);
162 auto output = _tensor_registry->getITensor(output_ind);
163
164 output->applyShape(new_shape);
165 assert(output->buffer() != nullptr);
166}
ir::Shape inferBCQFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBCQFullyConnectedShape(), onert::ir::operation::BCQFullyConnected::INPUT, and onert::ir::operation::BCQFullyConnected::WEIGHTS_CLUSTERS.

◆ visit() [4/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQGather &op)
override

Definition at line 168 of file DynamicShapeInferer.cc.

169{
170 const auto indices_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
171 const auto &indices = _tensor_registry->getITensor(indices_idx);
172
173 const auto input_binary_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_BINARY)};
174 const auto &input_binary = _tensor_registry->getITensor(input_binary_idx);
175
176 const auto cluster_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
177 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
178 assert(cluster->is_constant());
179
180 if (!indices->is_dynamic())
181 return;
182
183 auto indices_shape = indices->getShape();
184 auto cluster_shape = cluster->getShape();
185 auto rank = input_binary->getShape().rank();
186
187 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
188 assert(cluster_buf);
189
190 ir::Shape new_shape = shape_inference::inferBCQGatherShape(indices_shape, cluster_shape,
191 cluster_buf, rank, op.param());
192
193 auto output_ind = op.getOutputs().at(0);
194 auto output = _tensor_registry->getITensor(output_ind);
195
196 output->applyShape(new_shape);
197 assert(output->buffer() != nullptr);
198}
ir::Shape inferBCQGatherShape(const ir::Shape &indices_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf, int rank, const ir::operation::BCQGather::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::BCQGather::INDICES, onert::shape_inference::inferBCQGatherShape(), onert::ir::operation::BCQGather::INPUT_BINARY, onert::ir::operation::BCQGather::INPUT_CLUSTERS, and onert::ir::operation::BCQGather::param().

◆ visit() [5/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BinaryArithmetic &op)
override

◆ visit() [6/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BroadcastTo &op)
override

Definition at line 206 of file DynamicShapeInferer.cc.

207{
208 auto output_ind = op.getOutputs().at(0);
209 auto output = _tensor_registry->getITensor(output_ind);
210
211 auto input_idx = op.getInputs().at(ir::operation::BroadcastTo::INPUT);
212 auto input = _tensor_registry->getITensor(input_idx);
213
214 if ((!input->is_dynamic()) && (!output->is_dynamic()))
215 return;
216
217 auto shape_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
218 const auto &shape = _tensor_registry->getITensor(shape_idx);
219
220 assert(shape); // It shouldn't be 0.
221
222 ir::Shape output_shape = shape_inference::inferBroadcastToShape(
223 shape->getShape(), reinterpret_cast<const int32_t *>(shape->buffer()));
224
225 // set output shape and output buffer
226 output->applyShape(output_shape);
227 assert(output->buffer() != nullptr);
228}
ir::Shape inferBroadcastToShape(const ir::Shape shp_shape, const int32_t *shp_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBroadcastToShape(), onert::ir::operation::BroadcastTo::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.
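
inferBroadcastToShape presumably just materializes the contents of the 1-D shape tensor as the output shape; compatibility with the input shape is the kernel's concern. A standalone sketch:

#include <cstdint>
#include <vector>

// Sketch: the broadcast-to output shape is the shape tensor's contents.
std::vector<int> broadcastToShape(const int32_t *shp_buf, int num_dims)
{
  return std::vector<int>(shp_buf, shp_buf + num_dims); // e.g. [2,3,4] -> {2,3,4}
}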

◆ visit() [7/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Comparison &op)
override

◆ visit() [8/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Concat &op)
override

Definition at line 236 of file DynamicShapeInferer.cc.

237{
238 /*
239 The state after compilation (static shape inference) could be one of the following:
240
241 inputs output execution-time shape inf required
242 ------------------------------------------ ---------------------------------
243 case 1) all static static X
244 case 2) at least one is dynamic dynamic O
245
246 Then nnfw_apply_tensorinfo() could change one or both inputs to dynamic.
247 So, in this method, we have one more state and we have to re-calculate shape for this case.
248
249 case 3) at least one is dynamic static O
250
251 So, only when all inputs are static, we can skip dynamic shape inference.
252 */
253 bool all_static = true;
254 for (auto &&input_ind : op.getInputs())
255 {
256 auto input = _tensor_registry->getITensor(input_ind);
257 if (input->is_dynamic())
258 {
259 all_static = false;
260 break;
261 }
262 }
263
264 if (all_static)
265 return;
266
267 // sanity check
268 {
269 auto isConcatible = [](const backend::ITensor *input1, const backend::ITensor *input2,
270 int32_t axis) {
271 auto shape1 = input1->getShape();
272 auto shape2 = input2->getShape();
273 if (shape1.rank() != shape2.rank())
274 return false;
275
276 for (int i = 0; i < shape1.rank(); i++)
277 {
278 auto positive_axis = (axis >= 0) ? axis : axis + input1->getShape().rank();
279
280 if (i != positive_axis)
281 if (shape1.dim(i) != shape2.dim(i))
282 return false;
283 }
284
285 return true;
286 };
287
288 auto first_input_ind = op.getInputs().at(0);
289 auto first_input = _tensor_registry->getITensor(first_input_ind);
290
291 for (auto &&input_ind : op.getInputs())
292 {
293 auto input = _tensor_registry->getITensor(input_ind);
294 if (input != first_input && !isConcatible(first_input, input, op.param().axis))
295 throw std::runtime_error("input shapes do not match for concat");
296 }
297 }
298
299 // getting output shape
300 shape_inference::Shapes in_shapes;
301 for (auto &&input_ind : op.getInputs())
302 {
303 auto input = _tensor_registry->getITensor(input_ind);
304 ir::Shape shape = input->getShape();
305
306 in_shapes.emplace_back(shape);
307 }
308
309 auto output_ind = op.getOutputs().at(0);
310 auto output = _tensor_registry->getITensor(output_ind);
311 auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param());
312
313 output->applyShape(output_shape);
314}
std::vector< ir::Shape > Shapes
ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Concat::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::backend::ITensor::getShape(), onert::shape_inference::inferConcatShape(), output_shape, and onert::ir::operation::Concat::param().
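
The rule inferConcatShape implements is the standard one: all non-axis dimensions must match (checked by the sanity block above) and the axis dimension is the sum over inputs. A standalone sketch:

#include <cassert>
#include <vector>

// Sketch of the concat output-shape rule: sum the axis dimension.
std::vector<int> concatShape(const std::vector<std::vector<int>> &ins, int axis)
{
  assert(!ins.empty());
  std::vector<int> out = ins[0];
  if (axis < 0)
    axis += static_cast<int>(out.size());
  for (size_t i = 1; i < ins.size(); ++i)
    out[axis] += ins[i][axis];    // e.g. {2,3} + {2,5} on axis 1 -> {2,8}
  return out;
}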

◆ visit() [9/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Conv2D &op)
override

Definition at line 316 of file DynamicShapeInferer.cc.

317{
318 // check if input is not dynamic
319 auto input_ind = op.getInputs().at(ir::operation::Conv2D::INPUT);
320 auto input = _tensor_registry->getITensor(input_ind);
321
322 auto ker_ind = op.getInputs().at(ir::operation::Conv2D::KERNEL);
323 auto ker = _tensor_registry->getITensor(ker_ind);
324
325 if ((!input->is_dynamic()) && (!ker->is_dynamic()))
326 return;
327
328 ir::Shape input_shape = input->getShape();
329 ir::Shape ker_shape = ker->getShape();
330
331 auto output_ind = op.getOutputs().at(0);
332 auto output = _tensor_registry->getITensor(output_ind);
333
334 ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param());
335
336 output->applyShape(output_shape);
337 assert(output->buffer() != nullptr);
338}
ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const ir::operation::Conv2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferConv2DShape(), onert::ir::operation::Conv2D::INPUT, onert::ir::operation::Conv2D::KERNEL, output_shape, and onert::ir::operation::Conv2D::param().

◆ visit() [10/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DetectionPostProcess &op)
override

Definition at line 602 of file DynamicShapeInferer.cc.

603{
604 // NOTE DetectionPostProcess's undefined output shapes are decided at compile time
605 // by the static shape inferer.
606 // DetectionPostProcess's output shapes are independent of the input shape
607 // and are decided by parameter values.
608}

◆ visit() [11/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseActivation &op)
override

◆ visit() [12/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseBinary &op)
override

◆ visit() [13/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseUnary &op)
override

◆ visit() [14/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ExpandDims &op)
override

Definition at line 356 of file DynamicShapeInferer.cc.

357{
358 // check if input is not dynamic
359 auto input_ind = op.getInputs().at(ir::operation::ExpandDims::INPUT);
360 auto input = _tensor_registry->getITensor(input_ind);
361
362 // check if output is not dynamic, meaning when 1st input is static and 2nd input is const
363 auto output_ind = op.getOutputs().at(0);
364 auto output = _tensor_registry->getITensor(output_ind);
365
366 /*
367 Here, the state after compilation (static shape inference) could be one of the following:
368
369 input1 input2 output execution-time shape inf required
370 ----------------------------- --------------------------------
371 case 1) static const static X
372 case 2) static placeholder dynamic O
373 case 3) dynamic const dynamic O
374 case 4) dynamic placeholder dynamic O
375
376 Then nnfw_apply_tensorinfo() could change an input to dynamic.
377 So, in this method, we could have one more state and we have to re-calculate shape
378 for this case.
379
380 case 5) dynamic const static O
381
382 So, only when input1 and output are static, we can skip dynamic shape inference.
383 */
384 if ((!input->is_dynamic()) && (!output->is_dynamic()))
385 return;
386
387 ir::Shape input_shape = input->getShape();
388
389 auto axis_ind = op.getInputs().at(ir::operation::ExpandDims::AXIS);
390 auto axis = _tensor_registry->getITensor(axis_ind);
391 auto axis_type = axis->data_type();
392 assert(axis_type == ir::DataType::INT32 || axis_type == ir::DataType::INT64);
393
394 assert(axis->buffer());
395 int32_t axis_value =
396 (axis_type == ir::DataType::INT32)
397 ? reinterpret_cast<const int32_t *>(axis->buffer())[0]
398 : static_cast<int32_t>(reinterpret_cast<const int64_t *>(axis->buffer())[0]);
399
400 auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_value);
401
402 output->applyShape(output_shape);
403 assert(output->buffer() != nullptr);
404}
ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ExpandDims::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferExpandDimsShape(), onert::ir::operation::ExpandDims::INPUT, and output_shape.
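
inferExpandDimsShape follows the usual convention: insert a dimension of size 1 at the normalized axis, where the output rank is rank + 1. A standalone sketch:

#include <cassert>
#include <vector>

// Sketch of the expand-dims output-shape rule: insert a 1 at `axis`.
std::vector<int> expandDimsShape(std::vector<int> in, int axis)
{
  const int out_rank = static_cast<int>(in.size()) + 1;
  if (axis < 0)
    axis += out_rank;             // e.g. axis=-1 appends at the end
  assert(0 <= axis && axis < out_rank);
  in.insert(in.begin() + axis, 1); // {2,3}, axis=0 -> {1,2,3}
  return in;
}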

◆ visit() [15/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Fill &op)
override

Definition at line 406 of file DynamicShapeInferer.cc.

407{
408 // check if output is not dynamic
409 auto output_ind = op.getOutputs().at(0);
410 auto output = _tensor_registry->getITensor(output_ind);
411 auto shape_ind = op.getInputs().at(ir::operation::Fill::Input::SHAPE);
412 auto shape = _tensor_registry->getITensor(shape_ind);
413
414 if ((!shape->is_dynamic()) && (!output->is_dynamic()))
415 return;
416
417 const auto dims_type = shape->data_type();
418 assert(dims_type == ir::DataType::INT32 || dims_type == ir::DataType::INT64);
419
420 auto dims_buf = shape->buffer();
421 assert(dims_buf);
422
423 const auto &dims_shape = shape->getShape();
424 const auto &output_shape = ((dims_type == ir::DataType::INT32)
425 ? shape_inference::inferFillShape<int32_t>(
426 dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
427 : shape_inference::inferFillShape<int64_t>(
428 dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
429
430 output->applyShape(output_shape);
431 assert(output->buffer() != nullptr);
432}
ir::Shape inferFillShape(const ir::Shape &fill_shape, const T *shape_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), output_shape, and onert::ir::operation::Fill::SHAPE.

◆ visit() [16/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FullyConnected &op)
override

Definition at line 434 of file DynamicShapeInferer.cc.

435{
436 const auto input_idx{op.getInputs().at(ir::operation::FullyConnected::Input::INPUT)};
437 const auto &input = _tensor_registry->getITensor(input_idx);
438
439 const auto ker_idx{op.getInputs().at(ir::operation::FullyConnected::Input::WEIGHT)};
440 const auto &ker = _tensor_registry->getITensor(ker_idx);
441
442 if (!input->is_dynamic() && !ker->is_dynamic())
443 return;
444
445 auto input_shape = input->getShape();
446 auto ker_shape = ker->getShape();
447
448 ir::Shape new_shape = shape_inference::inferFullyConnectedShape(input_shape, ker_shape);
449
450 auto output_ind = op.getOutputs().at(0);
451 auto output = _tensor_registry->getITensor(output_ind);
452
453 output->applyShape(new_shape);
454 assert(output->buffer() != nullptr);
455}
ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferFullyConnectedShape(), onert::ir::operation::FullyConnected::INPUT, and onert::ir::operation::FullyConnected::WEIGHT.

◆ visit() [17/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FusedBatchNorm &op)
override

◆ visit() [18/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Gather &op)
override

Definition at line 462 of file DynamicShapeInferer.cc.

463{
464 const auto input_idx{op.getInputs().at(ir::operation::Gather::Input::INPUT)};
465 const auto &input = _tensor_registry->getITensor(input_idx);
466 auto input_shape = input->getShape();
467
468 const auto indices_idx{op.getInputs().at(ir::operation::Gather::Input::INDICES)};
469 const auto &indices = _tensor_registry->getITensor(indices_idx);
470 auto indices_shape = indices->getShape();
471
472 if (!(input->is_dynamic()) && !(indices->is_dynamic()))
473 return;
474
475 const auto rank = input_shape.rank();
476 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
477
478 assert(0 <= axis && axis < rank);
479
480 ir::Shape new_shape = shape_inference::inferGatherShape(input_shape, indices_shape, axis, rank);
481
482 auto output_ind = op.getOutputs().at(0);
483 auto output = _tensor_registry->getITensor(output_ind);
484
485 output->applyShape(new_shape);
486 assert(output->buffer() != nullptr);
487}
ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Gather::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Gather::INDICES, onert::shape_inference::inferGatherShape(), onert::ir::operation::Gather::INPUT, and onert::ir::operation::Gather::param().
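
The gather rule: the output shape is the input shape with the axis dimension replaced by the entire indices shape, so the output rank is rank(input) + rank(indices) - 1. A standalone sketch (axis already normalized, as in the code above):

#include <vector>

// Sketch of the gather output-shape rule: splice the indices shape in at `axis`.
std::vector<int> gatherShape(const std::vector<int> &in,
                             const std::vector<int> &indices, int axis)
{
  std::vector<int> out(in.begin(), in.begin() + axis);    // dims before axis
  out.insert(out.end(), indices.begin(), indices.end());  // indices dims
  out.insert(out.end(), in.begin() + axis + 1, in.end()); // dims after axis
  return out; // e.g. in={4,5}, indices={2,3}, axis=0 -> {2,3,5}
}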

◆ visit() [19/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::L2Normalization &op)
override

◆ visit() [20/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::LSTM &op)
override

Definition at line 494 of file DynamicShapeInferer.cc.

495{
496 const auto output_index{op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
497 auto output = _tensor_registry->getITensor(output_index);
498
499 const auto output_state_out_index{
500 op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
501
502 const auto cell_state_out_index{op.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
503
504 const auto scratch_buffer_index{op.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
505
506 if (!output->is_dynamic() &&
507 !(_tensor_registry->getITensor(output_state_out_index) != nullptr &&
508 _tensor_registry->getITensor(output_state_out_index)->is_dynamic()) &&
509 !(_tensor_registry->getITensor(cell_state_out_index) != nullptr &&
510 _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()) &&
511 !(_tensor_registry->getITensor(scratch_buffer_index) != nullptr &&
512 _tensor_registry->getITensor(scratch_buffer_index)->is_dynamic()))
513 return;
514
515 const auto input_index{op.getInputs().at(ir::operation::LSTM::Input::INPUT)};
516 const auto input = _tensor_registry->getITensor(input_index);
517 const auto input_shape = input->getShape();
518
519 const auto input_to_output_weights_index{
520 op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
521 const auto input_to_output_weights = _tensor_registry->getITensor(input_to_output_weights_index);
522 const auto input_to_output_weights_shape = input_to_output_weights->getShape();
523
524 const auto recurrent_to_output_weights_index{
525 op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
526 const auto recurrent_to_output_weights =
527 _tensor_registry->getITensor(recurrent_to_output_weights_index);
528 const auto recurrent_to_output_weights_shape = recurrent_to_output_weights->getShape();
529
530 // re-sizing outputs
531 const int n_batch =
532 (input_shape.rank() == 3 && op.param().time_major) ? input_shape.dim(1) : input_shape.dim(0);
533 const int n_cell = input_to_output_weights_shape.dim(0);
534 const int n_output = recurrent_to_output_weights_shape.dim(1);
535 if (input_shape.rank() == 3)
536 {
537 if (op.param().time_major)
538 output->applyShape(ir::Shape{input_shape.dim(0), n_batch, n_output});
539 else
540 output->applyShape(ir::Shape{n_batch, input_shape.dim(1), n_output});
541 }
542 else
543 {
544 assert(input_shape.rank() == 2);
545 output->applyShape(ir::Shape{n_batch, n_output});
546 }
547 assert(output->buffer() != nullptr);
548
549 auto output_state_out = _tensor_registry->getITensor(output_state_out_index);
550 if (output_state_out != nullptr)
551 {
552 output_state_out->applyShape(ir::Shape{n_batch, n_output});
553 assert(output_state_out->buffer() != nullptr);
554 }
555
556 auto cell_state_out = _tensor_registry->getITensor(cell_state_out_index);
557 if (cell_state_out != nullptr)
558 {
559 cell_state_out->applyShape(ir::Shape{n_batch, n_cell});
560 assert(cell_state_out->buffer() != nullptr);
561 }
562
563 auto scratch_buffer = _tensor_registry->getITensor(scratch_buffer_index);
564 if (scratch_buffer != nullptr)
565 {
566 const auto input_to_input_weights_index{
567 op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)};
568 const auto recurrent_to_input_weights_index{
569 op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)};
570
571 const auto input_to_input_weights_shape =
572 _tensor_registry->getITensor(input_to_input_weights_index)->getShape();
573 bool has_input_to_input_weights =
574 input_to_input_weights_shape.dim(0) != 0 && input_to_input_weights_shape.dim(1) != 0;
575
576 const auto recurrent_to_input_weights_shape =
577 _tensor_registry->getITensor(recurrent_to_input_weights_index)->getShape();
578 bool has_recurrent_to_input_weights =
579 recurrent_to_input_weights_shape.dim(0) != 0 && recurrent_to_input_weights_shape.dim(1) != 0;
580
581 // NOTE The cell_to_input_weights do not exist in non-peephole LSTM, even in regular (non-CIFG) LSTM.
582 // true: no CIFG
583 // false: CIFG
584 bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
585 if (has_cifg_param)
586 {
587 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 4});
588 }
589 else
590 {
591 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 3});
592 }
593 assert(scratch_buffer->buffer() != nullptr);
594 }
595}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::LSTM::CELL_STATE_OUT, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::LSTM::INPUT, onert::ir::operation::LSTM::INPUT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::OUTPUT, onert::ir::operation::LSTM::OUTPUT_STATE_OUT, onert::ir::operation::LSTM::param(), onert::ir::operation::LSTM::RECURRENT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::SCRATCH_BUFFER, and onert::ir::operation::LSTM::Param::time_major.

◆ visit() [21/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::MatrixBandPart &op)
override

◆ visit() [22/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::OneHot &op)
override

Definition at line 610 of file DynamicShapeInferer.cc.

611{
612 auto output_ind = op.getOutputs().at(0);
613 auto output = _tensor_registry->getITensor(output_ind);
614
615 auto indices_ind = op.getInputs().at(ir::operation::OneHot::INDICES);
616 const auto &indices = _tensor_registry->getITensor(indices_ind);
617 auto indices_shape = indices->getShape();
618
619 auto depth_ind = op.getInputs().at(ir::operation::OneHot::DEPTH);
620 const auto &depth = _tensor_registry->getITensor(depth_ind);
621
622 if (!indices->is_dynamic() && !depth->is_dynamic())
623 {
624 return;
625 }
626
627 int32_t *depth_buf = reinterpret_cast<int32_t *>(depth->buffer());
628 assert(depth_buf);
629 const auto axis_val = op.param().axis;
630
631 ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val);
632 output->applyShape(new_shape);
633 assert(output->buffer() != nullptr);
634}
ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::OneHot::Param::axis, onert::ir::operation::OneHot::DEPTH, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::OneHot::INDICES, onert::shape_inference::inferOnehotShape(), and onert::ir::operation::OneHot::param().

◆ visit() [23/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pack &op)
override

Definition at line 636 of file DynamicShapeInferer.cc.

637{
638 bool is_any_of_inputs_dynamic = [&]() -> bool {
639 for (uint32_t i = 0; i < op.getInputs().size(); ++i)
640 {
641 const auto &input = _tensor_registry->getITensor(op.getInputs().at(i));
642 if (input->is_dynamic())
643 {
644 return true;
645 }
646 }
647 return false;
648 }();
649
650 const auto input_idx{op.getInputs().at(0)};
651 const auto &input = _tensor_registry->getITensor(input_idx);
652 auto input_shape = input->getShape();
653
654 auto output_ind = op.getOutputs().at(0);
655 auto output = _tensor_registry->getITensor(output_ind);
656
657 if (!is_any_of_inputs_dynamic && !output->is_dynamic())
658 return;
659
660 const auto rank = input_shape.rank() + 1;
661 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
662 const auto num = op.param().num;
663
664 assert(0 <= axis && axis < rank);
665
666 ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num);
667
668 output->applyShape(new_shape);
669 assert(output->buffer() != nullptr);
670}
ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Pack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPackShape(), onert::ir::operation::Pack::Param::num, onert::ir::operation::Pack::param(), and onert::ir::OperandIndexSequence::size().
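
Pack stacks num equally-shaped inputs along a new axis, which is why the code above normalizes the axis against rank + 1. The output shape is the input shape with num inserted at that axis; a standalone sketch:

#include <vector>

// Sketch of the pack output-shape rule: insert `num` at the new axis.
std::vector<int> packShape(std::vector<int> in, int axis, int num)
{
  if (axis < 0)
    axis += static_cast<int>(in.size()) + 1;
  in.insert(in.begin() + axis, num); // {2,3}, axis=1, num=4 -> {2,4,3}
  return in;
}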

◆ visit() [24/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pad &op)
override

Definition at line 672 of file DynamicShapeInferer.cc.

673{
674 // check if output is not dynamic
675 auto output_ind = op.getOutputs().at(0);
676 auto output = _tensor_registry->getITensor(output_ind);
677
678 auto input_ind = op.getInputs().at(ir::operation::Pad::Input::INPUT);
679 auto input = _tensor_registry->getITensor(input_ind);
680
681 auto pad_ind = op.getInputs().at(ir::operation::Pad::Input::PAD);
682 auto pad = _tensor_registry->getITensor(pad_ind);
683
684 // check if input and output are not dynamic
685 if ((!input->is_dynamic()) && (!output->is_dynamic()))
686 return;
687
688 int32_t *pad_buf = reinterpret_cast<int32_t *>(pad->buffer());
689 assert(pad_buf);
690
691 auto output_shape =
692 shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements());
693
694 // change output shape and reallocate output tensor memory
695 output->applyShape(output_shape);
696 assert(output->buffer() != nullptr);
697}
ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPadShape(), onert::ir::operation::Pad::INPUT, output_shape, and onert::ir::operation::Pad::PAD.

◆ visit() [25/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Permute &op)
override

Definition at line 699 of file DynamicShapeInferer.cc.

700{
701 // NOTE Permute is a special operation which does not do shape inference before the actual
702 // function(kernel) execution. Shape inference and output allocation will be done in the kernel
703 // on-the-fly, as it must support inter-backend inference/allocation.
704}

◆ visit() [26/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pool2D &op)
override

Definition at line 706 of file DynamicShapeInferer.cc.

707{
708 // check if input is not dynamic
709 auto input_ind = op.getInputs().at(ir::operation::Pool2D::INPUT);
710 auto input = _tensor_registry->getITensor(input_ind);
711
712 if (!input->is_dynamic())
713 return;
714
715 ir::Shape input_shape = input->getShape();
716
717 auto output_ind = op.getOutputs().at(0);
718 auto output = _tensor_registry->getITensor(output_ind);
719
720 ir::Shape output_shape = shape_inference::inferPoolShape(input_shape, op.param());
721
722 output->applyShape(output_shape);
723 assert(output->buffer() != nullptr);
724}
ir::Shape inferPoolShape(const ir::Shape &in_shape, const ir::operation::Pool2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPoolShape(), onert::ir::operation::Pool2D::INPUT, output_shape, and onert::ir::operation::Pool2D::param().

◆ visit() [27/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pow &op)
override

◆ visit() [28/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Range &op)
override

Definition at line 732 of file DynamicShapeInferer.cc.

733{
734 // check if output is not dynamic
735 auto output_ind = op.getOutputs().at(0);
736 auto output = _tensor_registry->getITensor(output_ind);
737
738 // from op, access the buffer of second input to read new shape
739 auto start_idx = op.getInputs().at(ir::operation::Range::Input::START);
740 auto start_tensor = _tensor_registry->getITensor(start_idx);
741
742 auto limit_idx = op.getInputs().at(ir::operation::Range::Input::LIMIT);
743 auto limit_tensor = _tensor_registry->getITensor(limit_idx);
744
745 auto delta_idx = op.getInputs().at(ir::operation::Range::Input::DELTA);
746 auto delta_tensor = _tensor_registry->getITensor(delta_idx);
747
748 if (!start_tensor->is_dynamic() && !limit_tensor->is_dynamic() && !delta_tensor->is_dynamic() &&
749 !output->is_dynamic())
750 return;
751
752 ir::Shape new_shape;
753 if (output->data_type() == ir::DataType::FLOAT32)
754 {
755 new_shape =
756 shape_inference::inferRangeShape<float>(*reinterpret_cast<float *>(start_tensor->buffer()),
757 *reinterpret_cast<float *>(limit_tensor->buffer()),
758 *reinterpret_cast<float *>(delta_tensor->buffer()));
759 }
760 else if (output->data_type() == ir::DataType::INT32)
761 {
762 new_shape = shape_inference::inferRangeShape<int32_t>(
763 *reinterpret_cast<int32_t *>(start_tensor->buffer()),
764 *reinterpret_cast<int32_t *>(limit_tensor->buffer()),
765 *reinterpret_cast<int32_t *>(delta_tensor->buffer()));
766 }
767 output->applyShape(new_shape);
768 assert(output->buffer() != nullptr);
769}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Range::DELTA, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Range::LIMIT, and onert::ir::operation::Range::START.
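
inferRangeShape produces a 1-D output whose length is ceil((limit - start) / delta), the count of values the range will emit. A standalone sketch of that arithmetic (assuming delta != 0 and a sign consistent with start and limit):

#include <cmath>

// Sketch of the range output-length rule: ceil((limit - start) / delta).
template <typename T> int rangeLength(T start, T limit, T delta)
{
  return static_cast<int>(std::ceil(std::abs(static_cast<double>(limit - start)) /
                                    std::abs(static_cast<double>(delta))));
  // e.g. start=0, limit=10, delta=3 -> ceil(10/3) = 4 elements {0,3,6,9}
}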

◆ visit() [29/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reduce &op)
override

Definition at line 771 of file DynamicShapeInferer.cc.

772{
773 const auto input_idx{op.getInputs().at(ir::operation::Reduce::Input::INPUT)};
774 const auto &input = _tensor_registry->getITensor(input_idx);
775 auto input_shape = input->getShape();
776
777 const auto axes_idx{op.getInputs().at(ir::operation::Reduce::Input::AXES)};
778 const auto &axes = _tensor_registry->getITensor(axes_idx);
779
780 if (!input->is_dynamic())
781 return;
782
783 std::vector<int32_t> axes_vec;
784 for (uint32_t i = 0; i < axes->getShape().num_elements(); ++i)
785 {
786 const auto buffer = axes->buffer() + axes->calcOffset({i});
787 switch (axes->data_type())
788 {
789 case ir::DataType::INT32:
790 {
791 axes_vec.emplace_back(*reinterpret_cast<const int32_t *>(buffer));
792 break;
793 }
794 case ir::DataType::INT64:
795 {
796 axes_vec.emplace_back(*reinterpret_cast<const int64_t *>(buffer));
797 break;
798 }
799 default:
800 throw std::runtime_error("DynamicShapeInferer " + op.name() + ": Not supported data type");
801 break;
802 }
803 }
804 const auto keep_dims = op.param().keep_dims;
805
806 auto output_ind = op.getOutputs().at(0);
807 auto output = _tensor_registry->getITensor(output_ind);
808
809 ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims);
810
811 output->applyShape(new_shape);
812 assert(output->buffer() != nullptr);
813}
ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector< int > &axes, bool keep_dims)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Reduce::AXES, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReduceShape(), onert::ir::operation::Reduce::INPUT, onert::ir::operation::Reduce::Param::keep_dims, onert::ir::operation::Reduce::name(), and onert::ir::operation::Reduce::param().
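
The reduce rule: each listed axis is dropped when keep_dims is false, or set to 1 when keep_dims is true. A standalone sketch:

#include <algorithm>
#include <vector>

// Sketch of the reduce output-shape rule: drop or squash the reduced axes.
std::vector<int> reduceShape(const std::vector<int> &in,
                             std::vector<int> axes, bool keep_dims)
{
  const int rank = static_cast<int>(in.size());
  for (auto &a : axes)
    if (a < 0)
      a += rank;                  // normalize negative axes
  std::vector<int> out;
  for (int i = 0; i < rank; ++i)
  {
    const bool reduced = std::find(axes.begin(), axes.end(), i) != axes.end();
    if (!reduced)
      out.push_back(in[i]);
    else if (keep_dims)
      out.push_back(1);           // {2,3,4}, axes={1}, keep_dims -> {2,1,4}
  }
  return out;
}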

◆ visit() [30/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reshape &op)
override

Definition at line 815 of file DynamicShapeInferer.cc.

816{
817 // check if output is not dynamic
818 auto output_ind = op.getOutputs().at(0);
819 auto output = _tensor_registry->getITensor(output_ind);
820
821 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
822 auto input = _tensor_registry->getITensor(input_ind);
823
824 /*
825 Here, the state after compilation (static shape inference) could be one of the following:
826
827 input1 input2 (or option) output execution-time shape inf required
828 ------------------------------------ --------------------------------
829 case 1) static const static X
830 case 2) static placeholder dynamic O
831 case 3) dynamic const dynamic O
832 case 4) dynamic placeholder dynamic O
833
834 Then nnfw_apply_tensorinfo() could change an input to dynamic.
835 So, in this method, we could have one more state and we have to re-calculate shape
836 for this case.
837
838 case 5) dynamic const static O
839
840 So, only when both input1 and output are static, we can skip dynamic shape inference.
841 */
842 if ((!input->is_dynamic()) && (!output->is_dynamic()))
843 return;
844
845 // New shape is given by second input tensor
846 if (op.getInputs().size() == 2)
847 {
848 // from op, access the buffer of second input to read new shape
849 auto new_shape_ind = op.getInputs().at(ir::operation::Reshape::Input::SHAPE);
850
851 // getting output shape by reading new_shape tensor buffer
852 auto new_shape = _tensor_registry->getITensor(new_shape_ind);
853 assert(new_shape);
854
855 int32_t *new_shape_buf = reinterpret_cast<int32_t *>(new_shape->buffer());
856 assert(new_shape_buf);
857
858 auto output_shape = shape_inference::inferReshapeShape(input->getShape(), new_shape_buf,
859 new_shape->getShape().num_elements());
860
861 // if shape is changed, change output shape and reallocate output tensor memory
862 if (output_shape != output->getShape() || output->buffer() == nullptr)
863 {
864 // change on output shape
865 output->applyShape(output_shape);
866 }
867 assert(output->buffer() != nullptr);
868 }
869 // New shape is given by option
870 else if (op.param().new_shape.size() != 0)
871 {
872 // Let's check the new_shape option
873 auto shape = op.param().new_shape;
874 auto output_shape =
875 shape_inference::inferReshapeShape(input->getShape(), shape.data(), shape.size());
876
877 // if shape is changed, change output shape and reallocate output tensor memory
878 if (output_shape != output->getShape() || output->buffer() == nullptr)
879 {
880 // change on output shape
881 output->applyShape(output_shape);
882 }
883 assert(output->buffer() != nullptr);
884 }
885 else
886 {
887 throw std::runtime_error("Reshape: new shape is missing");
888 return;
889 }
890}
ir::Shape inferReshapeShape(const ir::Shape &input_shape, const int32_t *shape_buf, const int32_t shape_num_elements)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReshapeShape(), onert::ir::operation::Reshape::INPUT, onert::ir::operation::Reshape::Param::new_shape, output_shape, onert::ir::operation::Reshape::param(), onert::ir::operation::Reshape::SHAPE, and onert::ir::OperandIndexSequence::size().
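
Reshape-shape computation conventionally allows a single -1 entry, inferred so that the total element count is preserved; a mismatch is an error. A standalone sketch of that convention (the exact error handling of inferReshapeShape may differ):

#include <cassert>
#include <vector>

// Sketch of the reshape rule: resolve a single -1 from the element count.
std::vector<int> reshapeShape(int num_elements, std::vector<int> shape)
{
  int known = 1, wildcard = -1;
  for (int i = 0; i < static_cast<int>(shape.size()); ++i)
  {
    if (shape[i] == -1)
      wildcard = i;               // at most one -1 is allowed
    else
      known *= shape[i];
  }
  if (wildcard >= 0)
    shape[wildcard] = num_elements / known; // e.g. 12 elements, {3,-1} -> {3,4}
  assert(num_elements == known * (wildcard >= 0 ? shape[wildcard] : 1));
  return shape;
}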

◆ visit() [31/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ResizeBilinear &op)
override

Definition at line 892 of file DynamicShapeInferer.cc.

893{
894 // check if output is not dynamic
895 auto output_ind = op.getOutputs().at(0);
896 auto output = _tensor_registry->getITensor(output_ind);
897
898 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
899 auto input = _tensor_registry->getITensor(input_ind);
900
901 if ((!input->is_dynamic()) && (!output->is_dynamic()))
902 return;
903
904 // getting output shape from input shape and Params
905 int32_t height_out, width_out;
906 if (op.getInputs().size() == 2)
907 {
908 auto size_ind = op.getInputs().at(ir::operation::ResizeBilinear::Input::SIZE);
909 auto size = _tensor_registry->getITensor(size_ind);
910 if (size->data_type() == ir::DataType::INT32)
911 {
912 auto size_buf = reinterpret_cast<const int32_t *>(size->buffer());
913 height_out = size_buf[0];
914 width_out = size_buf[1];
915 }
916 else
917 {
918 throw std::runtime_error("DynamicShapeInferer ResizeBilinear : Unsupported data type");
919 }
920 }
921 else
922 {
923 height_out = op.param().height_out;
924 width_out = op.param().width_out;
925 }
926 auto output_shape =
927 shape_inference::inferResizeBilinearShape(input->getShape(), height_out, width_out);
928
929 // if shape is changed, change output shape and reallocate output tensor memory
930 if (output_shape != output->getShape() || output->buffer() == nullptr)
931 {
932 // change on output shape
933 output->applyShape(output_shape);
934 }
935 assert(output->buffer() != nullptr);
936}
ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height, const int32_t output_width)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::ResizeBilinear::Param::height_out, onert::shape_inference::inferResizeBilinearShape(), onert::ir::operation::Reshape::INPUT, output_shape, onert::ir::operation::ResizeBilinear::param(), onert::ir::OperandIndexSequence::size(), size, onert::ir::operation::ResizeBilinear::SIZE, and onert::ir::operation::ResizeBilinear::Param::width_out.

◆ visit() [32/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reverse &op)
override

◆ visit() [33/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Select &op)
override

Definition at line 943 of file DynamicShapeInferer.cc.

944{
945 const auto input_cond_idx = op.getInputs().at(ir::operation::Select::Input::CONDITION);
946 const auto &input_cond = _tensor_registry->getITensor(input_cond_idx);
947
948 const auto input_true_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_TRUE);
949 const auto &input_true = _tensor_registry->getITensor(input_true_idx);
950
951 const auto input_false_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_FALSE);
952 const auto &input_false = _tensor_registry->getITensor(input_false_idx);
953
954 if ((!input_cond->is_dynamic()) && (!input_true->is_dynamic()) && (!input_false->is_dynamic()))
955 {
956 return;
957 }
958
959 auto input_cond_shape = input_cond->getShape();
960 auto input_true_shape = input_true->getShape();
961 auto input_false_shape = input_false->getShape();
962
963 // Select output shape
964 ir::Shape new_shape =
965 shape_inference::inferSelectShape(input_cond_shape, input_true_shape, input_false_shape);
966
967 auto output_ind = op.getOutputs().at(0);
968 auto output = _tensor_registry->getITensor(output_ind);
969
970 output->applyShape(new_shape);
971 assert(output->buffer() != nullptr);
972}
ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape, const ir::Shape &input_false_shape)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Select::CONDITION, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSelectShape(), onert::ir::operation::Select::INPUT_FALSE, and onert::ir::operation::Select::INPUT_TRUE.

◆ visit() [34/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Shape &op)
override

Definition at line 974 of file DynamicShapeInferer.cc.

975{
976 const auto input_idx{op.getInputs().at(0)};
977 const auto &input = _tensor_registry->getITensor(input_idx);
978 auto input_shape = input->getShape();
979
980 if (!input->is_dynamic())
981 return;
982
983 auto output_ind = op.getOutputs().at(0);
984 auto output = _tensor_registry->getITensor(output_ind);
985
986 ir::Shape output_shape;
987 output_shape.append(input_shape.rank());
988
989 output->applyShape(output_shape);
990 assert(output->buffer() != nullptr);
991}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), and output_shape.

◆ visit() [35/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Slice &op)
override

Definition at line 993 of file DynamicShapeInferer.cc.

994{
995 const auto input_index{op.getInputs().at(ir::operation::Slice::Input::INPUT)};
996 const auto input = _tensor_registry->getITensor(input_index);
997 const auto begins_index{op.getInputs().at(ir::operation::Slice::Input::BEGINS)};
998 const auto begins = _tensor_registry->getITensor(begins_index);
999 const auto sizes_index{op.getInputs().at(ir::operation::Slice::Input::SIZES)};
1000 const auto sizes = _tensor_registry->getITensor(sizes_index);
1001 auto output_index = op.getOutputs().at(0);
1002 auto output = _tensor_registry->getITensor(output_index);
1003
1004 if (!(input->is_dynamic() || begins->is_dynamic() || sizes->is_dynamic() || output->is_dynamic()))
1005 {
1006 return;
1007 }
1008
1009 ir::Shape input_shape = input->getShape();
1010 auto begins_buf = reinterpret_cast<const int32_t *>(begins->buffer());
1011 auto sizes_buf = reinterpret_cast<const int32_t *>(sizes->buffer());
1012
1013 ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf);
1014
1015 output->applyShape(new_shape);
1016 assert(output->buffer() != nullptr);
1017}
ir::Shape inferSliceShape(const ir::Shape &input_shape, const T *begins_buf, const T *sizes_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Slice::BEGINS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSliceShape(), onert::ir::operation::Slice::INPUT, and onert::ir::operation::Slice::SIZES.

◆ visit() [36/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Softmax &op)
override

◆ visit() [37/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::SpaceToBatchND &op)
override

Definition at line 1024 of file DynamicShapeInferer.cc.

1025{
1026 const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
1027 const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
1028 const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
1029 auto output_idx{op.getOutputs().at(0)};
1030
1031 const auto &input = _tensor_registry->getITensor(input_idx);
1032 const auto &block_shape = _tensor_registry->getITensor(block_shape_idx);
1033 const auto &padding = _tensor_registry->getITensor(padding_idx);
1034 auto output = _tensor_registry->getITensor(output_idx);
1035
1036 if (!(input->is_dynamic() || block_shape->is_dynamic() || padding->is_dynamic() ||
1037 output->is_dynamic()))
1038 {
1039 return;
1040 }
1041
1042 auto input_shape = input->getShape();
1043 auto block_shape_shape = block_shape->getShape();
1044 auto padding_shape = padding->getShape();
1045
1046 auto block_shape_data = reinterpret_cast<int32_t *>(block_shape->buffer());
1047 auto padding_data = reinterpret_cast<int32_t *>(padding->buffer());
1048
1049 ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
1050 input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
1051
1052 output->applyShape(new_shape);
1053 assert(output->buffer() != nullptr);
1054}
ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape, const ir::Shape &padding_shape, const int32_t *block_shape_buf, const int32_t *padding_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::SpaceToBatchND::BLOCK_SIZE, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSpaceToBatchNDShape(), onert::ir::operation::SpaceToBatchND::INPUT, and onert::ir::operation::SpaceToBatchND::PADDINGS.

◆ visit() [38/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Split &op)
override

Definition at line 1056 of file DynamicShapeInferer.cc.

1057{
1058 const auto input_idx{op.getInputs().at(ir::operation::Split::Input::INPUT)};
1059 const auto &input = _tensor_registry->getITensor(input_idx);
1060
1061 // Return if all tensors are not dynamic
1062 bool has_dynamic = false;
1063 for (const auto &output_idx : op.getOutputs())
1064 {
1065 auto output = _tensor_registry->getITensor(output_idx);
1066 has_dynamic |= output->is_dynamic();
1067 }
1068 if (!input->is_dynamic() && !has_dynamic)
1069 {
1070 return;
1071 }
1072
1073 auto input_shape = input->getShape();
1074
1075 const auto axis_idx{op.getInputs().at(ir::operation::Split::Input::AXIS)};
1076 const auto &axis = _tensor_registry->getITensor(axis_idx);
1077
1078 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
1079 const auto num_splits = op.param().num_splits;
1080 const auto rank = input_shape.rank();
1081 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
1082
1083 assert(0 <= axis_value && axis_value < rank);
1084
1085 ir::Shape new_shape = shape_inference::inferSplitShape(input_shape, axis_value, num_splits);
1086 for (int out_tensor_idx = 0; out_tensor_idx < num_splits; out_tensor_idx++)
1087 {
1088 auto output_ind = op.getOutputs().at(out_tensor_idx);
1089 auto output = _tensor_registry->getITensor(output_ind);
1090
1091 output->applyShape(new_shape);
1092 assert(output->buffer() != nullptr);
1093 }
1094}
ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Split::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSplitShape(), onert::ir::operation::Split::INPUT, onert::ir::operation::Split::Param::num_splits, and onert::ir::operation::Split::param().
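
The split rule: the (normalized) axis dimension must divide evenly by num_splits, and every output gets dim(axis) / num_splits there, which is why the loop above applies the same new_shape to each output. A standalone sketch:

#include <cassert>
#include <vector>

// Sketch of the split output-shape rule: divide the axis dimension.
std::vector<int> splitShape(std::vector<int> in, int axis, int num_splits)
{
  assert(in[axis] % num_splits == 0); // must divide evenly
  in[axis] /= num_splits;             // {6,4}, axis=0, num_splits=3 -> {2,4}
  return in;
}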

◆ visit() [39/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::SquaredDifference &op)
override

◆ visit() [40/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Squeeze &op)
override

Definition at line 1102 of file DynamicShapeInferer.cc.

1103{
1104 const auto input_idx{op.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
1105 const auto &input = _tensor_registry->getITensor(input_idx);
1106
1107 if (!input->is_dynamic())
1108 {
1109 return;
1110 }
1111
1112 auto input_shape = input->getShape();
1113
1114 // Squeeze output shape
1115 ir::Shape new_shape = shape_inference::inferSqueezeShape(input_shape, op.param());
1116
1117 auto output_ind = op.getOutputs().at(0);
1118 auto output = _tensor_registry->getITensor(output_ind);
1119
1120 output->applyShape(new_shape);
1121 assert(output->buffer() != nullptr);
1122}
ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSqueezeShape(), onert::ir::operation::Squeeze::INPUT, and onert::ir::operation::Squeeze::param().

◆ visit() [41/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::StridedSlice &op)
override

Definition at line 1124 of file DynamicShapeInferer.cc.

1125{
1126
1127 const auto input_index{op.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
1128 auto input = _tensor_registry->getITensor(input_index);
1129 ir::Shape input_shape = input->getShape();
1130
1131 const auto starts_index{op.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
1132 auto starts = _tensor_registry->getITensor(starts_index);
1133
1134 const auto ends_index{op.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
1135 auto ends = _tensor_registry->getITensor(ends_index);
1136
1137 const auto strides_index{op.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
1138 auto strides = _tensor_registry->getITensor(strides_index);
1139
1140 if (!(input->is_dynamic() || starts->is_dynamic() || ends->is_dynamic() || strides->is_dynamic()))
1141 {
1142 return;
1143 }
1144
1145 const auto begin_mask = op.param().begin_mask;
1146 const auto end_mask = op.param().end_mask;
1147 const auto shrink_axis_mask = op.param().shrink_axis_mask;
1148 const auto rank = input_shape.rank();
1149
1150 auto op_params = shape_inference::buildStridedSliceParams(
1151 reinterpret_cast<uint32_t *>(starts->buffer()), reinterpret_cast<uint32_t *>(ends->buffer()),
1152 reinterpret_cast<uint32_t *>(strides->buffer()), begin_mask, end_mask, shrink_axis_mask, rank);
1153
1154 auto output_index = op.getOutputs().at(0);
1155 auto output = _tensor_registry->getITensor(output_index);
1156
1157 ir::Shape output_shape =
1158 onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank);
1159
1160 output->applyShape(output_shape);
1161 assert(output->buffer() != nullptr);
1162}
StridedSliceParams buildStridedSliceParams(const T *begin, const T *end, const T *strides, const uint32_t begin_mask, const uint32_t end_mask, const uint32_t shrink_axis_mask, const uint8_t rank)
ir::Shape inferStridedSliceShape(const ir::Shape &input_shape, const StridedSliceParams &op_params, uint32_t rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::StridedSlice::Param::begin_mask, onert::shape_inference::buildStridedSliceParams(), onert::ir::operation::StridedSlice::Param::end_mask, onert::ir::operation::StridedSlice::ENDS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferStridedSliceShape(), onert::ir::operation::StridedSlice::INPUT, output_shape, onert::ir::operation::StridedSlice::param(), onert::ir::operation::StridedSlice::Param::shrink_axis_mask, onert::ir::operation::StridedSlice::STARTS, and onert::ir::operation::StridedSlice::STRIDES.

◆ visit() [42/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Tile &op)
override

Definition at line 1164 of file DynamicShapeInferer.cc.

1165{
1166 auto output_ind = op.getOutputs().at(0);
1167 auto output = _tensor_registry->getITensor(output_ind);
1168
1169 auto input_idx = op.getInputs().at(ir::operation::Tile::Input::INPUT);
1170 auto input = _tensor_registry->getITensor(input_idx);
1171
1172 auto multiplier_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
1173 auto multiplier = _tensor_registry->getITensor(multiplier_idx);
1174
1175 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1176 return;
1177
1178 auto input_shape = input->getShape();
1179 auto multiplier_buffer = reinterpret_cast<const int32_t *>(multiplier->buffer());
1180 assert(multiplier_buffer);
1181
1182 auto mult_shape = multiplier->getShape();
1183 auto output_shape = shape_inference::inferTileShape(
1184 input_shape, multiplier_buffer, mult_shape.rank() == 0 ? 1 : mult_shape.dim(0));
1185
1186 // set output shape and output buffer
1187 output->applyShape(output_shape);
1188 assert(output->buffer() != nullptr);
1189}
ir::Shape inferTileShape(const ir::Shape &in_shape, const int32_t *multiplier_buf, const int32_t multiplier_size)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTileShape(), onert::ir::operation::Tile::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.
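
The tile rule: each dimension is multiplied by the corresponding entry of the multiples tensor (the code above passes a multiplier count of 1 for a rank-0 multiples tensor). A standalone sketch:

#include <cassert>
#include <vector>

// Sketch of the tile output-shape rule: elementwise multiply by multiples.
std::vector<int> tileShape(std::vector<int> in, const std::vector<int> &mult)
{
  assert(in.size() == mult.size());
  for (size_t i = 0; i < in.size(); ++i)
    in[i] *= mult[i];             // {2,3} * {2,1} -> {4,3}
  return in;
}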

◆ visit() [43/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Transpose &op)
override

Definition at line 1191 of file DynamicShapeInferer.cc.

1192{
1193 // check if output is not dynamic
1194 auto output_ind = op.getOutputs().at(0);
1195 auto output = _tensor_registry->getITensor(output_ind);
1196
1197 // from op, access the buffer of second input to read new shape
1198 auto input_ind = op.getInputs().at(ir::operation::Transpose::Input::INPUT);
1199 auto input = _tensor_registry->getITensor(input_ind);
1200 auto input_shape = input->getShape();
1201
1202 /*
1203 Here, the state after compilation (static shape inference) could be one of the following:
1204
1205 input perms output execution-time shape inf required
1206 ------------------------------------ --------------------------------
1207 case 1) static const static X
1208 case 2) static non-const dynamic O
1209 case 3) dynamic const dynamic O
1210 case 4) dynamic non-const dynamic O
1211
1212 So, only when both input and output are static, we can skip dynamic shape inference.
1213 */
1214 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1215 return;
1216
1217 auto perm_ind = op.getInputs().at(ir::operation::Transpose::Input::PERMUTATION);
1218 auto perm = _tensor_registry->getITensor(perm_ind);
1219
1220 ir::Shape new_shape;
1221 // TODO Change perm->dimension(0) == 0 to perm->num_elements() == 0
1222 if (perm->getShape().dim(0) == 0) // This condition means that perm is (n-1...0)
1223 {
1224 // Call by (n-1...0)
1225 new_shape = shape_inference::inferTransposeShape(input_shape, nullptr, 0);
1226 }
1227 else
1228 {
1229 // Check rank
1230 if (static_cast<size_t>(input->getShape().rank()) != perm->getShape().num_elements())
1231 {
1232 throw std::runtime_error("DynamicShapeInferer failed, bad rank size: " +
1233 std::to_string(perm->getShape().num_elements()));
1234 }
1235
1236 // set output shape, based on input and params
1237 const auto perm_buffer = reinterpret_cast<const int32_t *>(perm->buffer());
1238 new_shape =
1239 shape_inference::inferTransposeShape(input_shape, perm_buffer, perm->getShape().dim(0));
1240 }
1241 output->applyShape(new_shape);
1242 assert(output->buffer() != nullptr);
1243}
ir::Shape inferTransposeShape(const ir::Shape &in_shape, const int32_t *perm_buf, const int32_t rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTransposeShape(), onert::ir::operation::Transpose::INPUT, and onert::ir::operation::Transpose::PERMUTATION.

◆ visit() [44/44]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Unpack &op)
override

Definition at line 1245 of file DynamicShapeInferer.cc.

1246{
1247 // check if output is not dynamic
1248 const auto input_idx{op.getInputs().at(0)};
1249 const auto &input = _tensor_registry->getITensor(input_idx);
1250
1251 if (!input->is_dynamic())
1252 return;
1253
1254 auto input_shape = input->getShape();
1255
1256 const auto rank = input_shape.rank();
1257 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
1258 const auto num = op.param().num;
1259
1260 assert(0 <= axis && axis < rank);
1261
1262 ir::Shape new_shape = shape_inference::inferUnpackShape(input_shape, axis, rank);
1263
1264 for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
1265 {
1266 auto output_ind = op.getOutputs().at(out_tensor_idx);
1267 auto output = _tensor_registry->getITensor(output_ind);
1268
1269 output->applyShape(new_shape);
1270
1271 assert(output->buffer() != nullptr);
1272 }
1273}
ir::Shape inferUnpackShape(const ir::Shape &input_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Unpack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferUnpackShape(), onert::ir::operation::Unpack::Param::num, and onert::ir::operation::Unpack::param().
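
Unpack is the inverse of Pack: the input is sliced into num (== dim(axis)) outputs, each with the input shape minus the unpacked axis, which is why the loop above applies the same new_shape to every output. A standalone sketch:

#include <vector>

// Sketch of the unpack output-shape rule: drop the unpacked axis.
std::vector<int> unpackShape(std::vector<int> in, int axis)
{
  in.erase(in.begin() + axis);    // {4,2,3}, axis=0 -> {2,3}, for each of 4 outputs
  return in;
}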


The documentation for this class was generated from the following files:

DynamicShapeInferer.h
DynamicShapeInferer.cc