ONE - On-device Neural Engine
onert::exec::DynamicShapeInferer Class Reference

Class to infer the shape of an output tensor at execution time and allocate memory for the output tensor if needed. More...

#include <DynamicShapeInferer.h>

Collaboration diagram for onert::exec::DynamicShapeInferer:

Public Member Functions

 DynamicShapeInferer (const std::shared_ptr< backend::ITensorRegistry > &tensor_registry)
 
void visit (const ir::operation::ArgMinMax &op) override
 
void visit (const ir::operation::BatchMatMul &op) override
 
void visit (const ir::operation::BCQFullyConnected &op) override
 
void visit (const ir::operation::BCQGather &op) override
 
void visit (const ir::operation::BCQUnembedding &op) override
 
void visit (const ir::operation::BinaryArithmetic &op) override
 
void visit (const ir::operation::BroadcastTo &op) override
 
void visit (const ir::operation::Comparison &op) override
 
void visit (const ir::operation::Concat &op) override
 
void visit (const ir::operation::Conv2D &op) override
 
void visit (const ir::operation::DepthwiseConv2D &op) override
 
void visit (const ir::operation::DynamicUpdateSlice &op) override
 
void visit (const ir::operation::ElementwiseActivation &op) override
 
void visit (const ir::operation::ElementwiseBinary &op) override
 
void visit (const ir::operation::ElementwiseUnary &op) override
 
void visit (const ir::operation::ExpandDims &op) override
 
void visit (const ir::operation::Fill &op) override
 
void visit (const ir::operation::FullyConnected &op) override
 
void visit (const ir::operation::FusedBatchNorm &op) override
 
void visit (const ir::operation::Gather &op) override
 
void visit (const ir::operation::L2Normalization &op) override
 
void visit (const ir::operation::LSTM &op) override
 
void visit (const ir::operation::DetectionPostProcess &op) override
 
void visit (const ir::operation::OneHot &op) override
 
void visit (const ir::operation::Pack &op) override
 
void visit (const ir::operation::Pad &op) override
 
void visit (const ir::operation::Permute &op) override
 
void visit (const ir::operation::Pool2D &op) override
 
void visit (const ir::operation::Pow &op) override
 
void visit (const ir::operation::Range &op) override
 
void visit (const ir::operation::Reduce &op) override
 
void visit (const ir::operation::Reshape &op) override
 
void visit (const ir::operation::ResizeBilinear &op) override
 
void visit (const ir::operation::Reverse &op) override
 
void visit (const ir::operation::Select &op) override
 
void visit (const ir::operation::Shape &op) override
 
void visit (const ir::operation::Slice &op) override
 
void visit (const ir::operation::Softmax &op) override
 
void visit (const ir::operation::SpaceToBatchND &op) override
 
void visit (const ir::operation::Split &op) override
 
void visit (const ir::operation::Squeeze &op) override
 
void visit (const ir::operation::StridedSlice &op) override
 
void visit (const ir::operation::SquaredDifference &op) override
 
void visit (const ir::operation::Tile &op) override
 
void visit (const ir::operation::Transpose &op) override
 
void visit (const ir::operation::Unpack &op) override
 
- Public Member Functions inherited from onert::ir::OperationVisitor
virtual ~OperationVisitor ()=default
 

Detailed Description

Class to infer the shape of an output tensor at execution time and allocate memory for the output tensor if needed.

Definition at line 34 of file DynamicShapeInferer.h.

Constructor & Destructor Documentation

◆ DynamicShapeInferer()

onert::exec::DynamicShapeInferer::DynamicShapeInferer ( const std::shared_ptr< backend::ITensorRegistry > &  tensor_registry)
inline

Definition at line 37 of file DynamicShapeInferer.h.

38 : _tensor_registry(tensor_registry)
39 {
40 // DO NOTHING
41 }
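
The constructor only stores the tensor registry; all inference work happens in the visit() overloads. A minimal usage sketch, assuming the onert headers are available, the registry is already populated, and dispatch goes through the usual ir::Operation::accept() visitor hook (the driver function itself is hypothetical):

#include <DynamicShapeInferer.h>

// Hypothetical driver: run dynamic shape inference for a single operation.
void inferDynamicShape(const onert::ir::Operation &op,
                       const std::shared_ptr<onert::backend::ITensorRegistry> &registry)
{
  onert::exec::DynamicShapeInferer inferer{registry};
  op.accept(inferer); // dispatches to the matching visit() overload
}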

Member Function Documentation

◆ visit() [1/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ArgMinMax op)
override

Definition at line 93 of file DynamicShapeInferer.cc.

94{
95 const auto input_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::INPUT)};
96 const auto input = _tensor_registry->getITensor(input_idx);
97
98 const auto axis_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::AXIS)};
99 const auto axis = _tensor_registry->getITensor(axis_idx);
100
101 auto output_ind = op.getOutputs().at(0);
102 auto output = _tensor_registry->getITensor(output_ind);
103
104 if (!input->is_dynamic() && !output->is_dynamic())
105 return;
106
107 auto input_shape = input->getShape();
108 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
109 const auto rank = input_shape.rank();
110 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
111
112 ir::Shape new_shape = shape_inference::inferArgMinMaxShape(input_shape, axis_value, rank);
113
114 output->applyShape(new_shape);
115 assert(output->buffer() != nullptr);
116}
ir::Shape inferArgMinMaxShape(const ir::Shape &input_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ArgMinMax::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferArgMinMaxShape(), and onert::ir::operation::ArgMinMax::INPUT.
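
The axis normalization above is the usual negative-axis convention (axis + rank), after which the reduced axis is dropped from the shape. A standalone sketch of the same rule in plain C++ (the helper name is hypothetical, not part of onert):

#include <cassert>
#include <vector>

// ArgMinMax shape rule: remove the reduced axis from the input shape.
std::vector<int> argMinMaxShape(const std::vector<int> &in, int axis)
{
  const int rank = static_cast<int>(in.size());
  axis = axis < 0 ? axis + rank : axis; // normalize negative axis
  assert(0 <= axis && axis < rank);
  std::vector<int> out;
  for (int i = 0; i < rank; ++i)
    if (i != axis)
      out.push_back(in[i]);
  return out;
}
// argMinMaxShape({2, 3, 4}, -1) == {2, 3}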

◆ visit() [2/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BatchMatMul op)
override

Definition at line 118 of file DynamicShapeInferer.cc.

119{
120 const auto lhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::LHS);
121 const auto rhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::RHS);
122 auto lhs = _tensor_registry->getITensor(lhs_index);
123 auto rhs = _tensor_registry->getITensor(rhs_index);
124
125 if (!lhs->is_dynamic() && !rhs->is_dynamic())
126 return;
127
128 const auto output_index = op.getOutputs().at(0);
129 auto output = _tensor_registry->getITensor(output_index);
130
131 auto lhs_shape = lhs->getShape();
132 auto rhs_shape = rhs->getShape();
133 // TODO
134
135 auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param());
136 output->applyShape(new_shape);
137}
ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape, const ir::operation::BatchMatMul::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBatchMatMulShape(), onert::ir::operation::BatchMatMul::LHS, onert::ir::operation::BatchMatMul::param(), and onert::ir::operation::BatchMatMul::RHS.

◆ visit() [3/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQFullyConnected op)
override

Definition at line 139 of file DynamicShapeInferer.cc.

140{
141 const auto input_idx{op.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
142 const auto &input = _tensor_registry->getITensor(input_idx);
143
144 const auto cluster_idx{
145 op.getInputs().at(ir::operation::BCQFullyConnected::Input::WEIGHTS_CLUSTERS)};
146 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
147 assert(cluster->is_constant());
148
149 if (!input->is_dynamic())
150 return;
151
152 auto input_shape = input->getShape();
153 auto cluster_shape = cluster->getShape();
154
155 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
156 assert(cluster_buf);
157
158 ir::Shape new_shape =
159 shape_inference::inferBCQFullyConnectedShape(input_shape, cluster_shape, cluster_buf);
160
161 auto output_ind = op.getOutputs().at(0);
162 auto output = _tensor_registry->getITensor(output_ind);
163
164 output->applyShape(new_shape);
165 assert(output->buffer() != nullptr);
166}
ir::Shape inferBCQFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBCQFullyConnectedShape(), onert::ir::operation::BCQFullyConnected::INPUT, and onert::ir::operation::BCQFullyConnected::WEIGHTS_CLUSTERS.

◆ visit() [4/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQGather op)
override

Definition at line 168 of file DynamicShapeInferer.cc.

169{
170 const auto indices_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
171 const auto &indices = _tensor_registry->getITensor(indices_idx);
172
173 const auto input_binary_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_BINARY)};
174 const auto &input_binary = _tensor_registry->getITensor(input_binary_idx);
175
176 const auto cluster_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
177 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
178 assert(cluster->is_constant());
179
180 if (!indices->is_dynamic())
181 return;
182
183 auto indices_shape = indices->getShape();
184 auto cluster_shape = cluster->getShape();
185 auto rank = input_binary->getShape().rank();
186
187 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
188 assert(cluster_buf);
189
190 ir::Shape new_shape = shape_inference::inferBCQGatherShape(indices_shape, cluster_shape,
191 cluster_buf, rank, op.param());
192
193 auto output_ind = op.getOutputs().at(0);
194 auto output = _tensor_registry->getITensor(output_ind);
195
196 output->applyShape(new_shape);
197 assert(output->buffer() != nullptr);
198}
ir::Shape inferBCQGatherShape(const ir::Shape &indices_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf, int rank, const ir::operation::BCQGather::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::BCQGather::INDICES, onert::shape_inference::inferBCQGatherShape(), onert::ir::operation::BCQGather::INPUT_BINARY, onert::ir::operation::BCQGather::INPUT_CLUSTERS, and onert::ir::operation::BCQGather::param().

◆ visit() [5/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQUnembedding op)
override

Definition at line 200 of file DynamicShapeInferer.cc.

201{
202 const auto input_idx{op.getInputs().at(ir::operation::BCQUnembedding::Input::INPUT)};
203 const auto &input = _tensor_registry->getITensor(input_idx);
204
205 const auto cluster_idx{op.getInputs().at(ir::operation::BCQUnembedding::Input::WEIGHTS_CLUSTERS)};
206 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
207 assert(cluster->is_constant());
208
209 if (!input->is_dynamic())
210 return;
211
212 auto input_shape = input->getShape();
213 auto cluster_shape = cluster->getShape();
214
215 ir::Shape new_shape = shape_inference::inferBCQUnembeddingShape(input_shape);
216
217 auto output_ind = op.getOutputs().at(0);
218 auto output = _tensor_registry->getITensor(output_ind);
219
220 output->applyShape(new_shape);
221 assert(output->buffer() != nullptr);
222}
ir::Shape inferBCQUnembeddingShape(const ir::Shape &in_shape)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBCQUnembeddingShape(), onert::ir::operation::BCQUnembedding::INPUT, and onert::ir::operation::BCQUnembedding::WEIGHTS_CLUSTERS.

◆ visit() [6/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BinaryArithmetic op)
override

◆ visit() [7/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BroadcastTo op)
override

Definition at line 230 of file DynamicShapeInferer.cc.

231{
232 auto output_ind = op.getOutputs().at(0);
233 auto output = _tensor_registry->getITensor(output_ind);
234
235 auto input_idx = op.getInputs().at(ir::operation::BroadcastTo::INPUT);
236 auto input = _tensor_registry->getITensor(input_idx);
237
238 if ((!input->is_dynamic()) && (!output->is_dynamic()))
239 return;
240
241 auto shape_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
242 const auto &shape = _tensor_registry->getITensor(shape_idx);
243
244 assert(shape); // It shouldn't be 0.
245
246 ir::Shape output_shape = shape_inference::inferBroadcastToShape(
247 shape->getShape(), reinterpret_cast<const int32_t *>(shape->buffer()));
248
249 // set output shape and output buffer
250 output->applyShape(output_shape);
251 assert(output->buffer() != nullptr);
252}
ir::Shape inferBroadcastToShape(const ir::Shape shp_shape, const int32_t *shp_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBroadcastToShape(), onert::ir::operation::BroadcastTo::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.

◆ visit() [8/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Comparison op)
override

◆ visit() [9/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Concat op)
override

Definition at line 260 of file DynamicShapeInferer.cc.

261{
262 /*
263 The state after compilation (static shape inference) could be one of the following:
264
265 inputs output execution-time shape inf required
266 ------------------------------------------ ---------------------------------
267 case 1) all static static X
268 case 2) at least one is dynamic dynamic O
269
270 Then nnfw_apply_tensorinfo() could make one or both inputs dynamic.
271 So, in this method, we have one more state and we have to re-calculate the shape for this case:
272
273 case 3) at least one is dynamic static O
274
275 So, only when all inputs are static can we skip dynamic shape inference.
276 */
277 bool all_static = true;
278 for (auto &&input_ind : op.getInputs())
279 {
280 auto input = _tensor_registry->getITensor(input_ind);
281 if (input->is_dynamic())
282 {
283 all_static = false;
284 break;
285 }
286 }
287
288 if (all_static)
289 return;
290
291 // sanity check
292 {
293 auto isConcatible = [](const backend::ITensor *input1, const backend::ITensor *input2,
294 int32_t axis) {
295 auto shape1 = input1->getShape();
296 auto shape2 = input2->getShape();
297 if (shape1.rank() != shape2.rank())
298 return false;
299
300 for (int i = 0; i < shape1.rank(); i++)
301 {
302 auto positive_axis = (axis >= 0) ? axis : axis + input1->getShape().rank();
303
304 if (i != positive_axis)
305 if (shape1.dim(i) != shape2.dim(i))
306 return false;
307 }
308
309 return true;
310 };
311
312 auto first_input_ind = op.getInputs().at(0);
313 auto first_input = _tensor_registry->getITensor(first_input_ind);
314
315 for (auto &&input_ind : op.getInputs())
316 {
317 auto input = _tensor_registry->getITensor(input_ind);
318 if (input != first_input && !isConcatible(first_input, input, op.param().axis))
319 throw std::runtime_error("input shapes do not match for concat");
320 }
321 }
322
323 // getting output shape
324 shape_inference::Shapes in_shapes;
325 for (auto &&input_ind : op.getInputs())
326 {
327 auto input = _tensor_registry->getITensor(input_ind);
328 ir::Shape shape = input->getShape();
329
330 in_shapes.emplace_back(shape);
331 }
332
333 auto output_ind = op.getOutputs().at(0);
334 auto output = _tensor_registry->getITensor(output_ind);
335 auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param());
336
337 output->applyShape(output_shape);
338}
std::vector< ir::Shape > Shapes
ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Concat::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::backend::ITensor::getShape(), onert::shape_inference::inferConcatShape(), output_shape, and onert::ir::operation::Concat::param().
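
The sanity check and inferConcatShape() above implement the usual concatenation rule: ranks and all non-axis dimensions must match, and the axis dimension is the sum over inputs. A standalone sketch under that assumption (hypothetical helper, not part of onert; axis is assumed already normalized to be non-negative):

#include <cassert>
#include <cstddef>
#include <vector>

std::vector<int> concatShape(const std::vector<std::vector<int>> &inputs, int axis)
{
  std::vector<int> out = inputs.front();
  for (std::size_t n = 1; n < inputs.size(); ++n)
  {
    assert(inputs[n].size() == out.size()); // ranks must match
    for (std::size_t i = 0; i < out.size(); ++i)
    {
      if (static_cast<int>(i) == axis)
        out[i] += inputs[n][i]; // axis dimension accumulates
      else
        assert(inputs[n][i] == out[i]); // other dimensions must match
    }
  }
  return out;
}
// concatShape({{2, 3}, {2, 5}}, 1) == {2, 8}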

◆ visit() [10/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Conv2D op)
override

Definition at line 340 of file DynamicShapeInferer.cc.

341{
342 // check if input is not dynamic
343 auto input_ind = op.getInputs().at(ir::operation::Conv2D::INPUT);
344 auto input = _tensor_registry->getITensor(input_ind);
345
346 auto ker_ind = op.getInputs().at(ir::operation::Conv2D::KERNEL);
347 auto ker = _tensor_registry->getITensor(ker_ind);
348
349 if ((!input->is_dynamic()) && (!ker->is_dynamic()))
350 return;
351
352 ir::Shape input_shape = input->getShape();
353 ir::Shape ker_shape = ker->getShape();
354
355 auto output_ind = op.getOutputs().at(0);
356 auto output = _tensor_registry->getITensor(output_ind);
357
358 ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param());
359
360 output->applyShape(output_shape);
361 assert(output->buffer() != nullptr);
362}
ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const ir::operation::Conv2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferConv2DShape(), onert::ir::operation::Conv2D::INPUT, onert::ir::operation::Conv2D::KERNEL, output_shape, and onert::ir::operation::Conv2D::param().

◆ visit() [11/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DepthwiseConv2D op)
override

Definition at line 364 of file DynamicShapeInferer.cc.

365{
366 // check if input is not dynamic
367 auto input_ind = op.getInputs().at(ir::operation::DepthwiseConv2D::INPUT);
368 auto input = _tensor_registry->getITensor(input_ind);
369
370 auto ker_ind = op.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
371 auto ker = _tensor_registry->getITensor(ker_ind);
372
373 if ((!input->is_dynamic()) && (!ker->is_dynamic()))
374 return;
375
376 ir::Shape input_shape = input->getShape();
377 ir::Shape ker_shape = ker->getShape();
378
379 auto output_ind = op.getOutputs().at(0);
380 auto output = _tensor_registry->getITensor(output_ind);
381
382 ir::Shape output_shape =
383 shape_inference::inferDepthwiseConv2DShape(input_shape, ker_shape, op.param());
384
385 output->applyShape(output_shape);
386 assert(output->buffer() != nullptr);
387}
ir::Shape inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const ir::operation::DepthwiseConv2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferDepthwiseConv2DShape(), onert::ir::operation::DepthwiseConv2D::INPUT, onert::ir::operation::DepthwiseConv2D::KERNEL, output_shape, and onert::ir::operation::DepthwiseConv2D::param().

◆ visit() [12/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DetectionPostProcess op)
override

Definition at line 653 of file DynamicShapeInferer.cc.

654{
655 // NOTE DetectionPostProcess's undefined output shapes are decided at compile time
656 // by the static shape inferer.
657 // DetectionPostProcess's output shapes are independent of the input shape
658 // and are decided by parameter values.
659}

◆ visit() [13/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DynamicUpdateSlice op)
override

Definition at line 389 of file DynamicShapeInferer.cc.

390{
391 // DynamicUpdateSlice is not a unary operator, but its output shape is the same as its input's
392 handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::DynamicUpdateSlice::OPERAND));
393}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), and onert::ir::operation::DynamicUpdateSlice::OPERAND.

◆ visit() [14/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseActivation op)
override

◆ visit() [15/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseBinary op)
override

◆ visit() [16/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseUnary op)
override

◆ visit() [17/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ExpandDims op)
override

Definition at line 411 of file DynamicShapeInferer.cc.

412{
413 // check if input is not dynamic
414 auto input_ind = op.getInputs().at(ir::operation::ExpandDims::INPUT);
415 auto input = _tensor_registry->getITensor(input_ind);
416
417 // check if output is not dynamic, i.e., when the 1st input is static and the 2nd input is const
418 auto output_ind = op.getOutputs().at(0);
419 auto output = _tensor_registry->getITensor(output_ind);
420
421 /*
422 Here, the state after compilation (static shape inference) could be one of the following:
423
424 input1 input2 output execution-time shape inf required
425 ----------------------------- --------------------------------
426 case 1) static const static X
427 case 2) static placeholder dynamic O
428 case 3) dynamic const dynamic O
429 case 4) dynamic placeholder dynamic O
430
431 Then nnfw_apply_tensorinfo() could make an input dynamic.
432 So, in this method, we could have one more state and we have to re-calculate the shape
433 for this case:
434
435 case 5) dynamic const static O
436
437 So, only when input1 and output are static can we skip dynamic shape inference.
438 */
439 if ((!input->is_dynamic()) && (!output->is_dynamic()))
440 return;
441
442 ir::Shape input_shape = input->getShape();
443
444 auto axis_ind = op.getInputs().at(ir::operation::ExpandDims::AXIS);
445 auto axis = _tensor_registry->getITensor(axis_ind);
446 auto axis_type = axis->data_type();
447 assert(axis_type == ir::DataType::INT32 || axis_type == ir::DataType::INT64);
448
449 assert(axis->buffer());
450 int32_t axis_value =
451 (axis_type == ir::DataType::INT32)
452 ? reinterpret_cast<const int32_t *>(axis->buffer())[0]
453 : static_cast<int32_t>(reinterpret_cast<const int64_t *>(axis->buffer())[0]);
454
455 auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_value);
456
457 output->applyShape(output_shape);
458 assert(output->buffer() != nullptr);
459}
ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ExpandDims::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferExpandDimsShape(), onert::ir::operation::ExpandDims::INPUT, and output_shape.
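
inferExpandDimsShape() follows the usual rule: a dimension of size 1 is inserted at the given axis, where negative axes count against the output rank (rank + 1). A standalone sketch (hypothetical helper, not part of onert):

#include <vector>

std::vector<int> expandDimsShape(std::vector<int> shape, int axis)
{
  const int out_rank = static_cast<int>(shape.size()) + 1;
  axis = axis < 0 ? axis + out_rank : axis; // normalize against output rank
  shape.insert(shape.begin() + axis, 1);    // insert the new unit dimension
  return shape;
}
// expandDimsShape({2, 3}, -1) == {2, 3, 1}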

◆ visit() [18/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Fill op)
override

Definition at line 461 of file DynamicShapeInferer.cc.

462{
463 // check if output is not dynamic
464 auto output_ind = op.getOutputs().at(0);
465 auto output = _tensor_registry->getITensor(output_ind);
466 auto shape_ind = op.getInputs().at(ir::operation::Fill::Input::SHAPE);
467 auto shape = _tensor_registry->getITensor(shape_ind);
468
469 if ((!shape->is_dynamic()) && (!output->is_dynamic()))
470 return;
471
472 const auto dims_type = shape->data_type();
473 assert(dims_type == ir::DataType::INT32 || dims_type == ir::DataType::INT64);
474
475 auto dims_buf = shape->buffer();
476 assert(dims_buf);
477
478 const auto &dims_shape = shape->getShape();
479 const auto &output_shape = ((dims_type == ir::DataType::INT32)
480 ? shape_inference::inferFillShape<int32_t>(
481 dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
482 : shape_inference::inferFillShape<int64_t>(
483 dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
484
485 output->applyShape(output_shape);
486 assert(output->buffer() != nullptr);
487}
ir::Shape inferFillShape(const ir::Shape &fill_shape, const T *shape_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), output_shape, and onert::ir::operation::Fill::SHAPE.
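
For Fill, the output shape is simply the contents of the 1-D shape tensor, which is why only the INT32/INT64 buffer interpretation differs above. A standalone sketch of that buffer walk (hypothetical helper, not part of onert):

#include <cstddef>
#include <cstdint>
#include <vector>

template <typename T> std::vector<int> fillShape(const T *shape_buf, std::size_t n)
{
  std::vector<int> out;
  for (std::size_t i = 0; i < n; ++i)
    out.push_back(static_cast<int>(shape_buf[i])); // copy each requested dim
  return out;
}
// const int64_t dims[] = {2, 3, 4}; fillShape(dims, 3) == {2, 3, 4}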

◆ visit() [19/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FullyConnected op)
override

Definition at line 489 of file DynamicShapeInferer.cc.

490{
491 const auto input_idx{op.getInputs().at(ir::operation::FullyConnected::Input::INPUT)};
492 const auto &input = _tensor_registry->getITensor(input_idx);
493
494 const auto ker_idx{op.getInputs().at(ir::operation::FullyConnected::Input::WEIGHT)};
495 const auto &ker = _tensor_registry->getITensor(ker_idx);
496
497 if (!input->is_dynamic() && !ker->is_dynamic())
498 return;
499
500 auto input_shape = input->getShape();
501 auto ker_shape = ker->getShape();
502
503 ir::Shape new_shape =
504 shape_inference::inferFullyConnectedShape(input_shape, ker_shape, op.param().keep_num_dims);
505
506 auto output_ind = op.getOutputs().at(0);
507 auto output = _tensor_registry->getITensor(output_ind);
508
509 output->applyShape(new_shape);
510 assert(output->buffer() != nullptr);
511}
ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, bool keep_num_dims)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferFullyConnectedShape(), onert::ir::operation::FullyConnected::INPUT, onert::ir::operation::FullyConnected::Param::keep_num_dims, onert::ir::operation::FullyConnected::param(), and onert::ir::operation::FullyConnected::WEIGHT.

◆ visit() [20/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FusedBatchNorm op)
override

◆ visit() [21/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Gather op)
override

Definition at line 518 of file DynamicShapeInferer.cc.

519{
520 const auto input_idx{op.getInputs().at(ir::operation::Gather::Input::INPUT)};
521 const auto &input = _tensor_registry->getITensor(input_idx);
522 auto input_shape = input->getShape();
523
524 const auto indices_idx{op.getInputs().at(ir::operation::Gather::Input::INDICES)};
525 const auto &indices = _tensor_registry->getITensor(indices_idx);
526 auto indices_shape = indices->getShape();
527
528 if (!(input->is_dynamic()) && !(indices->is_dynamic()))
529 return;
530
531 const auto rank = input_shape.rank();
532 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
533
534 assert(0 <= axis && axis < rank);
535
536 ir::Shape new_shape = shape_inference::inferGatherShape(input_shape, indices_shape, axis, rank);
537
538 auto output_ind = op.getOutputs().at(0);
539 auto output = _tensor_registry->getITensor(output_ind);
540
541 output->applyShape(new_shape);
542 assert(output->buffer() != nullptr);
543}
ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Gather::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Gather::INDICES, onert::shape_inference::inferGatherShape(), onert::ir::operation::Gather::INPUT, and onert::ir::operation::Gather::param().
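
inferGatherShape() follows the standard Gather rule: the axis dimension of the input is replaced by the entire indices shape, so the output rank is input rank + indices rank - 1. A standalone sketch (hypothetical helper, not part of onert; axis assumed non-negative):

#include <vector>

std::vector<int> gatherShape(const std::vector<int> &in,
                             const std::vector<int> &indices, int axis)
{
  std::vector<int> out(in.begin(), in.begin() + axis);    // dims before axis
  out.insert(out.end(), indices.begin(), indices.end());  // whole indices shape
  out.insert(out.end(), in.begin() + axis + 1, in.end()); // dims after axis
  return out;
}
// gatherShape({4, 5, 6}, {2, 3}, 1) == {4, 2, 3, 6}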

◆ visit() [22/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::L2Normalization op)
override

◆ visit() [23/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::LSTM op)
override

Definition at line 550 of file DynamicShapeInferer.cc.

551{
552 const auto output_index{op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
553 auto output = _tensor_registry->getITensor(output_index);
554
555 const auto output_state_out_index{
556 op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)};
557
558 const auto cell_state_out_index{op.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
559
560 const auto scratch_buffer_index{op.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
561
562 if (!output->is_dynamic() &&
563 !(_tensor_registry->getITensor(output_state_out_index) != nullptr &&
564 _tensor_registry->getITensor(output_state_out_index)->is_dynamic()) &&
565 !(_tensor_registry->getITensor(cell_state_out_index) != nullptr &&
566 _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()) &&
567 !(_tensor_registry->getITensor(scratch_buffer_index) != nullptr &&
568 _tensor_registry->getITensor(scratch_buffer_index)->is_dynamic()))
569 return;
570
571 const auto input_index{op.getInputs().at(ir::operation::LSTM::Input::INPUT)};
572 const auto input = _tensor_registry->getITensor(input_index);
573 const auto input_shape = input->getShape();
574
575 const auto input_to_output_weights_index{
576 op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)};
577 const auto input_to_output_weights = _tensor_registry->getITensor(input_to_output_weights_index);
578 const auto input_to_output_weights_shape = input_to_output_weights->getShape();
579
580 const auto recurrent_to_output_weights_index{
581 op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)};
582 const auto recurrent_to_output_weights =
583 _tensor_registry->getITensor(recurrent_to_output_weights_index);
584 const auto recurrent_to_output_weights_shape = recurrent_to_output_weights->getShape();
585
586 // re-sizing outputs
587 const int n_batch =
588 (input_shape.rank() == 3 && op.param().time_major) ? input_shape.dim(1) : input_shape.dim(0);
589 const int n_cell = input_to_output_weights_shape.dim(0);
590 const int n_output = recurrent_to_output_weights_shape.dim(1);
591 if (input_shape.rank() == 3)
592 {
593 if (op.param().time_major)
594 output->applyShape(ir::Shape{input_shape.dim(0), n_batch, n_output});
595 else
596 output->applyShape(ir::Shape{n_batch, input_shape.dim(1), n_output});
597 }
598 else
599 {
600 assert(input_shape.rank() == 2);
601 output->applyShape(ir::Shape{n_batch, n_output});
602 }
603 assert(output->buffer() != nullptr);
604
605 auto output_state_out = _tensor_registry->getITensor(output_state_out_index);
606 if (output_state_out != nullptr)
607 {
608 output_state_out->applyShape(ir::Shape{n_batch, n_output});
609 assert(output_state_out->buffer() != nullptr);
610 }
611
612 auto cell_state_out = _tensor_registry->getITensor(cell_state_out_index);
613 if (cell_state_out != nullptr)
614 {
615 cell_state_out->applyShape(ir::Shape{n_batch, n_cell});
616 assert(cell_state_out->buffer() != nullptr);
617 }
618
619 auto scratch_buffer = _tensor_registry->getITensor(scratch_buffer_index);
620 if (scratch_buffer != nullptr)
621 {
622 const auto input_to_input_weights_index{
623 op.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)};
624 const auto recurrent_to_input_weights_index{
625 op.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)};
626
627 const auto input_to_input_weights_shape =
628 _tensor_registry->getITensor(input_to_input_weights_index)->getShape();
629 bool has_input_to_input_weights =
630 input_to_input_weights_shape.dim(0) != 0 && input_to_input_weights_shape.dim(1) != 0;
631
632 const auto recurrent_to_input_weights_shape =
633 _tensor_registry->getITensor(recurrent_to_input_weights_index)->getShape();
634 bool has_recurrent_to_input_weights =
635 recurrent_to_input_weights_shape.dim(0) != 0 && recurrent_to_input_weights_shape.dim(1) != 0;
636
637 // NOTE The cell_to_input_weights do not exist in non-peephole LSTM, even for regular (non-CIFG) LSTM.
638 // true: no CIFG
639 // false: CIFG
640 bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
641 if (has_cifg_param)
642 {
643 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 4});
644 }
645 else
646 {
647 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 3});
648 }
649 assert(scratch_buffer->buffer() != nullptr);
650 }
651}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::LSTM::CELL_STATE_OUT, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::LSTM::INPUT, onert::ir::operation::LSTM::INPUT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::OUTPUT, onert::ir::operation::LSTM::OUTPUT_STATE_OUT, onert::ir::operation::LSTM::param(), onert::ir::operation::LSTM::RECURRENT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::SCRATCH_BUFFER, and onert::ir::operation::LSTM::Param::time_major.
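
Note that for a rank-3 input the two branches above produce the same result: whether time-major or batch-major, the first two dimensions pass through unchanged and only the feature dimension becomes n_output. A standalone sketch of that effective rule (hypothetical helper, not part of onert):

#include <cassert>
#include <vector>

std::vector<int> lstmOutputShape(const std::vector<int> &in, int n_output)
{
  assert(in.size() == 2 || in.size() == 3);
  std::vector<int> out(in);
  out.back() = n_output; // only the feature dimension changes
  return out;
}
// lstmOutputShape({8, 10, 32}, 16) == {8, 10, 16}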

◆ visit() [24/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::OneHot op)
override

Definition at line 661 of file DynamicShapeInferer.cc.

662{
663 auto output_ind = op.getOutputs().at(0);
664 auto output = _tensor_registry->getITensor(output_ind);
665
666 auto indices_ind = op.getInputs().at(ir::operation::OneHot::INDICES);
667 const auto &indices = _tensor_registry->getITensor(indices_ind);
668 auto indices_shape = indices->getShape();
669
670 auto depth_ind = op.getInputs().at(ir::operation::OneHot::DEPTH);
671 const auto &depth = _tensor_registry->getITensor(depth_ind);
672
673 if (!indices->is_dynamic() && !depth->is_dynamic())
674 {
675 return;
676 }
677
678 int32_t *depth_buf = reinterpret_cast<int32_t *>(depth->buffer());
679 assert(depth_buf);
680 const auto axis_val = op.param().axis;
681
682 ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val);
683 output->applyShape(new_shape);
684 assert(output->buffer() != nullptr);
685}
ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::OneHot::Param::axis, onert::ir::operation::OneHot::DEPTH, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::OneHot::INDICES, onert::shape_inference::inferOnehotShape(), and onert::ir::operation::OneHot::param().
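
inferOnehotShape() inserts the depth read from the DEPTH tensor into the indices shape at the given axis, with -1 meaning "append as the last dimension". A standalone sketch (hypothetical helper, not part of onert):

#include <vector>

std::vector<int> oneHotShape(std::vector<int> indices, int depth, int axis)
{
  const int out_rank = static_cast<int>(indices.size()) + 1;
  axis = axis < 0 ? axis + out_rank : axis; // -1 appends at the end
  indices.insert(indices.begin() + axis, depth);
  return indices;
}
// oneHotShape({2, 3}, 5, -1) == {2, 3, 5}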

◆ visit() [25/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pack op)
override

Definition at line 687 of file DynamicShapeInferer.cc.

688{
689 bool is_any_of_inputs_dynamic = [&]() -> bool {
690 for (uint32_t i = 0; i < op.getInputs().size(); ++i)
691 {
692 const auto &input = _tensor_registry->getITensor(op.getInputs().at(i));
693 if (input->is_dynamic())
694 {
695 return true;
696 }
697 }
698 return false;
699 }();
700
701 const auto input_idx{op.getInputs().at(0)};
702 const auto &input = _tensor_registry->getITensor(input_idx);
703 auto input_shape = input->getShape();
704
705 auto output_ind = op.getOutputs().at(0);
706 auto output = _tensor_registry->getITensor(output_ind);
707
708 if (!is_any_of_inputs_dynamic && !output->is_dynamic())
709 return;
710
711 const auto rank = input_shape.rank() + 1;
712 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
713 const auto num = op.param().num;
714
715 assert(0 <= axis && axis < rank);
716
717 ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num);
718
719 output->applyShape(new_shape);
720 assert(output->buffer() != nullptr);
721}
ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Pack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPackShape(), onert::ir::operation::Pack::Param::num, onert::ir::operation::Pack::param(), and onert::ir::OperandIndexSequence::size().
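
As the rank = input rank + 1 computation above shows, Pack stacks num equally shaped tensors along a new axis. A standalone sketch of the rule (hypothetical helper, not part of onert):

#include <vector>

std::vector<int> packShape(std::vector<int> shape, int axis, int num)
{
  const int out_rank = static_cast<int>(shape.size()) + 1;
  axis = axis < 0 ? axis + out_rank : axis;
  shape.insert(shape.begin() + axis, num); // new axis holds the tensor count
  return shape;
}
// packShape({3, 4}, 0, 2) == {2, 3, 4}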

◆ visit() [26/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pad op)
override

Definition at line 723 of file DynamicShapeInferer.cc.

724{
725 // check if output is not dynamic
726 auto output_ind = op.getOutputs().at(0);
727 auto output = _tensor_registry->getITensor(output_ind);
728
729 auto input_ind = op.getInputs().at(ir::operation::Pad::Input::INPUT);
730 auto input = _tensor_registry->getITensor(input_ind);
731
732 auto pad_ind = op.getInputs().at(ir::operation::Pad::Input::PAD);
733 auto pad = _tensor_registry->getITensor(pad_ind);
734
735 // check if input and output are not dynamic
736 if ((!input->is_dynamic()) && (!output->is_dynamic()))
737 return;
738
739 int32_t *pad_buf = reinterpret_cast<int32_t *>(pad->buffer());
740 assert(pad_buf);
741
742 auto output_shape =
743 shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements());
744
745 // change output shape and reallocate output tensor memory
746 output->applyShape(output_shape);
747 assert(output->buffer() != nullptr);
748}
ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPadShape(), onert::ir::operation::Pad::INPUT, output_shape, and onert::ir::operation::Pad::PAD.

◆ visit() [27/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Permute op)
override

Definition at line 750 of file DynamicShapeInferer.cc.

751{
752 const auto output_ind = op.getOutputs().at(0);
753 const auto &output = _tensor_registry->getITensor(output_ind);
754
755 const auto input_ind = op.getInputs().at(0);
756 const auto &input = _tensor_registry->getITensor(input_ind);
757
758 // check if input and output are not dynamic
759 if ((!input->is_dynamic()) && (!output->is_dynamic()))
760 return;
761
762 ir::Shape input_shape = input->getShape();
763 const auto &output_shape = convertShape(input_shape, op.getPermuteType());
764
765 output->applyShape(output_shape);
766 assert(output->buffer() != nullptr);
767}
Shape convertShape(const Shape &shape, const PermuteType &type)
Converts shape when its rank is 4.
Definition Shape.cc:62

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Permute::getPermuteType(), and output_shape.

◆ visit() [28/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pool2D op)
override

Definition at line 769 of file DynamicShapeInferer.cc.

770{
771 // check if input is not dynamic
772 auto input_ind = op.getInputs().at(ir::operation::Pool2D::INPUT);
773 auto input = _tensor_registry->getITensor(input_ind);
774
775 if (!input->is_dynamic())
776 return;
777
778 ir::Shape input_shape = input->getShape();
779
780 auto output_ind = op.getOutputs().at(0);
781 auto output = _tensor_registry->getITensor(output_ind);
782
783 ir::Shape output_shape = shape_inference::inferPoolShape(input_shape, op.param());
784
785 output->applyShape(output_shape);
786 assert(output->buffer() != nullptr);
787}
ir::Shape inferPoolShape(const ir::Shape &in_shape, const ir::operation::Pool2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPoolShape(), onert::ir::operation::Pool2D::INPUT, output_shape, and onert::ir::operation::Pool2D::param().

◆ visit() [29/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pow op)
override

◆ visit() [30/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Range op)
override

Definition at line 795 of file DynamicShapeInferer.cc.

796{
797 // check if output is not dynamic
798 auto output_ind = op.getOutputs().at(0);
799 auto output = _tensor_registry->getITensor(output_ind);
800
801 // read the start, limit, and delta values from their tensor buffers
802 auto start_idx = op.getInputs().at(ir::operation::Range::Input::START);
803 auto start_tensor = _tensor_registry->getITensor(start_idx);
804
805 auto limit_idx = op.getInputs().at(ir::operation::Range::Input::LIMIT);
806 auto limit_tensor = _tensor_registry->getITensor(limit_idx);
807
808 auto delta_idx = op.getInputs().at(ir::operation::Range::Input::DELTA);
809 auto delta_tensor = _tensor_registry->getITensor(delta_idx);
810
811 if (!start_tensor->is_dynamic() && !limit_tensor->is_dynamic() && !delta_tensor->is_dynamic() &&
812 !output->is_dynamic())
813 return;
814
815 ir::Shape new_shape;
816 if (output->data_type() == ir::DataType::FLOAT32)
817 {
818 new_shape =
819 shape_inference::inferRangeShape<float>(*reinterpret_cast<float *>(start_tensor->buffer()),
820 *reinterpret_cast<float *>(limit_tensor->buffer()),
821 *reinterpret_cast<float *>(delta_tensor->buffer()));
822 }
823 else if (output->data_type() == ir::DataType::INT32)
824 {
825 new_shape = shape_inference::inferRangeShape<int32_t>(
826 *reinterpret_cast<int32_t *>(start_tensor->buffer()),
827 *reinterpret_cast<int32_t *>(limit_tensor->buffer()),
828 *reinterpret_cast<int32_t *>(delta_tensor->buffer()));
829 }
830 output->applyShape(new_shape);
831 assert(output->buffer() != nullptr);
832}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Range::DELTA, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Range::LIMIT, and onert::ir::operation::Range::START.
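
inferRangeShape() only needs the element count of the 1-D output, which is ceil((limit - start) / delta) for a valid range. A standalone sketch of that arithmetic (hypothetical helper, not part of onert; assumes delta is non-zero and points from start toward limit):

#include <cmath>

template <typename T> int rangeLength(T start, T limit, T delta)
{
  return static_cast<int>(
    std::ceil(std::abs((static_cast<double>(limit) - start) / delta)));
}
// rangeLength(0, 10, 3) == 4 (elements 0, 3, 6, 9)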

◆ visit() [31/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reduce op)
override

Definition at line 834 of file DynamicShapeInferer.cc.

835{
836 const auto input_idx{op.getInputs().at(ir::operation::Reduce::Input::INPUT)};
837 const auto &input = _tensor_registry->getITensor(input_idx);
838 auto input_shape = input->getShape();
839
840 const auto axes_idx{op.getInputs().at(ir::operation::Reduce::Input::AXES)};
841 const auto &axes = _tensor_registry->getITensor(axes_idx);
842
843 if (!input->is_dynamic())
844 return;
845
846 std::vector<int32_t> axes_vec;
847 for (uint32_t i = 0; i < axes->getShape().num_elements(); ++i)
848 {
849 const auto buffer = axes->buffer() + axes->calcOffset({i});
850 switch (axes->data_type())
851 {
852 case ir::DataType::INT32:
853 {
854 axes_vec.emplace_back(*reinterpret_cast<const int32_t *>(buffer));
855 break;
856 }
857 case ir::DataType::INT64:
858 {
859 axes_vec.emplace_back(*reinterpret_cast<const int64_t *>(buffer));
860 break;
861 }
862 default:
863 throw std::runtime_error("DynamicShapeInferer " + op.name() + ": Not supported data type");
864 break;
865 }
866 }
867 const auto keep_dims = op.param().keep_dims;
868
869 auto output_ind = op.getOutputs().at(0);
870 auto output = _tensor_registry->getITensor(output_ind);
871
872 ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims);
873
874 output->applyShape(new_shape);
875 assert(output->buffer() != nullptr);
876}
ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector< int > &axes, bool keep_dims)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Reduce::AXES, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReduceShape(), onert::ir::operation::Reduce::INPUT, onert::ir::operation::Reduce::Param::keep_dims, onert::ir::operation::Reduce::name(), and onert::ir::operation::Reduce::param().

◆ visit() [32/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reshape op)
override

Definition at line 878 of file DynamicShapeInferer.cc.

879{
880 // check if output is not dynamic
881 auto output_ind = op.getOutputs().at(0);
882 auto output = _tensor_registry->getITensor(output_ind);
883
884 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
885 auto input = _tensor_registry->getITensor(input_ind);
886
887 /*
888 Here, the state after compilation (static shape inference) could be one of the following:
889
890 input1 input2 (or option) output execution-time shape inf required
891 ------------------------------------ --------------------------------
892 case 1) static const static X
893 case 2) static placeholder dynamic O
894 case 3) dynamic const dynamic O
895 case 4) dynamic placeholder dynamic O
896
897 Then nnfw_apply_tensorinfo() could make an input dynamic.
898 So, in this method, we could have one more state and we have to re-calculate the shape
899 for this case:
900
901 case 5) dynamic const static O
902
903 So, only when both input1 and output are static can we skip dynamic shape inference.
904 */
905 if ((!input->is_dynamic()) && (!output->is_dynamic()))
906 return;
907
908 // New shape is given by second input tensor
909 if (op.getInputs().size() == 2)
910 {
911 // from op, access the buffer of second input to read new shape
912 auto new_shape_ind = op.getInputs().at(ir::operation::Reshape::Input::SHAPE);
913
914 // getting output shape by reading new_shape tensor buffer
915 auto new_shape = _tensor_registry->getITensor(new_shape_ind);
916 assert(new_shape);
917
918 int32_t *new_shape_buf = reinterpret_cast<int32_t *>(new_shape->buffer());
919 assert(new_shape_buf);
920
921 auto output_shape = shape_inference::inferReshapeShape(input->getShape(), new_shape_buf,
922 new_shape->getShape().num_elements());
923
924 // if shape is changed, change output shape and reallocate output tensor memory
925 if (output_shape != output->getShape() || output->buffer() == nullptr)
926 {
927 // change on output shape
928 output->applyShape(output_shape);
929 }
930 assert(output->buffer() != nullptr);
931 }
932 // New shape is given by option
933 else if (op.param().new_shape.size() != 0)
934 {
935 // Let's check the new_shape option
936 auto shape = op.param().new_shape;
937 auto output_shape =
938 shape_inference::inferReshapeShape(input->getShape(), shape.data(), shape.size());
939
940 // if shape is changed, change output shape and reallocate output tensor memory
941 if (output_shape != output->getShape() || output->buffer() == nullptr)
942 {
943 // change on output shape
944 output->applyShape(output_shape);
945 }
946 assert(output->buffer() != nullptr);
947 }
948 else
949 {
950 throw std::runtime_error("Reshape: new shape is missing");
951 return;
952 }
953}
ir::Shape inferReshapeShape(const ir::Shape &input_shape, const int32_t *shape_buf, const int32_t shape_num_elements)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReshapeShape(), onert::ir::operation::Reshape::INPUT, onert::ir::operation::Reshape::Param::new_shape, output_shape, onert::ir::operation::Reshape::param(), onert::ir::operation::Reshape::SHAPE, and onert::ir::OperandIndexSequence::size().
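
In both branches above, inferReshapeShape() resolves the requested shape against the input's element count; by the usual reshape convention at most one entry may be -1, which is then inferred. A standalone sketch (hypothetical helper, not part of onert; assumes the counts divide evenly):

#include <vector>

std::vector<int> reshapeShape(int num_elements, std::vector<int> shape)
{
  int known = 1;     // product of the explicitly given dims
  int wildcard = -1; // position of the -1 entry, if any
  for (int i = 0; i < static_cast<int>(shape.size()); ++i)
  {
    if (shape[i] == -1)
      wildcard = i;
    else
      known *= shape[i];
  }
  if (wildcard >= 0)
    shape[wildcard] = num_elements / known; // infer the missing dim
  return shape;
}
// reshapeShape(24, {2, -1, 4}) == {2, 3, 4}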

◆ visit() [33/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ResizeBilinear op)
override

Definition at line 955 of file DynamicShapeInferer.cc.

956{
957 // check if output is not dynamic
958 auto output_ind = op.getOutputs().at(0);
959 auto output = _tensor_registry->getITensor(output_ind);
960
961 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
962 auto input = _tensor_registry->getITensor(input_ind);
963
964 if ((!input->is_dynamic()) && (!output->is_dynamic()))
965 return;
966
967 // getting output shape from input shape and Params
968 int32_t height_out, width_out;
969 if (op.getInputs().size() == 2)
970 {
971 auto size_ind = op.getInputs().at(ir::operation::ResizeBilinear::Input::SIZE);
972 auto size = _tensor_registry->getITensor(size_ind);
973 if (size->data_type() == ir::DataType::INT32)
974 {
975 auto size_buf = reinterpret_cast<const int32_t *>(size->buffer());
976 height_out = size_buf[0];
977 width_out = size_buf[1];
978 }
979 else
980 {
981 throw std::runtime_error("DynamicShapeInferer ResizeBilinear : Unsupported data type");
982 }
983 }
984 else
985 {
986 height_out = op.param().height_out;
987 width_out = op.param().width_out;
988 }
989 auto output_shape =
990 shape_inference::inferResizeBilinearShape(input->getShape(), height_out, width_out);
991
992 // if shape is changed, change output shape and reallocate output tensor memory
993 if (output_shape != output->getShape() || output->buffer() == nullptr)
994 {
995 // change on output shape
996 output->applyShape(output_shape);
997 }
998 assert(output->buffer() != nullptr);
999}
ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height, const int32_t output_width)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::ResizeBilinear::Param::height_out, onert::shape_inference::inferResizeBilinearShape(), onert::ir::operation::Reshape::INPUT, output_shape, onert::ir::operation::ResizeBilinear::param(), onert::ir::OperandIndexSequence::size(), size, onert::ir::operation::ResizeBilinear::SIZE, and onert::ir::operation::ResizeBilinear::Param::width_out.

◆ visit() [34/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reverse op)
override

◆ visit() [35/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Select op)
override

Definition at line 1006 of file DynamicShapeInferer.cc.

1007{
1008 const auto input_cond_idx = op.getInputs().at(ir::operation::Select::Input::CONDITION);
1009 const auto &input_cond = _tensor_registry->getITensor(input_cond_idx);
1010
1011 const auto input_true_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_TRUE);
1012 const auto &input_true = _tensor_registry->getITensor(input_true_idx);
1013
1014 const auto input_false_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_FALSE);
1015 const auto &input_false = _tensor_registry->getITensor(input_false_idx);
1016
1017 if ((!input_cond->is_dynamic()) && (!input_true->is_dynamic()) && (!input_false->is_dynamic()))
1018 {
1019 return;
1020 }
1021
1022 auto input_cond_shape = input_cond->getShape();
1023 auto input_true_shape = input_true->getShape();
1024 auto input_false_shape = input_false->getShape();
1025
1026 // Select output shape
1027 ir::Shape new_shape =
1028 shape_inference::inferSelectShape(input_cond_shape, input_true_shape, input_false_shape);
1029
1030 auto output_ind = op.getOutputs().at(0);
1031 auto output = _tensor_registry->getITensor(output_ind);
1032
1033 output->applyShape(new_shape);
1034 assert(output->buffer() != nullptr);
1035}
ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape, const ir::Shape &input_false_shape)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Select::CONDITION, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSelectShape(), onert::ir::operation::Select::INPUT_FALSE, and onert::ir::operation::Select::INPUT_TRUE.

◆ visit() [36/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Shape op)
override

Definition at line 1037 of file DynamicShapeInferer.cc.

1038{
1039 const auto input_idx{op.getInputs().at(0)};
1040 const auto &input = _tensor_registry->getITensor(input_idx);
1041 auto input_shape = input->getShape();
1042
1043 if (!input->is_dynamic())
1044 return;
1045
1046 auto output_ind = op.getOutputs().at(0);
1047 auto output = _tensor_registry->getITensor(output_ind);
1048
1049 ir::Shape output_shape;
1050 output_shape.append(input_shape.rank());
1051
1052 output->applyShape(output_shape);
1053 assert(output->buffer() != nullptr);
1054}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), and output_shape.

◆ visit() [37/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Slice op)
override

Definition at line 1056 of file DynamicShapeInferer.cc.

1057{
1058 const auto input_index{op.getInputs().at(ir::operation::Slice::Input::INPUT)};
1059 const auto input = _tensor_registry->getITensor(input_index);
1060 const auto begins_index{op.getInputs().at(ir::operation::Slice::Input::BEGINS)};
1061 const auto begins = _tensor_registry->getITensor(begins_index);
1062 const auto sizes_index{op.getInputs().at(ir::operation::Slice::Input::SIZES)};
1063 const auto sizes = _tensor_registry->getITensor(sizes_index);
1064 auto output_index = op.getOutputs().at(0);
1065 auto output = _tensor_registry->getITensor(output_index);
1066
1067 if (!(input->is_dynamic() || begins->is_dynamic() || sizes->is_dynamic() || output->is_dynamic()))
1068 {
1069 return;
1070 }
1071
1072 ir::Shape input_shape = input->getShape();
1073 auto begins_buf = reinterpret_cast<const int32_t *>(begins->buffer());
1074 auto sizes_buf = reinterpret_cast<const int32_t *>(sizes->buffer());
1075
1076 ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf);
1077
1078 output->applyShape(new_shape);
1079 assert(output->buffer() != nullptr);
1080}
ir::Shape inferSliceShape(const ir::Shape &input_shape, const T *begins_buf, const T *sizes_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Slice::BEGINS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSliceShape(), onert::ir::operation::Slice::INPUT, and onert::ir::operation::Slice::SIZES.
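
inferSliceShape() follows the usual Slice convention: each output dimension is sizes[i], with -1 meaning "everything from begins[i] to the end of that axis". A standalone sketch (hypothetical helper, not part of onert):

#include <cstddef>
#include <vector>

std::vector<int> sliceShape(const std::vector<int> &in,
                            const int *begins, const int *sizes)
{
  std::vector<int> out(in.size());
  for (std::size_t i = 0; i < in.size(); ++i)
    out[i] = (sizes[i] == -1) ? in[i] - begins[i] : sizes[i];
  return out;
}
// in = {4, 6}, begins = {1, 2}, sizes = {2, -1} -> out = {2, 4}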

◆ visit() [38/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Softmax op)
override

◆ visit() [39/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::SpaceToBatchND op)
override

Definition at line 1087 of file DynamicShapeInferer.cc.

1088{
1089 const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
1090 const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
1091 const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
1092 auto output_idx{op.getOutputs().at(0)};
1093
1094 const auto &input = _tensor_registry->getITensor(input_idx);
1095 const auto &block_shape = _tensor_registry->getITensor(block_shape_idx);
1096 const auto &padding = _tensor_registry->getITensor(padding_idx);
1097 auto output = _tensor_registry->getITensor(output_idx);
1098
1099 if (!(input->is_dynamic() || block_shape->is_dynamic() || padding->is_dynamic() ||
1100 output->is_dynamic()))
1101 {
1102 return;
1103 }
1104
1105 auto input_shape = input->getShape();
1106 auto block_shape_shape = block_shape->getShape();
1107 auto padding_shape = padding->getShape();
1108
1109 auto block_shape_data = reinterpret_cast<int32_t *>(block_shape->buffer());
1110 auto padding_data = reinterpret_cast<int32_t *>(padding->buffer());
1111
1112 ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
1113 input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
1114
1115 output->applyShape(new_shape);
1116 assert(output->buffer() != nullptr);
1117}
ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape, const ir::Shape &padding_shape, const int32_t *block_shape_buf, const int32_t *padding_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::SpaceToBatchND::BLOCK_SIZE, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSpaceToBatchNDShape(), onert::ir::operation::SpaceToBatchND::INPUT, and onert::ir::operation::SpaceToBatchND::PADDINGS.

◆ visit() [40/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Split op)
override

Definition at line 1119 of file DynamicShapeInferer.cc.

1120{
1121 const auto input_idx{op.getInputs().at(ir::operation::Split::Input::INPUT)};
1122 const auto &input = _tensor_registry->getITensor(input_idx);
1123
1124 // Return if no tensor is dynamic
1125 bool has_dynamic = false;
1126 for (const auto &output_idx : op.getOutputs())
1127 {
1128 auto output = _tensor_registry->getITensor(output_idx);
1129 has_dynamic |= output->is_dynamic();
1130 }
1131 if (!input->is_dynamic() && !has_dynamic)
1132 {
1133 return;
1134 }
1135
1136 auto input_shape = input->getShape();
1137
1138 const auto axis_idx{op.getInputs().at(ir::operation::Split::Input::AXIS)};
1139 const auto &axis = _tensor_registry->getITensor(axis_idx);
1140
1141 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
1142 const auto num_splits = op.param().num_splits;
1143 const auto rank = input_shape.rank();
1144 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
1145
1146 assert(0 <= axis_value && axis_value < rank);
1147
1148 ir::Shape new_shape = shape_inference::inferSplitShape(input_shape, axis_value, num_splits);
1149 for (int out_tensor_idx = 0; out_tensor_idx < num_splits; out_tensor_idx++)
1150 {
1151 auto output_ind = op.getOutputs().at(out_tensor_idx);
1152 auto output = _tensor_registry->getITensor(output_ind);
1153
1154 output->applyShape(new_shape);
1155 assert(output->buffer() != nullptr);
1156 }
1157}
ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Split::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSplitShape(), onert::ir::operation::Split::INPUT, onert::ir::operation::Split::Param::num_splits, and onert::ir::operation::Split::param().
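
inferSplitShape() gives every output the input shape with the split axis divided by num_splits, which is why one new_shape is applied to all outputs above. A standalone sketch (hypothetical helper, not part of onert; axis assumed non-negative):

#include <cassert>
#include <vector>

std::vector<int> splitShape(std::vector<int> shape, int axis, int num_splits)
{
  assert(shape[axis] % num_splits == 0); // must divide evenly
  shape[axis] /= num_splits;
  return shape;
}
// splitShape({2, 6}, 1, 3) == {2, 2} (the shape of each of the 3 outputs)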

◆ visit() [41/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::SquaredDifference op)
override

◆ visit() [42/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Squeeze op)
override

Definition at line 1165 of file DynamicShapeInferer.cc.

1166{
1167 const auto input_idx{op.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
1168 const auto &input = _tensor_registry->getITensor(input_idx);
1169
1170 if (!input->is_dynamic())
1171 {
1172 return;
1173 }
1174
1175 auto input_shape = input->getShape();
1176
1177 // Squeeze output shape
1178 ir::Shape new_shape = shape_inference::inferSqueezeShape(input_shape, op.param());
1179
1180 auto output_ind = op.getOutputs().at(0);
1181 auto output = _tensor_registry->getITensor(output_ind);
1182
1183 output->applyShape(new_shape);
1184 assert(output->buffer() != nullptr);
1185}
ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSqueezeShape(), onert::ir::operation::Squeeze::INPUT, and onert::ir::operation::Squeeze::param().

◆ visit() [43/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::StridedSlice &op)
override

Definition at line 1187 of file DynamicShapeInferer.cc.

1188{
1189
1190 const auto input_index{op.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
1191 auto input = _tensor_registry->getITensor(input_index);
1192 ir::Shape input_shape = input->getShape();
1193
1194 const auto starts_index{op.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
1195 auto starts = _tensor_registry->getITensor(starts_index);
1196
1197 const auto ends_index{op.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
1198 auto ends = _tensor_registry->getITensor(ends_index);
1199
1200 const auto strides_index{op.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
1201 auto strides = _tensor_registry->getITensor(strides_index);
1202
1203 if (!(input->is_dynamic() || starts->is_dynamic() || ends->is_dynamic() || strides->is_dynamic()))
1204 {
1205 return;
1206 }
1207
1208 const auto begin_mask = op.param().begin_mask;
1209 const auto end_mask = op.param().end_mask;
1210 const auto shrink_axis_mask = op.param().shrink_axis_mask;
1211 const auto rank = input_shape.rank();
1212
1213 auto op_params = shape_inference::buildStridedSliceParams(
1214 reinterpret_cast<uint32_t *>(starts->buffer()), reinterpret_cast<uint32_t *>(ends->buffer()),
1215 reinterpret_cast<uint32_t *>(strides->buffer()), begin_mask, end_mask, shrink_axis_mask, rank);
1216
1217 auto output_index = op.getOutputs().at(0);
1218 auto output = _tensor_registry->getITensor(output_index);
1219
1220 ir::Shape output_shape =
1221 onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank);
1222
1223 output->applyShape(output_shape);
1224 assert(output->buffer() != nullptr);
1225}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::StridedSlice::Param::begin_mask, onert::shape_inference::buildStridedSliceParams(), onert::ir::operation::StridedSlice::Param::end_mask, onert::ir::operation::StridedSlice::ENDS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferStridedSliceShape(), onert::ir::operation::StridedSlice::INPUT, output_shape, onert::ir::operation::StridedSlice::param(), onert::ir::operation::StridedSlice::Param::shrink_axis_mask, onert::ir::operation::StridedSlice::STARTS, and onert::ir::operation::StridedSlice::STRIDES.
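
The full mask handling in buildStridedSliceParams/inferStridedSliceShape also covers negative strides and shrink axes; the sketch below shows only the core extent computation for positive strides with begin/end masks, following TensorFlow-style StridedSlice semantics (hypothetical helper, not the onert API):

#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

// Core extent computation only: positive strides, no shrink_axis handling.
// A set bit in begin_mask/end_mask replaces start/stop with the full range.
Shape inferStridedSliceSketch(const Shape &in, const std::vector<int32_t> &starts,
                              const std::vector<int32_t> &ends,
                              const std::vector<int32_t> &strides,
                              uint32_t begin_mask, uint32_t end_mask)
{
  Shape out(in.size());
  for (int d = 0; d < static_cast<int>(in.size()); ++d)
  {
    int32_t start = (begin_mask & (1u << d)) ? 0 : starts[d];
    int32_t stop = (end_mask & (1u << d)) ? in[d] : ends[d];
    if (start < 0) start += in[d]; // negative indices count from the end
    if (stop < 0) stop += in[d];
    int32_t len = stop - start;
    out[d] = len <= 0 ? 0 : (len + strides[d] - 1) / strides[d];
  }
  return out;
}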

◆ visit() [44/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Tile &op)
override

Definition at line 1227 of file DynamicShapeInferer.cc.

1228{
1229 auto output_ind = op.getOutputs().at(0);
1230 auto output = _tensor_registry->getITensor(output_ind);
1231
1232 auto input_idx = op.getInputs().at(ir::operation::Tile::Input::INPUT);
1233 auto input = _tensor_registry->getITensor(input_idx);
1234
1235 auto multiplier_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
1236 auto multiplier = _tensor_registry->getITensor(multiplier_idx);
1237
1238 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1239 return;
1240
1241 auto input_shape = input->getShape();
1242 auto multiplier_buffer = reinterpret_cast<const int32_t *>(multiplier->buffer());
1243 assert(multiplier_buffer);
1244
1245 auto mult_shape = multiplier->getShape();
1246 auto output_shape = shape_inference::inferTileShape(
1247 input_shape, multiplier_buffer, mult_shape.rank() == 0 ? 1 : mult_shape.dim(0));
1248
1249 // set output shape and output buffer
1250 output->applyShape(output_shape);
1251 assert(output->buffer() != nullptr);
1252}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTileShape(), onert::ir::operation::Tile::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.
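
What inferTileShape computes is, in essence, an elementwise product of input extents and multipliers. A minimal sketch under the assumption that the multiplier buffer holds one entry per input dimension (hypothetical helper over a plain std::vector shape):

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

// Output extent = input extent * multiplier, dimension by dimension;
// the multiplier buffer carries one entry per input dimension.
Shape inferTileShapeSketch(const Shape &in, const int32_t *multipliers, int32_t multiplier_size)
{
  assert(multiplier_size == static_cast<int32_t>(in.size()));
  Shape out(in.size());
  for (int d = 0; d < static_cast<int>(in.size()); ++d)
    out[d] = in[d] * multipliers[d];
  return out;
}
// e.g. tiling {2, 3} with multipliers {2, 1} yields {4, 3}.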

◆ visit() [45/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Transpose &op)
override

Definition at line 1254 of file DynamicShapeInferer.cc.

1255{
1256 // check if output is not dynamic
1257 auto output_ind = op.getOutputs().at(0);
1258 auto output = _tensor_registry->getITensor(output_ind);
1259
1260 // from op, access the buffer of second input to read new shape
1261 auto input_ind = op.getInputs().at(ir::operation::Transpose::Input::INPUT);
1262 auto input = _tensor_registry->getITensor(input_ind);
1263 auto input_shape = input->getShape();
1264
1265 /*
1266 Here, the state after compilation (static shape inference) could be one of the following:
1267
1268 input perms output execution-time shape inf required
1269 ------------------------------------ --------------------------------
1270 case 1) static const static X
1271 case 2) static non-const dynamic O
1272 case 3) dynamic const dynamic O
1273 case 4) dynamic non-const dynamic O
1274
1275 So, only when both input and output are static can we skip dynamic shape inference.
1276 */
1277 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1278 return;
1279
1280 auto perm_ind = op.getInputs().at(ir::operation::Transpose::Input::PERMUTATION);
1281 auto perm = _tensor_registry->getITensor(perm_ind);
1282
1283 ir::Shape new_shape;
1284 // TODO Change perm->dimension(0) == 0 to perm->num_elements() == 0
1285 if (perm->getShape().dim(0) == 0) // This condition means that perm is (n-1...0)
1286 {
1287 // Call by (n-1...0)
1288 new_shape = shape_inference::inferTransposeShape(input_shape, nullptr, 0);
1289 }
1290 else
1291 {
1292 // Check rank
1293 if (static_cast<size_t>(input->getShape().rank()) != perm->getShape().num_elements())
1294 {
1295 throw std::runtime_error("DynamicShapeInferer failed, bad rank size: " +
1296 std::to_string(perm->getShape().num_elements()));
1297 }
1298
1299 // set output shape, based on input and params
1300 const auto perm_buffer = reinterpret_cast<const int32_t *>(perm->buffer());
1301 new_shape =
1302 shape_inference::inferTransposeShape(input_shape, perm_buffer, perm->getShape().dim(0));
1303 }
1304 output->applyShape(new_shape);
1305 assert(output->buffer() != nullptr);
1306}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTransposeShape(), onert::ir::operation::Transpose::INPUT, and onert::ir::operation::Transpose::PERMUTATION.
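
A sketch of the permutation arithmetic behind inferTransposeShape, including the reverse-all-dimensions default used when the perm tensor is empty (hypothetical helper; assumes rank equals the input rank and perm, when non-null, is a valid permutation):

#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

// Output dimension d takes the extent of input dimension perm[d]; a null
// perm means "reverse all dimensions" (n-1 ... 0), matching the special
// case in the listing above.
Shape inferTransposeShapeSketch(const Shape &in, const int32_t *perm, int32_t rank)
{
  Shape out(rank);
  for (int32_t d = 0; d < rank; ++d)
    out[d] = perm ? in[perm[d]] : in[rank - 1 - d];
  return out;
}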

◆ visit() [46/46]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Unpack &op)
override

Definition at line 1308 of file DynamicShapeInferer.cc.

1309{
1310 // if the input is static, the output shapes were already fixed at compile time
1311 const auto input_idx{op.getInputs().at(0)};
1312 const auto &input = _tensor_registry->getITensor(input_idx);
1313
1314 if (!input->is_dynamic())
1315 return;
1316
1317 auto input_shape = input->getShape();
1318
1319 const auto rank = input_shape.rank();
1320 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
1321 const auto num = op.param().num;
1322
1323 assert(0 <= axis && axis < rank);
1324
1325 ir::Shape new_shape = shape_inference::inferUnpackShape(input_shape, axis, rank);
1326
1327 for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
1328 {
1329 auto output_ind = op.getOutputs().at(out_tensor_idx);
1330 auto output = _tensor_registry->getITensor(output_ind);
1331
1332 output->applyShape(new_shape);
1333
1334 assert(output->buffer() != nullptr);
1335 }
1336}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Unpack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferUnpackShape(), onert::ir::operation::Unpack::Param::num, and onert::ir::operation::Unpack::param().
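
The unpack rule mirrors split with the axis removed rather than shrunk: all op.param().num outputs share the input shape minus the axis dimension. A minimal sketch (hypothetical helper over a plain std::vector shape, axis assumed already normalized):

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int32_t>;

// Unpack removes the axis dimension entirely; each of the
// input.dim(axis) outputs shares the remaining extents.
Shape inferUnpackShapeSketch(const Shape &in, int axis)
{
  assert(axis >= 0 && axis < static_cast<int>(in.size()));
  Shape out;
  for (int d = 0; d < static_cast<int>(in.size()); ++d)
    if (d != axis)
      out.push_back(in[d]);
  return out;
}
// e.g. unpacking {4, 2, 3} along axis 0 gives four outputs of shape {2, 3}.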


The documentation for this class was generated from the following files: