ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::exec::DynamicShapeInferer Class Reference

Class to infer shape of output tensor at execution time and allocate memory for output tensor if needed. More...

#include <DynamicShapeInferer.h>

Collaboration diagram for onert::exec::DynamicShapeInferer:

Public Member Functions

 DynamicShapeInferer (const std::shared_ptr< backend::ITensorRegistry > &tensor_registry)
 
void visit (const ir::operation::ArgMinMax &op) override
 
void visit (const ir::operation::BatchMatMul &op) override
 
void visit (const ir::operation::BCQFullyConnected &op) override
 
void visit (const ir::operation::BCQGather &op) override
 
void visit (const ir::operation::BinaryArithmetic &op) override
 
void visit (const ir::operation::BroadcastTo &op) override
 
void visit (const ir::operation::Comparison &op) override
 
void visit (const ir::operation::Concat &op) override
 
void visit (const ir::operation::Conv2D &op) override
 
void visit (const ir::operation::DepthwiseConv2D &op) override
 
void visit (const ir::operation::DynamicUpdateSlice &op) override
 
void visit (const ir::operation::ElementwiseActivation &op) override
 
void visit (const ir::operation::ElementwiseBinary &op) override
 
void visit (const ir::operation::ElementwiseUnary &op) override
 
void visit (const ir::operation::ExpandDims &op) override
 
void visit (const ir::operation::Fill &op) override
 
void visit (const ir::operation::FullyConnected &op) override
 
void visit (const ir::operation::FusedBatchNorm &op) override
 
void visit (const ir::operation::Gather &op) override
 
void visit (const ir::operation::L2Normalization &op) override
 
void visit (const ir::operation::LSTM &op) override
 
void visit (const ir::operation::DetectionPostProcess &op) override
 
void visit (const ir::operation::OneHot &op) override
 
void visit (const ir::operation::Pack &op) override
 
void visit (const ir::operation::Pad &op) override
 
void visit (const ir::operation::Permute &op) override
 
void visit (const ir::operation::Pool2D &op) override
 
void visit (const ir::operation::Pow &op) override
 
void visit (const ir::operation::Range &op) override
 
void visit (const ir::operation::Reduce &op) override
 
void visit (const ir::operation::Reshape &op) override
 
void visit (const ir::operation::ResizeBilinear &op) override
 
void visit (const ir::operation::Reverse &op) override
 
void visit (const ir::operation::Select &op) override
 
void visit (const ir::operation::Shape &op) override
 
void visit (const ir::operation::Slice &op) override
 
void visit (const ir::operation::Softmax &op) override
 
void visit (const ir::operation::SpaceToBatchND &op) override
 
void visit (const ir::operation::Split &op) override
 
void visit (const ir::operation::Squeeze &op) override
 
void visit (const ir::operation::StridedSlice &op) override
 
void visit (const ir::operation::SquaredDifference &op) override
 
void visit (const ir::operation::Tile &op) override
 
void visit (const ir::operation::Transpose &op) override
 
void visit (const ir::operation::Unpack &op) override
 
- Public Member Functions inherited from onert::ir::OperationVisitor
virtual ~OperationVisitor ()=default
 

Detailed Description

Class to infer shape of output tensor at execution time and allocate memory for output tensor if needed.

Definition at line 34 of file DynamicShapeInferer.h.

Constructor & Destructor Documentation

◆ DynamicShapeInferer()

onert::exec::DynamicShapeInferer::DynamicShapeInferer ( const std::shared_ptr< backend::ITensorRegistry > &  tensor_registry)
inline

Definition at line 37 of file DynamicShapeInferer.h.

38 : _tensor_registry(tensor_registry)
39 {
40 // DO NOTHING
41 }

Member Function Documentation

◆ visit() [1/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ArgMinMax op)
override

Definition at line 93 of file DynamicShapeInferer.cc.

94{
95 const auto input_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::INPUT)};
96 const auto input = _tensor_registry->getITensor(input_idx);
97
98 const auto axis_idx{op.getInputs().at(ir::operation::ArgMinMax::Input::AXIS)};
99 const auto axis = _tensor_registry->getITensor(axis_idx);
100
101 auto output_ind = op.getOutputs().at(0);
102 auto output = _tensor_registry->getITensor(output_ind);
103
104 if (!input->is_dynamic() && !output->is_dynamic())
105 return;
106
107 auto input_shape = input->getShape();
108 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
109 const auto rank = input_shape.rank();
110 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
111
112 ir::Shape new_shape = shape_inference::inferArgMinMaxShape(input_shape, axis_value, rank);
113
114 output->applyShape(new_shape);
115 assert(output->buffer() != nullptr);
116}
ir::Shape inferArgMinMaxShape(const ir::Shape &input_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ArgMinMax::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferArgMinMaxShape(), and onert::ir::operation::ArgMinMax::INPUT.

◆ visit() [2/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BatchMatMul op)
override

Definition at line 118 of file DynamicShapeInferer.cc.

119{
120 const auto lhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::LHS);
121 const auto rhs_index = op.getInputs().at(ir::operation::BatchMatMul::Input::RHS);
122 auto lhs = _tensor_registry->getITensor(lhs_index);
123 auto rhs = _tensor_registry->getITensor(rhs_index);
124
125 if (!lhs->is_dynamic() && !rhs->is_dynamic())
126 return;
127
128 const auto output_index = op.getOutputs().at(0);
129 auto output = _tensor_registry->getITensor(output_index);
130
131 auto lhs_shape = lhs->getShape();
132 auto rhs_shape = rhs->getShape();
133 // TODO
134
135 auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param());
136 output->applyShape(new_shape);
137}
ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape, const ir::operation::BatchMatMul::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBatchMatMulShape(), onert::ir::operation::BatchMatMul::LHS, onert::ir::operation::BatchMatMul::param(), and onert::ir::operation::BatchMatMul::RHS.

◆ visit() [3/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQFullyConnected op)
override

Definition at line 139 of file DynamicShapeInferer.cc.

140{
141 const auto input_idx{op.getInputs().at(ir::operation::BCQFullyConnected::Input::INPUT)};
142 const auto &input = _tensor_registry->getITensor(input_idx);
143
144 const auto cluster_idx{
146 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
147 assert(cluster->is_constant());
148
149 if (!input->is_dynamic())
150 return;
151
152 auto input_shape = input->getShape();
153 auto cluster_shape = cluster->getShape();
154
155 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
156 assert(cluster_buf);
157
158 ir::Shape new_shape =
159 shape_inference::inferBCQFullyConnectedShape(input_shape, cluster_shape, cluster_buf);
160
161 auto output_ind = op.getOutputs().at(0);
162 auto output = _tensor_registry->getITensor(output_ind);
163
164 output->applyShape(new_shape);
165 assert(output->buffer() != nullptr);
166}
ir::Shape inferBCQFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBCQFullyConnectedShape(), onert::ir::operation::BCQFullyConnected::INPUT, and onert::ir::operation::BCQFullyConnected::WEIGHTS_CLUSTERS.

◆ visit() [4/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BCQGather op)
override

Definition at line 168 of file DynamicShapeInferer.cc.

169{
170 const auto indices_idx{op.getInputs().at(ir::operation::BCQGather::Input::INDICES)};
171 const auto &indices = _tensor_registry->getITensor(indices_idx);
172
173 const auto input_binary_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_BINARY)};
174 const auto &input_binary = _tensor_registry->getITensor(input_binary_idx);
175
176 const auto cluster_idx{op.getInputs().at(ir::operation::BCQGather::Input::INPUT_CLUSTERS)};
177 const auto &cluster = _tensor_registry->getITensor(cluster_idx);
178 assert(cluster->is_constant());
179
180 if (!indices->is_dynamic())
181 return;
182
183 auto indices_shape = indices->getShape();
184 auto cluster_shape = cluster->getShape();
185 auto rank = input_binary->getShape().rank();
186
187 auto cluster_buf = reinterpret_cast<const int32_t *>(cluster->buffer());
188 assert(cluster_buf);
189
190 ir::Shape new_shape = shape_inference::inferBCQGatherShape(indices_shape, cluster_shape,
191 cluster_buf, rank, op.param());
192
193 auto output_ind = op.getOutputs().at(0);
194 auto output = _tensor_registry->getITensor(output_ind);
195
196 output->applyShape(new_shape);
197 assert(output->buffer() != nullptr);
198}
ir::Shape inferBCQGatherShape(const ir::Shape &indices_shape, const ir::Shape &cluster_shape, const int32_t *cluster_buf, int rank, const ir::operation::BCQGather::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::BCQGather::INDICES, onert::shape_inference::inferBCQGatherShape(), onert::ir::operation::BCQGather::INPUT_BINARY, onert::ir::operation::BCQGather::INPUT_CLUSTERS, and onert::ir::operation::BCQGather::param().

◆ visit() [5/45]

◆ visit() [6/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::BroadcastTo op)
override

Definition at line 206 of file DynamicShapeInferer.cc.

207{
208 auto output_ind = op.getOutputs().at(0);
209 auto output = _tensor_registry->getITensor(output_ind);
210
211 auto input_idx = op.getInputs().at(ir::operation::BroadcastTo::INPUT);
212 auto input = _tensor_registry->getITensor(input_idx);
213
214 if ((!input->is_dynamic()) && (!output->is_dynamic()))
215 return;
216
217 auto shape_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
218 const auto &shape = _tensor_registry->getITensor(shape_idx);
219
220 assert(shape); // It shouldn't be 0.
221
223 shape->getShape(), reinterpret_cast<const int32_t *>(shape->buffer()));
224
225 // set output shape and output buffer
226 output->applyShape(output_shape);
227 assert(output->buffer() != nullptr);
228}
const luci_interpreter::RuntimeShape output_shape
ir::Shape inferBroadcastToShape(const ir::Shape shp_shape, const int32_t *shp_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferBroadcastToShape(), onert::ir::operation::BroadcastTo::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.

◆ visit() [7/45]

◆ visit() [8/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Concat op)
override

Definition at line 236 of file DynamicShapeInferer.cc.

237{
238 /*
239 The state after compilation (static shape inference) could be one of the following:
240
241 inputs output execution-time shape inf required
242 ------------------------------------------ ---------------------------------
243 case 1) all static static X
244 case 2) at least one is dynamic dynamic O
245
246 Then nnfw_apply_tensorinf() could change one or both inputs dynamic.
247 So, in this method, we have one more state and we have to re-calculate shape for this shape.
248
249 case 3) at least one is dynamic static O
250
251 So, only when all inputs are static, we can skip dynamic shape inference.
252 */
253 bool all_static = true;
254 for (auto &&input_ind : op.getInputs())
255 {
256 auto input = _tensor_registry->getITensor(input_ind);
257 if (input->is_dynamic())
258 {
259 all_static = false;
260 break;
261 }
262 }
263
264 if (all_static)
265 return;
266
267 // sanity check
268 {
269 auto isConcatible = [](const backend::ITensor *input1, const backend::ITensor *input2,
270 int32_t axis) {
271 auto shape1 = input1->getShape();
272 auto shape2 = input2->getShape();
273 if (shape1.rank() != shape2.rank())
274 return false;
275
276 for (int i = 0; i < shape1.rank(); i++)
277 {
278 auto positive_axis = (axis >= 0) ? axis : axis + input1->getShape().rank();
279
280 if (i != positive_axis)
281 if (shape1.dim(i) != shape2.dim(i))
282 return false;
283 }
284
285 return true;
286 };
287
288 auto first_input_ind = op.getInputs().at(0);
289 auto first_input = _tensor_registry->getITensor(first_input_ind);
290
291 for (auto &&input_ind : op.getInputs())
292 {
293 auto input = _tensor_registry->getITensor(input_ind);
294 if (input != first_input && !isConcatible(first_input, input, op.param().axis))
295 throw std::runtime_error("input shapes does not matched for concat");
296 }
297 }
298
299 // getting output shape
301 for (auto &&input_ind : op.getInputs())
302 {
303 auto input = _tensor_registry->getITensor(input_ind);
304 ir::Shape shape = input->getShape();
305
306 in_shapes.emplace_back(shape);
307 }
308
309 auto output_ind = op.getOutputs().at(0);
310 auto output = _tensor_registry->getITensor(output_ind);
311 auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param());
312
313 output->applyShape(output_shape);
314}
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
std::vector< ir::Shape > Shapes
ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Concat::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::backend::ITensor::getShape(), onert::shape_inference::inferConcatShape(), output_shape, and onert::ir::operation::Concat::param().

◆ visit() [9/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Conv2D op)
override

Definition at line 316 of file DynamicShapeInferer.cc.

317{
318 // check if input is not dynamic
319 auto input_ind = op.getInputs().at(ir::operation::Conv2D::INPUT);
320 auto input = _tensor_registry->getITensor(input_ind);
321
322 auto ker_ind = op.getInputs().at(ir::operation::Conv2D::KERNEL);
323 auto ker = _tensor_registry->getITensor(ker_ind);
324
325 if ((!input->is_dynamic()) && (!ker->is_dynamic()))
326 return;
327
328 ir::Shape input_shape = input->getShape();
329 ir::Shape ker_shape = ker->getShape();
330
331 auto output_ind = op.getOutputs().at(0);
332 auto output = _tensor_registry->getITensor(output_ind);
333
334 ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param());
335
336 output->applyShape(output_shape);
337 assert(output->buffer() != nullptr);
338}
ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const ir::operation::Conv2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferConv2DShape(), onert::ir::operation::Conv2D::INPUT, onert::ir::operation::Conv2D::KERNEL, output_shape, and onert::ir::operation::Conv2D::param().

◆ visit() [10/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DepthwiseConv2D op)
override

Definition at line 340 of file DynamicShapeInferer.cc.

341{
342 // check if input is not dynamic
343 auto input_ind = op.getInputs().at(ir::operation::DepthwiseConv2D::INPUT);
344 auto input = _tensor_registry->getITensor(input_ind);
345
346 auto ker_ind = op.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
347 auto ker = _tensor_registry->getITensor(ker_ind);
348
349 if ((!input->is_dynamic()) && (!ker->is_dynamic()))
350 return;
351
352 ir::Shape input_shape = input->getShape();
353 ir::Shape ker_shape = ker->getShape();
354
355 auto output_ind = op.getOutputs().at(0);
356 auto output = _tensor_registry->getITensor(output_ind);
357
358 ir::Shape output_shape =
359 shape_inference::inferDepthwiseConv2DShape(input_shape, ker_shape, op.param());
360
361 output->applyShape(output_shape);
362 assert(output->buffer() != nullptr);
363}
ir::Shape inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const ir::operation::DepthwiseConv2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferDepthwiseConv2DShape(), onert::ir::operation::DepthwiseConv2D::INPUT, onert::ir::operation::DepthwiseConv2D::KERNEL, output_shape, and onert::ir::operation::DepthwiseConv2D::param().

◆ visit() [11/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DetectionPostProcess op)
override

Definition at line 629 of file DynamicShapeInferer.cc.

630{
631 // NOTE DetectionPostProcess's undefined outputs' shape are decided on compile time
632 // by static shape inferer.
633 // DetectionPostProcess's outputs' shape are independent with input shape
634 // and decided by parameter value.
635}

◆ visit() [12/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::DynamicUpdateSlice op)
override

Definition at line 365 of file DynamicShapeInferer.cc.

366{
367 // DynamicUpdateSlice is not unary operator, but output shape is same with input
368 handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::DynamicUpdateSlice::OPERAND));
369}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), and onert::ir::operation::DynamicUpdateSlice::OPERAND.

◆ visit() [13/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseActivation op)
override

◆ visit() [14/45]

◆ visit() [15/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ElementwiseUnary op)
override

◆ visit() [16/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ExpandDims op)
override

Definition at line 387 of file DynamicShapeInferer.cc.

388{
389 // check if input is not dynamic
390 auto input_ind = op.getInputs().at(ir::operation::ExpandDims::INPUT);
391 auto input = _tensor_registry->getITensor(input_ind);
392
393 // check if output is not dynamic, meaning when 1st input is static and 2nd input is const
394 auto output_ind = op.getOutputs().at(0);
395 auto output = _tensor_registry->getITensor(output_ind);
396
397 /*
398 Here, the state after compilation (static shape inference) could be one of the following:
399
400 input1 input2 output execution-time shape inf required
401 ----------------------------- --------------------------------
402 case 1) static const static X
403 case 2) static placeholder dynamic O
404 case 3) dynamic const dynamic O
405 case 4) dynamic placeholder dynamic O
406
407 Then nnfw_apply_tensorinf() could change input dynamic.
408 So, in this method, we could have one more state and we have to re-calculate shape
409 for this shape.
410
411 case 5) dynamic const static O
412
413 So, only when input1 and output are static, we can skip dynamic shape inference.
414 */
415 if ((!input->is_dynamic()) && (!output->is_dynamic()))
416 return;
417
418 ir::Shape input_shape = input->getShape();
419
420 auto axis_ind = op.getInputs().at(ir::operation::ExpandDims::AXIS);
421 auto axis = _tensor_registry->getITensor(axis_ind);
422 auto axis_type = axis->data_type();
423 assert(axis_type == ir::DataType::INT32 || axis_type == ir::DataType::INT64);
424
425 assert(axis->buffer());
426 int32_t axis_value =
427 (axis_type == ir::DataType::INT32)
428 ? reinterpret_cast<const int32_t *>(axis->buffer())[0]
429 : static_cast<int32_t>(reinterpret_cast<const int64_t *>(axis->buffer())[0]);
430
431 auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_value);
432
433 output->applyShape(output_shape);
434 assert(output->buffer() != nullptr);
435}
ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::ExpandDims::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferExpandDimsShape(), onert::ir::operation::ExpandDims::INPUT, and output_shape.

◆ visit() [17/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Fill op)
override

Definition at line 437 of file DynamicShapeInferer.cc.

438{
439 // check if output is not dynamic
440 auto output_ind = op.getOutputs().at(0);
441 auto output = _tensor_registry->getITensor(output_ind);
442 auto shape_ind = op.getInputs().at(ir::operation::Fill::Input::SHAPE);
443 auto shape = _tensor_registry->getITensor(shape_ind);
444
445 if ((!shape->is_dynamic()) && (!output->is_dynamic()))
446 return;
447
448 const auto dims_type = shape->data_type();
449 assert(dims_type == ir::DataType::INT32 || dims_type == ir::DataType::INT64);
450
451 auto dims_buf = shape->buffer();
452 assert(dims_buf);
453
454 const auto &dims_shape = shape->getShape();
455 const auto &output_shape = ((dims_type == ir::DataType::INT32)
456 ? shape_inference::inferFillShape<int32_t>(
457 dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
458 : shape_inference::inferFillShape<int64_t>(
459 dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
460
461 output->applyShape(output_shape);
462 assert(output->buffer() != nullptr);
463}
ir::Shape inferFillShape(const ir::Shape &fill_shape, const T *shape_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), output_shape, and onert::ir::operation::Fill::SHAPE.

◆ visit() [18/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FullyConnected op)
override

Definition at line 465 of file DynamicShapeInferer.cc.

466{
467 const auto input_idx{op.getInputs().at(ir::operation::FullyConnected::Input::INPUT)};
468 const auto &input = _tensor_registry->getITensor(input_idx);
469
470 const auto ker_idx{op.getInputs().at(ir::operation::FullyConnected::Input::WEIGHT)};
471 const auto &ker = _tensor_registry->getITensor(ker_idx);
472
473 if (!input->is_dynamic() && !ker->is_dynamic())
474 return;
475
476 auto input_shape = input->getShape();
477 auto ker_shape = ker->getShape();
478
479 ir::Shape new_shape =
480 shape_inference::inferFullyConnectedShape(input_shape, ker_shape, op.param().keep_num_dims);
481
482 auto output_ind = op.getOutputs().at(0);
483 auto output = _tensor_registry->getITensor(output_ind);
484
485 output->applyShape(new_shape);
486 assert(output->buffer() != nullptr);
487}
ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, bool keep_num_dims)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferFullyConnectedShape(), onert::ir::operation::FullyConnected::INPUT, onert::ir::operation::FullyConnected::Param::keep_num_dims, onert::ir::operation::FullyConnected::param(), and onert::ir::operation::FullyConnected::WEIGHT.

◆ visit() [19/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::FusedBatchNorm op)
override

◆ visit() [20/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Gather op)
override

Definition at line 494 of file DynamicShapeInferer.cc.

495{
496 const auto input_idx{op.getInputs().at(ir::operation::Gather::Input::INPUT)};
497 const auto &input = _tensor_registry->getITensor(input_idx);
498 auto input_shape = input->getShape();
499
500 const auto indices_idx{op.getInputs().at(ir::operation::Gather::Input::INDICES)};
501 const auto &indices = _tensor_registry->getITensor(indices_idx);
502 auto indices_shape = indices->getShape();
503
504 if (!(input->is_dynamic()) && !(indices->is_dynamic()))
505 return;
506
507 const auto rank = input_shape.rank();
508 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
509
510 assert(0 <= axis && axis < rank);
511
512 ir::Shape new_shape = shape_inference::inferGatherShape(input_shape, indices_shape, axis, rank);
513
514 auto output_ind = op.getOutputs().at(0);
515 auto output = _tensor_registry->getITensor(output_ind);
516
517 output->applyShape(new_shape);
518 assert(output->buffer() != nullptr);
519}
ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Gather::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Gather::INDICES, onert::shape_inference::inferGatherShape(), onert::ir::operation::Gather::INPUT, and onert::ir::operation::Gather::param().

◆ visit() [21/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::L2Normalization op)
override

◆ visit() [22/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::LSTM op)
override

Definition at line 526 of file DynamicShapeInferer.cc.

527{
528 const auto output_index{op.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)};
529 auto output = _tensor_registry->getITensor(output_index);
530
531 const auto output_state_out_index{
533
534 const auto cell_state_out_index{op.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)};
535
536 const auto scratch_buffer_index{op.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)};
537
538 if (!output->is_dynamic() &&
539 !(_tensor_registry->getITensor(output_state_out_index) != nullptr &&
540 _tensor_registry->getITensor(output_state_out_index)->is_dynamic()) &&
541 !(_tensor_registry->getITensor(cell_state_out_index) != nullptr &&
542 _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()) &&
543 !(_tensor_registry->getITensor(scratch_buffer_index) != nullptr &&
544 _tensor_registry->getITensor(cell_state_out_index)->is_dynamic()))
545 return;
546
547 const auto input_index{op.getInputs().at(ir::operation::LSTM::Input::INPUT)};
548 const auto input = _tensor_registry->getITensor(input_index);
549 const auto input_shape = input->getShape();
550
551 const auto input_to_output_weights_index{
553 const auto input_to_output_weights = _tensor_registry->getITensor(input_to_output_weights_index);
554 const auto input_to_output_weights_shape = input_to_output_weights->getShape();
555
556 const auto recurrent_to_output_weights_index{
558 const auto recurrent_to_output_weights =
559 _tensor_registry->getITensor(recurrent_to_output_weights_index);
560 const auto recurrent_to_output_weights_shape = recurrent_to_output_weights->getShape();
561
562 // re-sizing outputs
563 const int n_batch =
564 (input_shape.rank() == 3 && op.param().time_major) ? input_shape.dim(1) : input_shape.dim(0);
565 const int n_cell = input_to_output_weights_shape.dim(0);
566 const int n_output = recurrent_to_output_weights_shape.dim(1);
567 if (input_shape.rank() == 3)
568 {
569 if (op.param().time_major)
570 output->applyShape(ir::Shape{input_shape.dim(0), n_batch, n_output});
571 else
572 output->applyShape(ir::Shape{n_batch, input_shape.dim(1), n_output});
573 }
574 else
575 {
576 assert(input_shape.rank() == 2);
577 output->applyShape(ir::Shape{n_batch, n_output});
578 }
579 assert(output->buffer() != nullptr);
580
581 auto output_state_out = _tensor_registry->getITensor(output_state_out_index);
582 if (output_state_out != nullptr)
583 {
584 output_state_out->applyShape(ir::Shape{n_batch, n_output});
585 assert(output_state_out->buffer() != nullptr);
586 }
587
588 auto cell_state_out = _tensor_registry->getITensor(cell_state_out_index);
589 if (cell_state_out != nullptr)
590 {
591 cell_state_out->applyShape(ir::Shape{n_batch, n_cell});
592 assert(cell_state_out->buffer() != nullptr);
593 }
594
595 auto scratch_buffer = _tensor_registry->getITensor(scratch_buffer_index);
596 if (scratch_buffer != nullptr)
597 {
598 const auto input_to_input_weights_index{
600 const auto recurrent_to_input_weights_index{
602
603 const auto input_to_input_weights_shape =
604 _tensor_registry->getITensor(input_to_input_weights_index)->getShape();
605 bool has_input_to_input_weights =
606 input_to_input_weights_shape.dim(0) != 0 && input_to_input_weights_shape.dim(1) != 0;
607
608 const auto recurrent_to_input_weights_shape =
609 _tensor_registry->getITensor(recurrent_to_input_weights_index)->getShape();
610 bool has_recurrent_to_input_weights =
611 recurrent_to_input_weights_shape.dim(0) != 0 && recurrent_to_input_weights_shape.dim(1) != 0;
612
613 // NOTE The cell_to_input_weights do not exist in non-peephole although regular LSTM(non-CIFG).
614 // true: no CIFG
615 // false: CIFG
616 bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights;
617 if (has_cifg_param)
618 {
619 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 4});
620 }
621 else
622 {
623 scratch_buffer->applyShape(ir::Shape{n_batch, n_cell * 3});
624 }
625 assert(scratch_buffer->buffer() != nullptr);
626 }
627}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::LSTM::CELL_STATE_OUT, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::LSTM::INPUT, onert::ir::operation::LSTM::INPUT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::OUTPUT, onert::ir::operation::LSTM::OUTPUT_STATE_OUT, onert::ir::operation::LSTM::param(), onert::ir::operation::LSTM::RECURRENT_TO_OUTPUT_WEIGHTS, onert::ir::operation::LSTM::SCRATCH_BUFFER, and onert::ir::operation::LSTM::Param::time_major.

◆ visit() [23/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::OneHot op)
override

Definition at line 637 of file DynamicShapeInferer.cc.

638{
639 auto output_ind = op.getOutputs().at(0);
640 auto output = _tensor_registry->getITensor(output_ind);
641
642 auto indices_ind = op.getInputs().at(ir::operation::OneHot::INDICES);
643 const auto &indices = _tensor_registry->getITensor(indices_ind);
644 auto indices_shape = indices->getShape();
645
646 auto depth_ind = op.getInputs().at(ir::operation::OneHot::DEPTH);
647 const auto &depth = _tensor_registry->getITensor(depth_ind);
648
649 if (!indices->is_dynamic() && !depth->is_dynamic())
650 {
651 return;
652 }
653
654 int32_t *depth_buf = reinterpret_cast<int32_t *>(depth->buffer());
655 assert(depth_buf);
656 const auto axis_val = op.param().axis;
657
658 ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val);
659 output->applyShape(new_shape);
660 assert(output->buffer() != nullptr);
661}
ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::OneHot::Param::axis, onert::ir::operation::OneHot::DEPTH, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::OneHot::INDICES, onert::shape_inference::inferOnehotShape(), and onert::ir::operation::OneHot::param().

◆ visit() [24/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pack op)
override

Definition at line 663 of file DynamicShapeInferer.cc.

664{
665 bool is_any_of_inputs_dynamic = [&]() -> bool {
666 for (uint32_t i = 0; i < op.getInputs().size(); ++i)
667 {
668 const auto &input = _tensor_registry->getITensor(op.getInputs().at(i));
669 if (input->is_dynamic())
670 {
671 return true;
672 }
673 }
674 return false;
675 }();
676
677 const auto input_idx{op.getInputs().at(0)};
678 const auto &input = _tensor_registry->getITensor(input_idx);
679 auto input_shape = input->getShape();
680
681 auto output_ind = op.getOutputs().at(0);
682 auto output = _tensor_registry->getITensor(output_ind);
683
684 if (!is_any_of_inputs_dynamic && !output->is_dynamic())
685 return;
686
687 const auto rank = input_shape.rank() + 1;
688 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
689 const auto num = op.param().num;
690
691 assert(0 <= axis && axis < rank);
692
693 ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num);
694
695 output->applyShape(new_shape);
696 assert(output->buffer() != nullptr);
697}
ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Pack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPackShape(), onert::ir::operation::Pack::Param::num, onert::ir::operation::Pack::param(), and onert::ir::OperandIndexSequence::size().

◆ visit() [25/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pad op)
override

Definition at line 699 of file DynamicShapeInferer.cc.

700{
701 // check if output is not dynamic
702 auto output_ind = op.getOutputs().at(0);
703 auto output = _tensor_registry->getITensor(output_ind);
704
705 auto input_ind = op.getInputs().at(ir::operation::Pad::Input::INPUT);
706 auto input = _tensor_registry->getITensor(input_ind);
707
708 auto pad_ind = op.getInputs().at(ir::operation::Pad::Input::PAD);
709 auto pad = _tensor_registry->getITensor(pad_ind);
710
711 // check if input and output are not dynamic
712 if ((!input->is_dynamic()) && (!output->is_dynamic()))
713 return;
714
715 int32_t *pad_buf = reinterpret_cast<int32_t *>(pad->buffer());
716 assert(pad_buf);
717
718 auto output_shape =
719 shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements());
720
721 // change output shape and reallocate output tensor memory
722 output->applyShape(output_shape);
723 assert(output->buffer() != nullptr);
724}
ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPadShape(), onert::ir::operation::Pad::INPUT, output_shape, and onert::ir::operation::Pad::PAD.

◆ visit() [26/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Permute op)
override

Definition at line 726 of file DynamicShapeInferer.cc.

727{
728 const auto output_ind = op.getOutputs().at(0);
729 const auto &output = _tensor_registry->getITensor(output_ind);
730
731 const auto input_ind = op.getInputs().at(0);
732 const auto &input = _tensor_registry->getITensor(input_ind);
733
734 // check if input and output are not dynamic
735 if ((!input->is_dynamic()) && (!output->is_dynamic()))
736 return;
737
738 ir::Shape input_shape = input->getShape();
739 const auto &output_shape = convertShape(input_shape, op.getPermuteType());
740
741 output->applyShape(output_shape);
742 assert(output->buffer() != nullptr);
743}
Shape convertShape(const Shape &shape, const PermuteType &type)
Converts shape when its rank is 4.
Definition Shape.cc:62

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Permute::getPermuteType(), and output_shape.

◆ visit() [27/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pool2D op)
override

Definition at line 745 of file DynamicShapeInferer.cc.

746{
747 // check if input is not dynamic
748 auto input_ind = op.getInputs().at(ir::operation::Pool2D::INPUT);
749 auto input = _tensor_registry->getITensor(input_ind);
750
751 if (!input->is_dynamic())
752 return;
753
754 ir::Shape input_shape = input->getShape();
755
756 auto output_ind = op.getOutputs().at(0);
757 auto output = _tensor_registry->getITensor(output_ind);
758
759 ir::Shape output_shape = shape_inference::inferPoolShape(input_shape, op.param());
760
761 output->applyShape(output_shape);
762 assert(output->buffer() != nullptr);
763}
ir::Shape inferPoolShape(const ir::Shape &in_shape, const ir::operation::Pool2D::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferPoolShape(), onert::ir::operation::Pool2D::INPUT, output_shape, and onert::ir::operation::Pool2D::param().

◆ visit() [28/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Pow op)
override

◆ visit() [29/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Range op)
override

Definition at line 771 of file DynamicShapeInferer.cc.

772{
773 // check if output is not dynamic
774 auto output_ind = op.getOutputs().at(0);
775 auto output = _tensor_registry->getITensor(output_ind);
776
777 // from op, access the buffer of second input to read new shape
778 auto start_idx = op.getInputs().at(ir::operation::Range::Input::START);
779 auto start_tensor = _tensor_registry->getITensor(start_idx);
780
781 auto limit_idx = op.getInputs().at(ir::operation::Range::Input::LIMIT);
782 auto limit_tensor = _tensor_registry->getITensor(limit_idx);
783
784 auto delta_idx = op.getInputs().at(ir::operation::Range::Input::DELTA);
785 auto delta_tensor = _tensor_registry->getITensor(delta_idx);
786
787 if (!start_tensor->is_dynamic() && !limit_tensor->is_dynamic() && !delta_tensor->is_dynamic() &&
788 !output->is_dynamic())
789 return;
790
791 ir::Shape new_shape;
792 if (output->data_type() == ir::DataType::FLOAT32)
793 {
794 new_shape =
795 shape_inference::inferRangeShape<float>(*reinterpret_cast<float *>(start_tensor->buffer()),
796 *reinterpret_cast<float *>(limit_tensor->buffer()),
797 *reinterpret_cast<float *>(delta_tensor->buffer()));
798 }
799 else if (output->data_type() == ir::DataType::INT32)
800 {
801 new_shape = shape_inference::inferRangeShape<int32_t>(
802 *reinterpret_cast<int32_t *>(start_tensor->buffer()),
803 *reinterpret_cast<int32_t *>(limit_tensor->buffer()),
804 *reinterpret_cast<int32_t *>(delta_tensor->buffer()));
805 }
806 output->applyShape(new_shape);
807 assert(output->buffer() != nullptr);
808}

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Range::DELTA, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Range::LIMIT, and onert::ir::operation::Range::START.

◆ visit() [30/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reduce op)
override

Definition at line 810 of file DynamicShapeInferer.cc.

811{
812 const auto input_idx{op.getInputs().at(ir::operation::Reduce::Input::INPUT)};
813 const auto &input = _tensor_registry->getITensor(input_idx);
814 auto input_shape = input->getShape();
815
816 const auto axes_idx{op.getInputs().at(ir::operation::Reduce::Input::AXES)};
817 const auto &axes = _tensor_registry->getITensor(axes_idx);
818
819 if (!input->is_dynamic())
820 return;
821
822 std::vector<int32_t> axes_vec;
823 for (uint32_t i = 0; i < axes->getShape().num_elements(); ++i)
824 {
825 const auto buffer = axes->buffer() + axes->calcOffset({i});
826 switch (axes->data_type())
827 {
828 case ir::DataType::INT32:
829 {
830 axes_vec.emplace_back(*reinterpret_cast<const int32_t *>(buffer));
831 break;
832 }
833 case ir::DataType::INT64:
834 {
835 axes_vec.emplace_back(*reinterpret_cast<const int64_t *>(buffer));
836 break;
837 }
838 default:
839 throw std::runtime_error("DynamicShapeInferer " + op.name() + ": Not supported data type");
840 break;
841 }
842 }
843 const auto keep_dims = op.param().keep_dims;
844
845 auto output_ind = op.getOutputs().at(0);
846 auto output = _tensor_registry->getITensor(output_ind);
847
848 ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims);
849
850 output->applyShape(new_shape);
851 assert(output->buffer() != nullptr);
852}
uint32_t num_elements(const Shape &shape)
The number of elements of a feature map of a given shape.
Definition Shape.h:59
ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector< int > &axes, bool keep_dims)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Reduce::AXES, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReduceShape(), onert::ir::operation::Reduce::INPUT, onert::ir::operation::Reduce::Param::keep_dims, onert::ir::operation::Reduce::name(), and onert::ir::operation::Reduce::param().

◆ visit() [31/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reshape op)
override

Definition at line 854 of file DynamicShapeInferer.cc.

855{
856 // check if output is not dynamic
857 auto output_ind = op.getOutputs().at(0);
858 auto output = _tensor_registry->getITensor(output_ind);
859
860 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
861 auto input = _tensor_registry->getITensor(input_ind);
862
863 /*
 864 Here, the state after compilation (static shape inference) could be one of the following:
865
866 input1 input2 (or option) output execution-time shape inf required
867 ------------------------------------ --------------------------------
868 case 1) static const static X
869 case 2) static placeholder dynamic O
870 case 3) dynamic const dynamic O
871 case 4) dynamic placeholder dynamic O
872
873 Then nnfw_apply_tensorinf() could change input dynamic.
874 So, in this method, we could have one more state and we have to re-calculate shape
875 for this shape.
876
877 case 5) dynamic const static O
878
 879 So, only when both input1 and output are static, we can skip dynamic shape inference.
880 */
881 if ((!input->is_dynamic()) && (!output->is_dynamic()))
882 return;
883
884 // New shape is given by second input tensor
885 if (op.getInputs().size() == 2)
886 {
887 // from op, access the buffer of second input to read new shape
888 auto new_shape_ind = op.getInputs().at(ir::operation::Reshape::Input::SHAPE);
889
890 // getting output shape by reading new_shape tensor buffer
891 auto new_shape = _tensor_registry->getITensor(new_shape_ind);
892 assert(new_shape);
893
894 int32_t *new_shape_buf = reinterpret_cast<int32_t *>(new_shape->buffer());
895 assert(new_shape_buf);
896
897 auto output_shape = shape_inference::inferReshapeShape(input->getShape(), new_shape_buf,
898 new_shape->getShape().num_elements());
899
900 // if shape is changed, change output shape and reallocate output tensor memory
901 if (output_shape != output->getShape() || output->buffer() == nullptr)
902 {
903 // change on output shape
904 output->applyShape(output_shape);
905 }
906 assert(output->buffer() != nullptr);
907 }
908 // New shape is given by option
909 else if (op.param().new_shape.size() != 0)
910 {
911 // Let's check the new_shape option
912 auto shape = op.param().new_shape;
913 auto output_shape =
914 shape_inference::inferReshapeShape(input->getShape(), shape.data(), shape.size());
915
916 // if shape is changed, change output shape and reallocate output tensor memory
917 if (output_shape != output->getShape() || output->buffer() == nullptr)
918 {
919 // change on output shape
920 output->applyShape(output_shape);
921 }
922 assert(output->buffer() != nullptr);
923 }
924 else
925 {
926 throw std::runtime_error("Reshape: new shape is missing");
927 return;
928 }
929}
ir::Shape inferReshapeShape(const ir::Shape &input_shape, const int32_t *shape_buf, const int32_t shape_num_elements)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferReshapeShape(), onert::ir::operation::Reshape::INPUT, onert::ir::operation::Reshape::Param::new_shape, output_shape, onert::ir::operation::Reshape::param(), onert::ir::operation::Reshape::SHAPE, and onert::ir::OperandIndexSequence::size().

◆ visit() [32/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::ResizeBilinear op)
override

Definition at line 931 of file DynamicShapeInferer.cc.

932{
933 // check if output is not dynamic
934 auto output_ind = op.getOutputs().at(0);
935 auto output = _tensor_registry->getITensor(output_ind);
936
937 auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT);
938 auto input = _tensor_registry->getITensor(input_ind);
939
940 if ((!input->is_dynamic()) && (!output->is_dynamic()))
941 return;
942
943 // getting output shape from input shape and Params
944 int32_t height_out, width_out;
945 if (op.getInputs().size() == 2)
946 {
947 auto size_ind = op.getInputs().at(ir::operation::ResizeBilinear::Input::SIZE);
948 auto size = _tensor_registry->getITensor(size_ind);
949 if (size->data_type() == ir::DataType::INT32)
950 {
951 auto size_buf = reinterpret_cast<const int32_t *>(size->buffer());
952 height_out = size_buf[0];
953 width_out = size_buf[1];
954 }
955 else
956 {
957 throw std::runtime_error("DynamicShapeInferer ResizeBilinear : Unsupported data type");
958 }
959 }
960 else
961 {
962 height_out = op.param().height_out;
963 width_out = op.param().width_out;
964 }
965 auto output_shape =
966 shape_inference::inferResizeBilinearShape(input->getShape(), height_out, width_out);
967
968 // if shape is changed, change output shape and reallocate output tensor memory
969 if (output_shape != output->getShape() || output->buffer() == nullptr)
970 {
971 // change on output shape
972 output->applyShape(output_shape);
973 }
974 assert(output->buffer() != nullptr);
975}
ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height, const int32_t output_width)
int32_t size[5]
Definition Slice.cpp:35

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::ResizeBilinear::Param::height_out, onert::shape_inference::inferResizeBilinearShape(), onert::ir::operation::Reshape::INPUT, output_shape, onert::ir::operation::ResizeBilinear::param(), onert::ir::OperandIndexSequence::size(), size, onert::ir::operation::ResizeBilinear::SIZE, and onert::ir::operation::ResizeBilinear::Param::width_out.

◆ visit() [33/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Reverse op)
override

◆ visit() [34/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Select op)
override

Definition at line 982 of file DynamicShapeInferer.cc.

983{
984 const auto input_cond_idx = op.getInputs().at(ir::operation::Select::Input::CONDITION);
985 const auto &input_cond = _tensor_registry->getITensor(input_cond_idx);
986
987 const auto input_true_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_TRUE);
988 const auto &input_true = _tensor_registry->getITensor(input_true_idx);
989
990 const auto input_false_idx = op.getInputs().at(ir::operation::Select::Input::INPUT_FALSE);
991 const auto &input_false = _tensor_registry->getITensor(input_false_idx);
992
993 if ((!input_cond->is_dynamic()) && (!input_true->is_dynamic()) && (!input_false->is_dynamic()))
994 {
995 return;
996 }
997
998 auto input_cond_shape = input_cond->getShape();
999 auto input_true_shape = input_true->getShape();
1000 auto input_false_shape = input_false->getShape();
1001
 1002 // Select output shape
1003 ir::Shape new_shape =
1004 shape_inference::inferSelectShape(input_cond_shape, input_true_shape, input_false_shape);
1005
1006 auto output_ind = op.getOutputs().at(0);
1007 auto output = _tensor_registry->getITensor(output_ind);
1008
1009 output->applyShape(new_shape);
1010 assert(output->buffer() != nullptr);
1011}
ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape, const ir::Shape &input_false_shape)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Select::CONDITION, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSelectShape(), onert::ir::operation::Select::INPUT_FALSE, and onert::ir::operation::Select::INPUT_TRUE.

◆ visit() [35/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Shape op)
override

Definition at line 1013 of file DynamicShapeInferer.cc.

1014{
1015 const auto input_idx{op.getInputs().at(0)};
1016 const auto &input = _tensor_registry->getITensor(input_idx);
1017 auto input_shape = input->getShape();
1018
1019 if (!input->is_dynamic())
1020 return;
1021
1022 auto output_ind = op.getOutputs().at(0);
1023 auto output = _tensor_registry->getITensor(output_ind);
1024
1025 ir::Shape output_shape;
1026 output_shape.append(input_shape.rank());
1027
1028 output->applyShape(output_shape);
1029 assert(output->buffer() != nullptr);
1030}

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), and output_shape.

◆ visit() [36/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Slice op)
override

Definition at line 1032 of file DynamicShapeInferer.cc.

1033{
1034 const auto input_index{op.getInputs().at(ir::operation::Slice::Input::INPUT)};
1035 const auto input = _tensor_registry->getITensor(input_index);
1036 const auto begins_index{op.getInputs().at(ir::operation::Slice::Input::BEGINS)};
1037 const auto begins = _tensor_registry->getITensor(begins_index);
1038 const auto sizes_index{op.getInputs().at(ir::operation::Slice::Input::SIZES)};
1039 const auto sizes = _tensor_registry->getITensor(sizes_index);
1040 auto output_index = op.getOutputs().at(0);
1041 auto output = _tensor_registry->getITensor(output_index);
1042
1043 if (!(input->is_dynamic() || begins->is_dynamic() || sizes->is_dynamic() || output->is_dynamic()))
1044 {
1045 return;
1046 }
1047
1048 ir::Shape input_shape = input->getShape();
1049 auto begins_buf = reinterpret_cast<const int32_t *>(begins->buffer());
1050 auto sizes_buf = reinterpret_cast<const int32_t *>(sizes->buffer());
1051
1052 ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf);
1053
1054 output->applyShape(new_shape);
1055 assert(output->buffer() != nullptr);
1056}
ir::Shape inferSliceShape(const ir::Shape &input_shape, const T *begins_buf, const T *sizes_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Slice::BEGINS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSliceShape(), onert::ir::operation::Slice::INPUT, and onert::ir::operation::Slice::SIZES.

◆ visit() [37/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Softmax op)
override

◆ visit() [38/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::SpaceToBatchND op)
override

Definition at line 1063 of file DynamicShapeInferer.cc.

1064{
1065 const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
1066 const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
1067 const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
1068 auto output_idx{op.getOutputs().at(0)};
1069
1070 const auto &input = _tensor_registry->getITensor(input_idx);
1071 const auto &block_shape = _tensor_registry->getITensor(block_shape_idx);
1072 const auto &padding = _tensor_registry->getITensor(padding_idx);
1073 auto output = _tensor_registry->getITensor(output_idx);
1074
1075 if (!(input->is_dynamic() || block_shape->is_dynamic() || padding->is_dynamic() ||
1076 output->is_dynamic()))
1077 {
1078 return;
1079 }
1080
1081 auto input_shape = input->getShape();
1082 auto block_shape_shape = block_shape->getShape();
1083 auto padding_shape = padding->getShape();
1084
1085 auto block_shape_data = reinterpret_cast<int32_t *>(block_shape->buffer());
1086 auto padding_data = reinterpret_cast<int32_t *>(padding->buffer());
1087
1088 ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
1089 input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
1090
1091 output->applyShape(new_shape);
1092 assert(output->buffer() != nullptr);
1093}
ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape, const ir::Shape &padding_shape, const int32_t *block_shape_buf, const int32_t *padding_buf)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::SpaceToBatchND::BLOCK_SIZE, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSpaceToBatchNDShape(), onert::ir::operation::SpaceToBatchND::INPUT, and onert::ir::operation::SpaceToBatchND::PADDINGS.

◆ visit() [39/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Split op)
override

Definition at line 1095 of file DynamicShapeInferer.cc.

1096{
1097 const auto input_idx{op.getInputs().at(ir::operation::Split::Input::INPUT)};
1098 const auto &input = _tensor_registry->getITensor(input_idx);
1099
1100 // Return if all tensors are not dynamic
1101 bool has_dynamic = false;
1102 for (const auto &output_idx : op.getOutputs())
1103 {
1104 auto output = _tensor_registry->getITensor(output_idx);
1105 has_dynamic |= output->is_dynamic();
1106 }
1107 if (!input->is_dynamic() && !has_dynamic)
1108 {
1109 return;
1110 }
1111
1112 auto input_shape = input->getShape();
1113
1114 const auto axis_idx{op.getInputs().at(ir::operation::Split::Input::AXIS)};
1115 const auto &axis = _tensor_registry->getITensor(axis_idx);
1116
1117 auto axis_value = *reinterpret_cast<const int32_t *>(axis->buffer());
1118 const auto num_splits = op.param().num_splits;
1119 const auto rank = input_shape.rank();
1120 axis_value = axis_value < 0 ? axis_value + rank : axis_value;
1121
1122 assert(0 <= axis_value && axis_value < rank);
1123
1124 ir::Shape new_shape = shape_inference::inferSplitShape(input_shape, axis_value, num_splits);
1125 for (int out_tensor_idx = 0; out_tensor_idx < num_splits; out_tensor_idx++)
1126 {
1127 auto output_ind = op.getOutputs().at(out_tensor_idx);
1128 auto output = _tensor_registry->getITensor(output_ind);
1129
1130 output->applyShape(new_shape);
1131 assert(output->buffer() != nullptr);
1132 }
1133}
ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Split::AXIS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSplitShape(), onert::ir::operation::Split::INPUT, onert::ir::operation::Split::Param::num_splits, and onert::ir::operation::Split::param().

◆ visit() [40/45]

◆ visit() [41/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Squeeze op)
override

Definition at line 1141 of file DynamicShapeInferer.cc.

1142{
1143 const auto input_idx{op.getInputs().at(ir::operation::Squeeze::Input::INPUT)};
1144 const auto &input = _tensor_registry->getITensor(input_idx);
1145
1146 if (!input->is_dynamic())
1147 {
1148 return;
1149 }
1150
1151 auto input_shape = input->getShape();
1152
 1153 // Squeeze output shape
1154 ir::Shape new_shape = shape_inference::inferSqueezeShape(input_shape, op.param());
1155
1156 auto output_ind = op.getOutputs().at(0);
1157 auto output = _tensor_registry->getITensor(output_ind);
1158
1159 output->applyShape(new_shape);
1160 assert(output->buffer() != nullptr);
1161}
ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferSqueezeShape(), onert::ir::operation::Squeeze::INPUT, and onert::ir::operation::Squeeze::param().

◆ visit() [42/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::StridedSlice op)
override

Definition at line 1163 of file DynamicShapeInferer.cc.

1164{
1165
1166 const auto input_index{op.getInputs().at(ir::operation::StridedSlice::Input::INPUT)};
1167 auto input = _tensor_registry->getITensor(input_index);
1168 ir::Shape input_shape = input->getShape();
1169
1170 const auto starts_index{op.getInputs().at(ir::operation::StridedSlice::Input::STARTS)};
1171 auto starts = _tensor_registry->getITensor(starts_index);
1172
1173 const auto ends_index{op.getInputs().at(ir::operation::StridedSlice::Input::ENDS)};
1174 auto ends = _tensor_registry->getITensor(ends_index);
1175
1176 const auto strides_index{op.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)};
1177 auto strides = _tensor_registry->getITensor(strides_index);
1178
1179 if (!(input->is_dynamic() || starts->is_dynamic() || ends->is_dynamic() || strides->is_dynamic()))
1180 {
1181 return;
1182 }
1183
1184 const auto begin_mask = op.param().begin_mask;
1185 const auto end_mask = op.param().end_mask;
1186 const auto shrink_axis_mask = op.param().shrink_axis_mask;
1187 const auto rank = input_shape.rank();
1188
 1189 auto op_params = shape_inference::buildStridedSliceParams(
 1190 reinterpret_cast<uint32_t *>(starts->buffer()), reinterpret_cast<uint32_t *>(ends->buffer()),
1191 reinterpret_cast<uint32_t *>(strides->buffer()), begin_mask, end_mask, shrink_axis_mask, rank);
1192
1193 auto output_index = op.getOutputs().at(0);
1194 auto output = _tensor_registry->getITensor(output_index);
1195
1196 ir::Shape output_shape =
1197 onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank);
1198
1199 output->applyShape(output_shape);
1200 assert(output->buffer() != nullptr);
1201}
StridedSliceParams buildStridedSliceParams(const T *begin, const T *end, const T *strides, const uint32_t begin_mask, const uint32_t end_mask, const uint32_t shrink_axis_mask, const uint8_t rank)
ir::Shape inferStridedSliceShape(const ir::Shape &input_shape, const StridedSliceParams &op_params, uint32_t rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::StridedSlice::Param::begin_mask, onert::shape_inference::buildStridedSliceParams(), onert::ir::operation::StridedSlice::Param::end_mask, onert::ir::operation::StridedSlice::ENDS, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferStridedSliceShape(), onert::ir::operation::StridedSlice::INPUT, output_shape, onert::ir::operation::StridedSlice::param(), onert::ir::operation::StridedSlice::Param::shrink_axis_mask, onert::ir::operation::StridedSlice::STARTS, and onert::ir::operation::StridedSlice::STRIDES.

◆ visit() [43/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Tile op)
override

Definition at line 1203 of file DynamicShapeInferer.cc.

1204{
1205 auto output_ind = op.getOutputs().at(0);
1206 auto output = _tensor_registry->getITensor(output_ind);
1207
1208 auto input_idx = op.getInputs().at(ir::operation::Tile::Input::INPUT);
1209 auto input = _tensor_registry->getITensor(input_idx);
1210
1211 auto multiplier_idx = op.getInputs().at(ir::operation::Tile::Input::MULTIPLES);
1212 auto multiplier = _tensor_registry->getITensor(multiplier_idx);
1213
1214 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1215 return;
1216
1217 auto input_shape = input->getShape();
1218 auto multiplier_buffer = reinterpret_cast<const int32_t *>(multiplier->buffer());
1219 assert(multiplier_buffer);
1220
1221 auto mult_shape = multiplier->getShape();
 1222 auto output_shape = shape_inference::inferTileShape(
 1223 input_shape, multiplier_buffer, mult_shape.rank() == 0 ? 1 : mult_shape.dim(0));
1224
1225 // set output shape and output buffer
1226 output->applyShape(output_shape);
1227 assert(output->buffer() != nullptr);
1228}
ir::Shape inferTileShape(const ir::Shape &in_shape, const int32_t *multiplier_buf, const int32_t multiplier_size)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTileShape(), onert::ir::operation::Tile::INPUT, onert::ir::operation::Tile::MULTIPLES, and output_shape.

◆ visit() [44/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Transpose op)
override

Definition at line 1230 of file DynamicShapeInferer.cc.

1231{
1232 // check if output is not dynamic
1233 auto output_ind = op.getOutputs().at(0);
1234 auto output = _tensor_registry->getITensor(output_ind);
1235
1236 // from op, access the buffer of second input to read new shape
1237 auto input_ind = op.getInputs().at(ir::operation::Transpose::Input::INPUT);
1238 auto input = _tensor_registry->getITensor(input_ind);
1239 auto input_shape = input->getShape();
1240
1241 /*
1242 Here, the state after compilation (static shape inference) could be one of the following:
1243
1244 input perms output execution-time shape inf required
1245 ------------------------------------ --------------------------------
1246 case 1) static const static X
1247 case 2) static non-const dynamic O
1248 case 3) dynamic const dynamic O
1249 case 4) dynamic non-const dynamic O
1250
 1251 So, only when both input1 and output are static, we can skip dynamic shape inference.
1252 */
1253 if ((!input->is_dynamic()) && (!output->is_dynamic()))
1254 return;
1255
1256 auto perm_ind = op.getInputs().at(ir::operation::Transpose::Input::PERMUTATION);
1257 auto perm = _tensor_registry->getITensor(perm_ind);
1258
1259 ir::Shape new_shape;
1260 // TODO Change perm->dimension(0) == 0 to perm->num_elements() == 0
1261 if (perm->getShape().dim(0) == 0) // This condition means that perm is (n-1...0)
1262 {
1263 // Call by (n-1...0)
1264 new_shape = shape_inference::inferTransposeShape(input_shape, nullptr, 0);
1265 }
1266 else
1267 {
1268 // Check rank
1269 if (static_cast<size_t>(input->getShape().rank()) != perm->getShape().num_elements())
1270 {
1271 throw std::runtime_error("DynamicShapeInferer failed, bad rank size: " +
1272 std::to_string(perm->getShape().num_elements()));
1273 }
1274
1275 // set output shape, based on input and params
1276 const auto perm_buffer = reinterpret_cast<const int32_t *>(perm->buffer());
1277 new_shape =
1278 shape_inference::inferTransposeShape(input_shape, perm_buffer, perm->getShape().dim(0));
1279 }
1280 output->applyShape(new_shape);
1281 assert(output->buffer() != nullptr);
1282}
ir::Shape inferTransposeShape(const ir::Shape &in_shape, const int32_t *perm_buf, const int32_t rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferTransposeShape(), onert::ir::operation::Transpose::INPUT, and onert::ir::operation::Transpose::PERMUTATION.

◆ visit() [45/45]

void onert::exec::DynamicShapeInferer::visit ( const ir::operation::Unpack op)
override

Definition at line 1284 of file DynamicShapeInferer.cc.

1285{
1286 // check if output is not dynamic
1287 const auto input_idx{op.getInputs().at(0)};
1288 const auto &input = _tensor_registry->getITensor(input_idx);
1289
1290 if (!input->is_dynamic())
1291 return;
1292
1293 auto input_shape = input->getShape();
1294
1295 const auto rank = input_shape.rank();
1296 const auto axis = ((op.param().axis < 0) ? rank + op.param().axis : op.param().axis);
1297 const auto num = op.param().num;
1298
1299 assert(0 <= axis && axis < rank);
1300
1301 ir::Shape new_shape = shape_inference::inferUnpackShape(input_shape, axis, rank);
1302
1303 for (int out_tensor_idx = 0; out_tensor_idx < num; out_tensor_idx++)
1304 {
1305 auto output_ind = op.getOutputs().at(out_tensor_idx);
1306 auto output = _tensor_registry->getITensor(output_ind);
1307
1308 output->applyShape(new_shape);
1309
1310 assert(output->buffer() != nullptr);
1311 }
1312}
ir::Shape inferUnpackShape(const ir::Shape &input_shape, int axis, int rank)

References onert::ir::OperandIndexSequence::at(), onert::ir::operation::Unpack::Param::axis, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::shape_inference::inferUnpackShape(), onert::ir::operation::Unpack::Param::num, and onert::ir::operation::Unpack::param().


The documentation for this class was generated from the following files: