ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::train::KernelGenerator Class Reference

#include <KernelGenerator.h>

Collaboration diagram for onert::backend::train::KernelGenerator:

Public Member Functions

 KernelGenerator (const ir::train::TrainableGraph &tgraph, const std::shared_ptr< TensorRegistry > &tensor_reg, const std::shared_ptr< ExternalContext > &external_context, const exec::train::optimizer::Optimizer *optimizer)
 
std::unique_ptr< exec::train::TrainableFnSequence > generate (ir::OperationIndex op_ind) override
 
void visit (const ir::train::operation::BinaryArithmetic &) override
 
void visit (const ir::train::operation::Conv2D &) override
 
void visit (const ir::train::operation::DepthwiseConv2D &) override
 
void visit (const ir::train::operation::ElementwiseActivation &) override
 
void visit (const ir::train::operation::FullyConnected &) override
 
void visit (const ir::train::operation::Loss &) override
 
void visit (const ir::train::operation::Pad &) override
 
void visit (const ir::train::operation::Pool2D &) override
 
void visit (const ir::train::operation::Reduce &node) override
 
void visit (const ir::train::operation::Reshape &node) override
 
void visit (const ir::train::operation::Softmax &node) override
 
- Public Member Functions inherited from onert::backend::train::KernelGeneratorBase
virtual ~KernelGeneratorBase ()=default
 
 KernelGeneratorBase (const ir::train::TrainableGraph &tgraph)
 
- Public Member Functions inherited from onert::ir::train::TrainableOperationVisitor
virtual ~TrainableOperationVisitor ()=default
 

Additional Inherited Members

- Protected Attributes inherited from onert::backend::train::KernelGeneratorBase
const ir::train::TrainableGraph & _tgraph
 
std::unique_ptr< exec::train::ITrainableFunction > _return_fn
 

Detailed Description

Definition at line 39 of file KernelGenerator.h.

Constructor & Destructor Documentation

◆ KernelGenerator()

onert::backend::train::KernelGenerator::KernelGenerator ( const ir::train::TrainableGraph &  tgraph,
const std::shared_ptr< TensorRegistry > &  tensor_reg,
const std::shared_ptr< ExternalContext > &  external_context,
const exec::train::optimizer::Optimizer *  optimizer 
)

Definition at line 146 of file KernelGenerator.cc.

150 : backend::train::KernelGeneratorBase{tgraph}, _tensor_reg{tensor_reg},
151 _external_context(external_context), _optimizer{optimizer}, _update_funcs{}, _node_to_idx{}
152{
153 tgraph.operations().iterate(
154 [&](const onert::ir::OperationIndex &idx, const onert::ir::IOperation &op) {
155 assert(_node_to_idx.find(&op) == _node_to_idx.end());
156 _node_to_idx[&op] = idx;
157 });
158}

References onert::util::ObjectManager< Index, Object >::iterate(), and onert::ir::train::TrainableGraph::operations().

Member Function Documentation

◆ generate()

std::unique_ptr< exec::train::TrainableFnSequence > onert::backend::train::KernelGenerator::generate ( ir::OperationIndex  op_ind)
overridevirtual

Implements onert::backend::train::KernelGeneratorBase.

Definition at line 114 of file KernelGenerator.cc.

115{
116 // NOTE This function is related to planning tensors. If you change this function, you should
117 // also consider to change planning tensors.
118
119 auto ret = std::make_unique<exec::train::TrainableFnSequence>();
120
121 const auto &op = _tgraph.operation(idx);
122
123 // NOTE appendBackPropAccumulators() must be called before appending _return_fn to
124 // TrainableFnSequence as long as both are appended to the same TrainableFnSequence.
125 appendBackPropAccumulators(op, idx, _tensor_reg.get(), ret.get());
126
127 op.accept(*this);
128 assert(_return_fn);
129 ret->append(std::move(_return_fn));
130
131 for (auto &&update_fn : _update_funcs)
132 ret->append(std::move(update_fn));
133 _update_funcs.clear();
134
135 for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
136 {
137 auto tensor = _tensor_reg->getNonConstTensor(ind);
138 if (tensor)
139 {
140 tensor->increase_ref();
141 }
142 }
143 return ret;
144}
std::unique_ptr< exec::train::ITrainableFunction > _return_fn
const ir::train::TrainableGraph & _tgraph
const ITrainableOperation & operation(OperationIndex index) const

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::backend::train::KernelGeneratorBase::_tgraph, onert::ir::train::TrainableGraph::operation(), and onert::ir::UNDEFINED.

◆ visit() [1/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::BinaryArithmetic &  node)
override

Definition at line 160 of file KernelGenerator.cc.

161{
162 using ir::train::operation::BinaryArithmetic;
163
164 const auto output_index{node.getOutputs().at(0)};
165 const auto lhs_index{node.getInputs().at(BinaryArithmetic::Input::LHS)};
166 const auto rhs_index{node.getInputs().at(BinaryArithmetic::Input::RHS)};
167
168 const auto arithmetic_type = node.param().arithmetic_type;
169 const auto activation = node.param().activation;
170
171 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
172 auto lhs_tensor = _tensor_reg->getPortableTensor(lhs_index);
173 auto rhs_tensor = _tensor_reg->getPortableTensor(rhs_index);
174
175 auto fn = std::make_unique<ops::BinaryArithmeticLayer>();
176 fn->configure(lhs_tensor, rhs_tensor, output_tensor, activation,
177 static_cast<cpu::ops::ArithmeticType>(arithmetic_type));
178
179 if (node.isRequiredForBackward())
180 {
181 auto back_prop_output_tensor = getBackPropOut(output_index);
182 auto back_prop_lhs_tensor = getBackPropIn(node, lhs_index);
183 auto back_prop_rhs_tensor = getBackPropIn(node, rhs_index);
184
185 fn->configureBackward(back_prop_lhs_tensor, back_prop_rhs_tensor, back_prop_output_tensor,
186 activation, static_cast<train::ops::ArithmeticType>(arithmetic_type));
187 }
188 _return_fn = std::move(fn);
189}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::operation::BinaryArithmetic::Param::activation, onert::ir::operation::BinaryArithmetic::Param::arithmetic_type, onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), and onert::ir::operation::BinaryArithmetic::param().

◆ visit() [2/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Conv2D &  node)
override

Definition at line 191 of file KernelGenerator.cc.

192{
193 using ir::train::operation::Conv2D;
194
195 const auto out_index{node.getOutputs().at(0)};
196 const auto in_index{node.getInputs().at(Conv2D::Input::INPUT)};
197 const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
198 const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
199
200 auto out_tensor = _tensor_reg->getPortableTensor(out_index);
201 auto in_tensor = _tensor_reg->getPortableTensor(in_index);
202 auto ker_tensor = _tensor_reg->getTrainableTensor(ker_index);
203 auto bias_tensor = _tensor_reg->getTrainableTensor(bias_index);
204
205 // Generate kernel
206 const auto stride = node.param().stride;
207 const auto activation = node.param().activation;
208 const auto &param_padding = node.param().padding;
209 const auto dilation = node.param().dilation;
210 auto fn = std::make_unique<ops::ConvolutionLayer>();
211
212 auto &operands = _tgraph.operands();
213 const auto ifm_shape = operands.at(in_index).shape().asFeature();
214 const auto ofm_shape = operands.at(out_index).shape().asFeature();
215 // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
216 const auto &ker_shape = operands.at(ker_index).shape();
217 const auto ker_height = ker_shape.dim(1);
218 const auto ker_width = ker_shape.dim(2);
219
220 const auto padding =
221 ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
222 dilation.width_factor, dilation.height_factor);
223
224 const bool is_cacheable_weights = false;
225 fn->configure(in_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left, padding.right,
226 padding.top, padding.bottom, stride.horizontal, stride.vertical,
227 dilation.width_factor, dilation.height_factor, activation, out_tensor,
228 is_cacheable_weights);
229
230 auto ker_grad_tensor = _tensor_reg->getGradientTensor(ker_index);
231 auto bias_grad_tensor = _tensor_reg->getGradientTensor(bias_index);
232
233 if (node.isRequiredForBackward())
234 {
235
236 auto out_back_prop_tensor = getBackPropOut(out_index);
237 auto in_back_prop_tensor = getBackPropIn(node, in_index);
238
239 fn->configureBackward(ker_tensor, in_back_prop_tensor, ker_grad_tensor, bias_grad_tensor,
240 out_back_prop_tensor, activation);
241
242 // Generate GradientApplier
243 if (bias_tensor)
244 _update_funcs.emplace_back(
245 generateGradientApplier(_optimizer, bias_grad_tensor, bias_tensor));
246 _update_funcs.emplace_back(generateGradientApplier(_optimizer, ker_grad_tensor, ker_tensor));
247 }
248
249 _return_fn = std::move(fn);
250}
const Operands & operands() const override
const Object & at(const Index &index) const
Get the object that is associated with the given index.
const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape, const FeatureShape &ofm_shape, const Stride &stride, uint32_t kw, uint32_t kh, uint32_t dwf=1, uint32_t dhf=1)
Definition Padding.cc:133

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::backend::train::KernelGeneratorBase::_tgraph, onert::ir::operation::Conv2D::Param::activation, onert::util::ObjectManager< Index, Object >::at(), onert::ir::OperandIndexSequence::at(), onert::ir::calculatePadding(), onert::ir::operation::Conv2D::Param::dilation, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::train::TrainableGraph::operands(), onert::ir::operation::Conv2D::Param::padding, onert::ir::operation::Conv2D::param(), and onert::ir::operation::Conv2D::Param::stride.

◆ visit() [3/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::DepthwiseConv2D &  node)
override

Definition at line 252 of file KernelGenerator.cc.

253{
254 using ir::train::operation::DepthwiseConv2D;
255
256 const auto ofm_index{node.getOutputs().at(0)};
257 const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
258 const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
259 const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};
260
261 auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
262 auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
263 auto ker_tensor = _tensor_reg->getTrainableTensor(ker_index);
264 auto bias_tensor = _tensor_reg->getTrainableTensor(bias_index);
265
266 const auto stride = node.param().stride;
267 const auto &operands = _tgraph.operands();
268 const auto ofm_shape = operands.at(ofm_index).shape().asFeature();
269 const auto ifm_shape = operands.at(ifm_index).shape().asFeature();
270 // Kernel format is [1, kernel_height, kernel_width, depth_out].
271 const auto &ker_shape = operands.at(ker_index).shape();
272 const auto ker_height = ker_shape.dim(1);
273 const auto ker_width = ker_shape.dim(2);
274 const auto dilation_width = node.param().dilation.width_factor;
275 const auto dilation_height = node.param().dilation.height_factor;
276 const auto padding = ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride,
277 ker_width, ker_height, dilation_width, dilation_height);
278 const auto multiplier = node.param().multiplier;
279 const auto activation = node.param().activation;
280
281 auto fn = std::make_unique<ops::DepthwiseConvolutionLayer>();
282
283 fn->configure(ifm_tensor, ker_tensor, bias_tensor, padding.left, padding.right, padding.top,
284 padding.bottom, stride.horizontal, stride.vertical, multiplier, dilation_width,
285 dilation_height, activation, ofm_tensor, _external_context);
286
287 if (node.isRequiredForBackward())
288 {
289 auto ker_grad_tensor = _tensor_reg->getGradientTensor(ker_index);
290 auto bias_grad_tensor = _tensor_reg->getGradientTensor(bias_index);
291
292 auto ofm_back_prop_tensor = getBackPropOut(ofm_index);
293 auto ifm_back_prop_tensor = getBackPropIn(node, ifm_index);
294
295 fn->configureBackward(ifm_back_prop_tensor, ker_grad_tensor, bias_grad_tensor,
296 ofm_back_prop_tensor, activation);
297
298 // Generate GradientApplier
299 if (bias_tensor)
300 _update_funcs.emplace_back(
301 generateGradientApplier(_optimizer, bias_grad_tensor, bias_tensor));
302 _update_funcs.emplace_back(generateGradientApplier(_optimizer, ker_grad_tensor, ker_tensor));
303 }
304
305 _return_fn = std::move(fn);
306}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::backend::train::KernelGeneratorBase::_tgraph, onert::ir::operation::DepthwiseConv2D::Param::activation, onert::util::ObjectManager< Index, Object >::at(), onert::ir::OperandIndexSequence::at(), onert::ir::calculatePadding(), onert::ir::operation::DepthwiseConv2D::Param::dilation, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::Dilation::height_factor, onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::operation::DepthwiseConv2D::Param::multiplier, onert::ir::train::TrainableGraph::operands(), onert::ir::operation::DepthwiseConv2D::Param::padding, onert::ir::operation::DepthwiseConv2D::param(), onert::ir::operation::DepthwiseConv2D::Param::stride, and onert::ir::Dilation::width_factor.

◆ visit() [4/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::ElementwiseActivation &  node)
override

Definition at line 308 of file KernelGenerator.cc.

309{
310 using ir::train::operation::ElementwiseActivation;
311
312 const auto output_index{node.getOutputs().at(0)};
313 const auto input_index{node.getInputs().at(ElementwiseActivation::Input::INPUT)};
314
315 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
316 auto input_tensor = _tensor_reg->getPortableTensor(input_index);
317
318 auto fn = std::make_unique<ops::ElementwiseActivationLayer>();
319
320 auto convertToInferActivationType = [](const ir::operation::ElementwiseActivation::Type &type) {
321 switch (type)
322 {
325 default:
326 throw std::invalid_argument("Unsupported ElementwiseActivation::Type");
327 }
328 };
329
330 fn->configure(input_tensor, output_tensor, node.param().alpha, node.param().beta,
331 convertToInferActivationType(node.param().op_type));
332
333 if (node.isRequiredForBackward())
334 {
335 auto back_prop_input_tensor = getBackPropIn(node, input_index);
336 auto back_prop_output_tensor = getBackPropOut(output_index);
337
338 fn->configureBackward(input_tensor, back_prop_input_tensor, back_prop_output_tensor,
339 node.param().alpha, node.param().beta,
340 convertElementwiseActivationType(node.param().op_type));
341 }
342
343 _return_fn = std::move(fn);
344}
type
Definition infer.py:18

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::operation::ElementwiseActivation::Param::alpha, onert::ir::OperandIndexSequence::at(), onert::ir::operation::ElementwiseActivation::Param::beta, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::backend::cpu::ops::kReLU, onert::ir::operation::ElementwiseActivation::Param::op_type, onert::ir::operation::ElementwiseActivation::param(), and onert::ir::operation::ElementwiseActivation::RELU.

◆ visit() [5/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::FullyConnected &  node)
override

Definition at line 346 of file KernelGenerator.cc.

347{
348 using ir::train::operation::FullyConnected;
349
350 const auto out_index{node.getOutputs().at(0)};
351 const auto in_index{node.getInputs().at(FullyConnected::Input::INPUT)};
352 const auto weights_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
353 const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
354
355 auto out_tensor = _tensor_reg->getPortableTensor(out_index);
356 auto in_tensor = _tensor_reg->getPortableTensor(in_index);
357 auto weights_tensor = _tensor_reg->getTrainableTensor(weights_index);
358 auto bias_tensor = _tensor_reg->getTrainableTensor(bias_index);
359
360 // Generate kernel
361 const auto activation = node.param().activation;
362 const auto weights_format = node.param().weights_format;
363
364 auto fn = std::make_unique<ops::FullyConnectedLayer>();
365
366 fn->configure(in_tensor, weights_tensor, bias_tensor, activation, weights_format, out_tensor,
367 _external_context);
368
369 if (node.isRequiredForBackward())
370 {
371 auto out_back_prop_tensor = getBackPropOut(out_index);
372 auto in_back_prop_tensor = getBackPropIn(node, in_index);
373 auto weights_grad_tensor = _tensor_reg->getGradientTensor(weights_index);
374 auto bias_grad_tensor = _tensor_reg->getGradientTensor(bias_index);
375
376 fn->configureBackward(in_tensor, weights_tensor, out_tensor, in_back_prop_tensor,
377 weights_grad_tensor, bias_grad_tensor, out_back_prop_tensor, activation,
378 weights_format);
379
380 // Generate GradientAppliers
381 if (bias_tensor)
382 _update_funcs.emplace_back(
383 generateGradientApplier(_optimizer, bias_grad_tensor, bias_tensor));
384 _update_funcs.emplace_back(
385 generateGradientApplier(_optimizer, weights_grad_tensor, weights_tensor));
386 }
387
388 _return_fn = std::move(fn);
389}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::operation::FullyConnected::Param::activation, onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::operation::FullyConnected::param(), and onert::ir::operation::FullyConnected::Param::weights_format.

◆ visit() [6/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Loss &  node)
override

Definition at line 391 of file KernelGenerator.cc.

392{
393 using ir::train::operation::Loss;
394
395 const auto output_index{node.getOutputs().at(0)};
396 const auto y_pred_index{node.getInputs().at(Loss::Y_PRED)};
397 const auto y_true_index{node.getInputs().at(Loss::Y_TRUE)};
398
399 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
400 auto y_pred_tensor = _tensor_reg->getPortableTensor(y_pred_index);
401 auto y_true_tensor = _tensor_reg->getPortableTensor(y_true_index);
402
403 // TODO Use BackPropTensor directly instead of DisposableTensor if y_pred is always used by only
404 // loss
405 auto back_prop_y_pred_tensor = getBackPropIn(node, y_pred_index);
406
407 auto loss_code = node.param().loss_code;
408 auto loss_param = node.param().loss_param;
409 const auto reduction_type = node.param().reduction_type;
410
411 switch (loss_code)
412 {
414 {
415 auto fn = std::make_unique<ops::LossMeanSquaredErrorLayer>();
416 fn->configure(y_pred_tensor, y_true_tensor, output_tensor, back_prop_y_pred_tensor,
417 reduction_type);
418 _return_fn = std::move(fn);
419 break;
420 }
422 {
423 const auto y_pred_op_code = node.y_pred_op_code();
424 bool is_normalization_required = (y_pred_op_code != ir::OpCode::Softmax);
425 auto fn = std::make_unique<ops::LossCategoricalCrossentropyLayer>();
426 fn->configure(y_pred_tensor, y_true_tensor, output_tensor, back_prop_y_pred_tensor,
427 reduction_type, loss_param.cce.axis, loss_param.cce.label_smoothing,
428 is_normalization_required);
429 _return_fn = std::move(fn);
430 break;
431 }
432 default:
433 throw std::runtime_error("LossLayer: unsupported loss type");
434 break;
435 }
436}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::OperandIndexSequence::at(), onert::ir::train::CategoricalCrossentropy, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::LossInfo::loss_code, onert::ir::train::LossInfo::loss_param, onert::ir::train::MeanSquaredError, onert::ir::train::operation::Loss::param(), onert::ir::train::LossInfo::reduction_type, and onert::ir::train::operation::Loss::y_pred_op_code().

◆ visit() [7/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Pad &  node)
override

Definition at line 438 of file KernelGenerator.cc.

439{
440 const auto input_index{node.getInputs().at(ir::operation::Pad::Input::INPUT)};
441 const auto pad_index{node.getInputs().at(ir::operation::Pad::Input::PAD)};
442 const auto output_index{node.getOutputs().at(0)};
443
444 auto input = _tensor_reg->getPortableTensor(input_index);
445 auto pad = _tensor_reg->getPortableTensor(pad_index);
446 auto output = _tensor_reg->getPortableTensor(output_index);
447
448 auto fn = std::make_unique<ops::PadLayer>();
449
450 IPortableTensor *value = nullptr;
451 if (node.getInputs().size() == 3) // isPadV2
452 {
453 const auto value_index{node.getInputs().at(ir::operation::Pad::Input::VALUE)};
454 value = _tensor_reg->getPortableTensor(value_index);
455 }
456
457 fn->configure(input, pad, value, output);
458 if (node.isRequiredForBackward())
459 {
460 auto out_back_prop_tensor = getBackPropOut(output_index);
461 auto in_back_prop_tensor = getBackPropIn(node, input_index);
462 fn->configureBackward(in_back_prop_tensor, out_back_prop_tensor);
463 }
464 _return_fn = std::move(fn);
465}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Pad::INPUT, onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::operation::Pad::PAD, onert::ir::OperandIndexSequence::size(), and onert::ir::operation::Pad::VALUE.

◆ visit() [8/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Pool2D &  node)
override

Definition at line 467 of file KernelGenerator.cc.

468{
469 using ir::train::operation::Pool2D;
470
471 const auto output_index{node.getOutputs().at(0)};
472 const auto input_index{node.getInputs().at(0)};
473
474 const auto &operands = _tgraph.operands();
475 const auto &ofm_shape = operands.at(output_index).shape();
476 const auto &ifm_shape = operands.at(input_index).shape();
477
478 if (ifm_shape.rank() != 4)
479 {
480 throw std::runtime_error(node.name() + " only supports 4D tensor as input");
481 }
482
483 // calculate padding
484 const auto stride = node.param().stride;
485 const auto kh = node.param().kh;
486 const auto kw = node.param().kw;
487 const auto padding = ir::calculatePadding(node.param().padding, ifm_shape.asFeature(),
488 ofm_shape.asFeature(), stride, kw, kh);
489
490 auto out_tensor = _tensor_reg->getPortableTensor(output_index);
491 auto in_tensor = _tensor_reg->getPortableTensor(input_index);
492
493 const auto activation = node.param().activation;
494 const auto pool_type = convertPoolType(node.param().op_type);
495
496 auto fn = std::make_unique<ops::PoolLayer>();
497
498 auto convertToInferPoolType = [](const train::ops::PoolType &pool_type) {
499 switch (pool_type)
500 {
505 default:
506 throw std::runtime_error("PoolLayer: Unsupported pool type yet");
507 }
508 };
509
510 fn->configure(in_tensor, padding.left, padding.right, padding.top, padding.bottom,
511 stride.horizontal, stride.vertical, kw, kh, activation, out_tensor,
512 convertToInferPoolType(pool_type));
513
514 if (node.isRequiredForBackward())
515 {
516 auto out_back_prop_tensor = getBackPropOut(output_index);
517 auto in_back_prop_tensor = getBackPropIn(node, input_index);
518 fn->configureBackward(padding.left, padding.right, padding.top, padding.bottom,
519 stride.horizontal, stride.vertical, kw, kh, activation, pool_type,
520 out_tensor, in_back_prop_tensor, out_back_prop_tensor);
521 }
522
523 _return_fn = std::move(fn);
524}
arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
Definition Convert.cc:283

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::backend::train::KernelGeneratorBase::_tgraph, onert::ir::operation::Pool2D::Param::activation, onert::util::ObjectManager< Index, Object >::at(), onert::ir::OperandIndexSequence::at(), onert::ir::calculatePadding(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::backend::cpu::ops::kAvg, onert::backend::train::ops::kAvg, onert::ir::operation::Pool2D::Param::kh, onert::backend::cpu::ops::kMax, onert::backend::train::ops::kMax, onert::ir::operation::Pool2D::Param::kw, onert::ir::operation::Pool2D::name(), onert::ir::operation::Pool2D::Param::op_type, onert::ir::train::TrainableGraph::operands(), onert::ir::operation::Pool2D::Param::padding, onert::ir::operation::Pool2D::param(), and onert::ir::operation::Pool2D::Param::stride.

◆ visit() [9/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Reduce &  node)
override

Definition at line 526 of file KernelGenerator.cc.

527{
528 using ir::train::operation::Reduce;
529
530 const auto output_index{node.getOutputs().at(0)};
531 const auto input_index{node.getInputs().at(Reduce::Input::INPUT)};
532 const auto axes_index{node.getInputs().at(Reduce::Input::AXES)};
533
534 const auto keep_dims = node.param().keep_dims;
535
536 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
537 auto input_tensor = _tensor_reg->getPortableTensor(input_index);
538 auto axes_tensor = _tensor_reg->getPortableTensor(axes_index);
539
540 if (node.param().reduce_type == ir::operation::Reduce::ReduceType::MEAN)
541 {
542 auto fn = std::make_unique<ops::MeanLayer>();
543 fn->configure(input_tensor, axes_tensor, output_tensor, keep_dims);
544 if (node.isRequiredForBackward())
545 {
546 auto back_prop_output_tensor = getBackPropOut(output_index);
547 auto back_prop_input_tensor = getBackPropIn(node, input_index);
548 fn->configureBackward(back_prop_input_tensor, back_prop_output_tensor);
549 }
550 _return_fn = std::move(fn);
551 }
552 else
553 {
554 throw std::runtime_error("ReduceLayer: unsupported reduce type");
555 }
556}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::operation::Reduce::Param::keep_dims, onert::ir::operation::Reduce::MEAN, onert::ir::operation::Reduce::param(), and onert::ir::operation::Reduce::Param::reduce_type.

◆ visit() [10/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Reshape &  node)
override

Definition at line 558 of file KernelGenerator.cc.

559{
560 using ir::train::operation::Reshape;
561
562 const auto output_index{node.getOutputs().at(0)};
563 const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};
564
565 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
566 auto input_tensor = _tensor_reg->getPortableTensor(input_index);
567
568 // optional 2nd input
569 IPortableTensor *shape_tensor = nullptr;
570
571 if (node.getInputs().size() == 2)
572 {
573 const auto shape_index{node.getInputs().at(ir::operation::Reshape::Input::SHAPE)};
574 shape_tensor = _tensor_reg->getPortableTensor(shape_index);
575 }
576
577 auto fn = std::make_unique<ops::ReshapeLayer>();
578
579 fn->configure(input_tensor, shape_tensor, output_tensor);
580 if (node.isRequiredForBackward())
581 {
582 auto output_back_prop_tensor = getBackPropOut(output_index);
583 auto input_back_prop_tensor = getBackPropIn(node, input_index);
584 fn->configureBackward(input_back_prop_tensor, output_back_prop_tensor);
585 }
586 _return_fn = std::move(fn);
587}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::OperandIndexSequence::at(), onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Reshape::INPUT, onert::ir::train::TrainableOperation::isRequiredForBackward(), onert::ir::operation::Reshape::SHAPE, and onert::ir::OperandIndexSequence::size().

◆ visit() [11/11]

void onert::backend::train::KernelGenerator::visit ( const ir::train::operation::Softmax &  node)
override

Definition at line 589 of file KernelGenerator.cc.

590{
591 using ir::train::operation::Softmax;
592
593 const auto output_index{node.getOutputs().at(0)};
594 const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
595
596 const auto beta = node.param().beta;
597
598 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
599 auto input_tensor = _tensor_reg->getPortableTensor(input_index);
600
601 auto fn = std::make_unique<ops::SoftMaxLayer>();
602
603 fn->configure(input_tensor, beta, output_tensor);
604
605 if (node.isRequiredForBackward())
606 {
607 auto output_back_prop_tensor = getBackPropOut(output_index);
608 auto input_back_prop_tensor = getBackPropIn(node, input_index);
609 fn->configureBackward(input_back_prop_tensor, output_back_prop_tensor);
610 }
611 _return_fn = std::move(fn);
612}

References onert::backend::train::KernelGeneratorBase::_return_fn, onert::ir::OperandIndexSequence::at(), onert::ir::operation::Softmax::Param::beta, onert::ir::Operation::getInputs(), onert::ir::Operation::getOutputs(), onert::ir::operation::Softmax::INPUT, onert::ir::train::TrainableOperation::isRequiredForBackward(), and onert::ir::operation::Softmax::param().


The documentation for this class was generated from the following files: