#include "../KernelGenerator.h"
#include "../Validator.h"

#include <cstdint>
#include <memory>
#include <stdexcept>
#include <utility>
27void Validator::visit(
const ir::operation::Pad &) {
_supported =
true; }
29void KernelGenerator::visit(
const ir::operation::Pad &node)
33 const auto output_index{node.getOutputs().at(0)};
35 auto input = _tensor_reg->getPortableTensor(input_index);
36 auto pad = _tensor_reg->getPortableTensor(pad_index);
37 auto output = _tensor_reg->getPortableTensor(output_index);
39 auto fn = std::make_unique<ops::PadLayer>();
41 IPortableTensor *value =
nullptr;
42 if (node.getInputs().size() == 3)
45 value = _tensor_reg->getPortableTensor(value_index);
48 fn->configure(input, pad, value, output);
58 : _input(nullptr), _pad(nullptr), _value(nullptr), _output(nullptr), _constantValueData()
67 const auto pad_data =
reinterpret_cast<const int32_t *
>(
_pad->
buffer());
70 getBuffer<T>(
_output), constant_value_data);
92 case OperandType::FLOAT32:
95 case OperandType::QUANT_UINT8_ASYMM:
99 padImpl<uint8_t>(&pad_value);
106 case OperandType::QUANT_INT8_ASYMM:
110 padImpl<int8_t>(&pad_value);
118 throw std::runtime_error{
"Pad: unsupported data type"};
A tensor class that is portable for other backends.
int32_t data_zero_point() const override final
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
virtual uint8_t *buffer() const = 0
std::unique_ptr< exec::IFunction > _return_fn
IPortableTensor * _output
ConstDataPtr _constantValueData
const IPortableTensor * _input
const IPortableTensor * _value
void padImpl(const T *constant_value_data)
const IPortableTensor * _pad
void configure(const IPortableTensor *input, const IPortableTensor *pad, const IPortableTensor *value, IPortableTensor *output)
nnfw::cker::Shape getShape(const IPortableTensor *tensor)