19#include "../KernelGenerator.h"
21#include "../Validator.h"
28void Validator::visit(
const ir::operation::Conv2D &) {
_supported =
true; }
30void KernelGenerator::visit(
const ir::operation::Conv2D &node)
32 using ir::operation::Conv2D;
34 const auto ofm_index{node.getOutputs().at(0)};
35 const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
36 const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
37 const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
39 auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
40 auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
41 auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
42 auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);
44 const auto stride = node.param().stride;
45 const auto activation = node.param().activation;
46 const auto ¶m_padding = node.param().padding;
47 const auto dilation = node.param().dilation;
48 auto fn = std::make_unique<ops::ConvolutionLayer>();
50 if (_ctx.
at(ifm_index).info().isDynamic() || _ctx.
at(ker_index).info().isDynamic())
53 param_padding.param.right, param_padding.param.top, param_padding.param.bottom,
54 stride.horizontal, stride.vertical, dilation.width_factor, dilation.height_factor,
55 activation, ofm_tensor, _external_context);
60 const auto ifm_shape = _ctx.
at(ifm_index).shape().asFeature();
61 const auto ofm_shape = _ctx.
at(ofm_index).shape().asFeature();
63 const auto &ker_shape = _ctx.
at(ker_index).shape();
64 const auto ker_height = ker_shape.dim(1);
65 const auto ker_width = ker_shape.dim(2);
69 dilation.width_factor, dilation.height_factor);
72 padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical,
73 dilation.width_factor, dilation.height_factor, activation, ofm_tensor,
84 : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
85 _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
86 _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
87 _dilationHeightFactor(1), _activation(ir::Activation::
NONE),
88 _conv_kernel(new
nnfw::ruy::
Conv()), _prepare(false)
// NOTE(review): orphaned fragment of a float convolution helper (original lines
// 97-116); the intervening lines were lost in extraction. Only the
// activation-range locals and the tail of the kernel invocation survive.
97 float output_activation_min = 0, output_activation_max = 0;
// Tail of a call that presumably hands the ruy context to the conv kernel -- verify.
116 _external_context->ruy_context());
121 const uint32_t paddingLeft,
const uint32_t paddingRight,
122 const uint32_t paddingTop,
const uint32_t paddingBottom,
123 const uint32_t strideWidth,
const uint32_t strideHeight,
124 const uint32_t dilationWidthFactor,
125 const uint32_t dilationHeightFactor,
127 const std::shared_ptr<ExternalContext> &external_context)
132 _paddingType = paddingType;
133 _paddingLeft = paddingLeft;
134 _paddingRight = paddingRight;
135 _paddingTop = paddingTop;
136 _paddingBottom = paddingBottom;
137 _strideWidth = strideWidth;
138 _strideHeight = strideHeight;
139 _dilationWidthFactor = dilationWidthFactor;
140 _dilationHeightFactor = dilationHeightFactor;
141 _activation = activation;
143 _external_context = external_context;
// NOTE(review): fragments of what looks like ConvolutionLayer::run() (original
// lines 152-190) plus the tail of prepare() (line 198). Too many lines were
// lost in extraction to reconstruct safely; comments below are hedged.
// Feature-shape views of the bound tensors -- presumably used to recompute
// explicit padding for dynamic shapes; verify against the full source.
152 const auto ifm_shape = _input->
getShape().asFeature();
153 const auto ofm_shape = _output->
getShape().asFeature();
// Spatial kernel extent from dims 1 and 2 -- assumes
// [depth_out, kernel_height, kernel_width, depth_in] layout; TODO confirm.
155 const auto ker_shape = _kernel->
getShape();
156 const auto ker_height = ker_shape.dim(1);
157 const auto ker_width = ker_shape.dim(2);
// Rebuild a symbolic padding descriptor from the stored members...
164 param_padding.
type = _paddingType;
167 param_padding.
param.
top = _paddingTop;
// ...tail of a calculatePadding(...) call (see the scraped signature below).
172 _dilationWidthFactor, _dilationHeightFactor);
// Cache the resolved explicit padding back into the members.
174 _paddingLeft = padding.left;
175 _paddingRight = padding.right;
176 _paddingTop = padding.top;
177 _paddingBottom = padding.bottom;
// Dispatch on element type; only FLOAT32 is handled, everything else throws.
179 if (_input->
data_type() == OperandType::FLOAT32)
185 throw std::runtime_error{
"Conv: unsupported data type"};
// Tail of the _conv_kernel->prepare(...) call (matches the scraped
// nnfw::ruy prepare() signature).
198 _strideWidth, _strideHeight, _dilationWidthFactor, _dilationHeightFactor);
void Conv(const float *input_data, const Dims< 4 > &input_dims, const float *filter_data, const Dims< 4 > &filter_dims, const float *bias_data, const Dims< 4 > &bias_dims, int stride_width, int stride_height, int pad_width, int pad_height, float *output_data, const Dims< 4 > &output_dims, float *im2col_data, const Dims< 4 > &im2col_dims)
void prepare(const Shape &input_shape, const Shape &kernel_shape, const Shape &output_shape, uint32_t stride_width, uint32_t stride_height, uint32_t dilation_width_factor, uint32_t dilation_height_factor)
A tensor class that is portable for other backends.
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
bool is_dynamic() const override final
Return true if the tensor needs dynamic allocation, meaning that during compile-time the output shape...
bool is_constant() const override final
Return true if the tensor is constant.
virtual uint8_t * buffer() const =0
std::unique_ptr< exec::IFunction > _return_fn
void configure(const IPortableTensor *input, const IPortableTensor *kernel, const IPortableTensor *bias, ir::PaddingType _paddingType, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t dilationWidthFactor, const uint32_t dilationHeightFactor, const ir::Activation activation, IPortableTensor *output, const std::shared_ptr< ExternalContext > &external_context)
const Object & at(const Index &index) const
Get the object that is associated with the given index.
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape, const FeatureShape &ofm_shape, const Stride &stride, uint32_t kw, uint32_t kh, uint32_t dwf=1, uint32_t dhf=1)
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
float float_activation_max
PaddingValues padding_values
float float_activation_min
int16_t dilation_height_factor
int16_t dilation_width_factor