31 : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
32 _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
33 _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
34 _dilationHeightFactor(1), _activation(ir::Activation::
NONE),
35 _conv_kernel(new
nnfw::ruy::
Conv()), _prepare(false)
44 float output_activation_min = 0, output_activation_max = 0;
63 _external_context->ruy_context());
68 const uint32_t paddingLeft,
const uint32_t paddingRight,
69 const uint32_t paddingTop,
const uint32_t paddingBottom,
70 const uint32_t strideWidth,
const uint32_t strideHeight,
71 const uint32_t dilationWidthFactor,
72 const uint32_t dilationHeightFactor,
74 const std::shared_ptr<ExternalContext> &external_context)
79 _paddingType = paddingType;
80 _paddingLeft = paddingLeft;
81 _paddingRight = paddingRight;
82 _paddingTop = paddingTop;
83 _paddingBottom = paddingBottom;
84 _strideWidth = strideWidth;
85 _strideHeight = strideHeight;
86 _dilationWidthFactor = dilationWidthFactor;
87 _dilationHeightFactor = dilationHeightFactor;
88 _activation = activation;
90 _external_context = external_context;
99 const auto ifm_shape = _input->
getShape().asFeature();
100 const auto ofm_shape = _output->
getShape().asFeature();
102 const auto ker_shape = _kernel->
getShape();
103 const auto ker_height = ker_shape.dim(1);
104 const auto ker_width = ker_shape.dim(2);
111 param_padding.
type = _paddingType;
114 param_padding.
param.
top = _paddingTop;
119 _dilationWidthFactor, _dilationHeightFactor);
121 _paddingLeft = padding.left;
122 _paddingRight = padding.right;
123 _paddingTop = padding.top;
124 _paddingBottom = padding.bottom;
126 if (_input->
data_type() == OperandType::FLOAT32)
132 throw std::runtime_error{
"Conv: unsupported data type"};
145 _strideWidth, _strideHeight, _dilationWidthFactor, _dilationHeightFactor);
void Conv(const float *input_data, const Dims< 4 > &input_dims, const float *filter_data, const Dims< 4 > &filter_dims, const float *bias_data, const Dims< 4 > &bias_dims, int stride_width, int stride_height, int pad_width, int pad_height, float *output_data, const Dims< 4 > &output_dims, float *im2col_data, const Dims< 4 > &im2col_dims)
void prepare(const Shape &input_shape, const Shape &kernel_shape, const Shape &output_shape, uint32_t stride_width, uint32_t stride_height, uint32_t dilation_width_factor, uint32_t dilation_height_factor)
A tensor class that is portable for other backends.
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
bool is_dynamic() const override final
Return true if the tensor needs dynamic allocation, meaning that during compile-time the output shape...
bool is_constant() const override final
Return true if the tensor is constant.
virtual uint8_t * buffer() const =0
void configure(const IPortableTensor *input, const IPortableTensor *kernel, const IPortableTensor *bias, ir::PaddingType _paddingType, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t dilationWidthFactor, const uint32_t dilationHeightFactor, const ir::Activation activation, IPortableTensor *output, const std::shared_ptr< ExternalContext > &external_context)
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape, const FeatureShape &ofm_shape, const Stride &stride, uint32_t kw, uint32_t kh, uint32_t dwf=1, uint32_t dhf=1)
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
float float_activation_max
PaddingValues padding_values
float float_activation_min
int16_t dilation_height_factor
int16_t dilation_width_factor