#include "Builders.h"
#include "kernels/Utils.h"

#include "PALDepthwiseConv2D.h"

namespace luci_interpreter
{
namespace
{

void evalFloat(const circle::Tensor *input, const circle::Tensor *filter,
               const circle::Tensor *bias, const circle::Tensor *output,
               const circle::DepthwiseConv2DOptions *options, BaseRuntimeGraph *runtime_graph)
{
  float activation_min{};
  float activation_max{};
  kernels::calculateActivationRange(luci_actfunc(options->fused_activation_function()),
                                    &activation_min, &activation_max);

  luci_interpreter_pal::ConvParams params{};
  params.padding_values.height = computeConvPadding(
    input, filter, options->padding(), options->stride_h(), options->dilation_h_factor(), 1);
  params.padding_values.width = computeConvPadding(
    input, filter, options->padding(), options->stride_w(), options->dilation_w_factor(), 2);
  params.stride_height = options->stride_h();
  params.stride_width = options->stride_w();
  params.dilation_height_factor = options->dilation_h_factor();
  params.dilation_width_factor = options->dilation_w_factor();
  params.float_activation_min = activation_min;
  params.float_activation_max = activation_max;
  params.depth_multiplier = options->depth_multiplier();

  auto *input_data = runtime_graph->getDataByTensor(input);
  auto *output_data = runtime_graph->getDataByTensor(output);

  auto *filter_data = runtime_graph->getConstDataByTensor(filter);
  auto *bias_data = runtime_graph->getConstDataByTensor(bias);

  // Pack tensor dimensions into small int32_t arrays for the PAL kernel.
  int32_t input_shape[kMaxSmallSize];
  kernels::getTensorDims(input, runtime_graph, input_shape);

  int32_t filter_shape[kMaxSmallSize];
  kernels::getTensorDims(filter, runtime_graph, filter_shape);

  int32_t output_shape[kMaxSmallSize];
  kernels::getTensorDims(output, runtime_graph, output_shape);

  luci_interpreter_pal::DepthwiseConv2D(
    params, input_shape, kernels::getTensorData<float>(input_data), filter_shape,
    kernels::getTensorData<float>(filter_data), kernels::getTensorData<float>(bias_data),
    output_shape, kernels::getTensorData<float>(output_data));
}

} // namespace

void configure_kernel_CircleDepthwiseConv2D(const circle::Operator *cur_op,
                                            BaseRuntimeGraph *runtime_graph)
{
  // 'kernel' is the per-operator helper (built from cur_op) that resolves the tensors.
  const auto input = kernel.input();
  const auto filter = kernel.filter();
  const auto bias = kernel.bias();
  const auto output = kernel.output();

  const auto *options = cur_op->builtin_options_as_DepthwiseConv2DOptions();

  if (Tensor::element_type(input) == DataType::FLOAT32 &&
      Tensor::element_type(filter) == DataType::FLOAT32)
  {
    LUCI_INTERPRETER_CHECK(bias == nullptr || Tensor::element_type(bias) == DataType::FLOAT32);
  }
  else
  {
    assert(false && "Unsupported type.");
  }

  switch (options->fused_activation_function())
  {
    case circle::ActivationFunctionType_NONE:
    case circle::ActivationFunctionType_RELU:
    case circle::ActivationFunctionType_RELU6:
    case circle::ActivationFunctionType_RELU_N1_TO_1:
      break;
    default:
      assert(false && "Unsupported fused activation");
  }
}

void execute_kernel_CircleDepthwiseConv2D(const circle::Operator *cur_op,
                                          BaseRuntimeGraph *runtime_graph)
{
  // 'kernel' resolves the operator's tensors, as in the configure step above.
  const auto input = kernel.input();
  const auto weights = kernel.filter();
  const auto bias = kernel.bias();
  const auto output = kernel.output();

  const auto *options = cur_op->builtin_options_as_DepthwiseConv2DOptions();

  switch (Tensor::element_type(input))
  {
    case DataType::FLOAT32:
      if (Tensor::element_type(weights) == DataType::FLOAT32)
      {
        evalFloat(input, weights, bias, output, options, runtime_graph);
        break;
      }
    default:
      assert(false && "Unsupported type.");
  }
}

} // namespace luci_interpreter