const circle::Tensor *input1;
const circle::Tensor *input2;
const circle::Tensor *output;
runtime_kernel.readKernel(op_index, runtime_context);

input1 = runtime_kernel.inputs[input1TensorIdx];
input2 = runtime_kernel.inputs[input2TensorIdx];
output = runtime_kernel.outputs[outputTensorIdx];
assert(input1 != nullptr);
assert(input2 != nullptr);
assert(output != nullptr);
input1_data = runtime_kernel.inputs_data[input1TensorIdx];
input2_data = runtime_kernel.inputs_data[input2TensorIdx];
output_data = runtime_kernel.outputs_data[outputTensorIdx];
assert(input1_data != nullptr);
assert(input2_data != nullptr);
assert(output_data != nullptr);
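
// The dispatch below relies on a runtime shape per tensor and on a params struct that
// carries the float activation range. A minimal sketch of that setup, assuming
// core::OMRuntimeShape is constructible from a const circle::Tensor * (illustration only;
// the names match the calls that follow):
core::OMRuntimeShape input1_shape(input1);
core::OMRuntimeShape input2_shape(input2);
core::OMRuntimeShape output_shape(output);
core::BinaryArithmeticBroadcastParams params{};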
switch (input1->type())
{
  case circle::TensorType_FLOAT32:
  {
    calculateActivationRange(circle::ActivationFunctionType::ActivationFunctionType_NONE,
                             &params.float_activation_min, &params.float_activation_max);

    // Broadcast path (taken when the input shapes differ):
    BroadcastSquaredDifference4DSlow(
      params, input1_shape, core::utils::castInputData<float>(input1_data), input2_shape,
      core::utils::castInputData<float>(input2_data), output_shape,
      core::utils::castOutputData<float>(output_data));
    // Element-wise path (taken when the shapes match):
    SquaredDifference(params, input1_shape.flatSize(),
                      core::utils::castInputData<float>(input1_data),
                      core::utils::castInputData<float>(input2_data),
                      core::utils::castOutputData<float>(output_data));
  }
  break;
  case circle::TensorType_INT8:
  {
    calculateActivationRange(circle::ActivationFunctionType::ActivationFunctionType_NONE,
                             &params.float_activation_min, &params.float_activation_max);

    core::QuantizationParams in1_qparams{
      (*input1->quantization()->scale())[0],
      static_cast<int32_t>((*input1->quantization()->zero_point())[0])};
    core::QuantizationParams in2_qparams{
      (*input2->quantization()->scale())[0],
      static_cast<int32_t>((*input2->quantization()->zero_point())[0])};
    core::QuantizationParams out_qparams{
      (*output->quantization()->scale())[0],
      static_cast<int32_t>((*output->quantization()->zero_point())[0])};

    // Broadcast path (taken when the input shapes differ):
    QuantizedBroadcastSquaredDifference4DSlow(
      params, input1_shape, in1_qparams, core::utils::castInputData<int8_t>(input1_data),
      input2_shape, in2_qparams, core::utils::castInputData<int8_t>(input2_data), output_shape,
      out_qparams, core::utils::castOutputData<int8_t>(output_data));

    // Element-wise path (taken when the shapes match):
    QuantizedSquaredDifference(params, input1_shape.flatSize(), in1_qparams,
                               core::utils::castInputData<int8_t>(input1_data), in2_qparams,
                               core::utils::castInputData<int8_t>(input2_data), out_qparams,
                               core::utils::castOutputData<int8_t>(output_data));
  }
  break;
  default:
    assert(false && "Unsupported type.");
}
template <typename T>
OMStatus BroadcastSquaredDifference4DSlow(const core::BinaryArithmeticBroadcastParams &params,
                                          const core::OMRuntimeShape &input1_shape, const T *input1_data,
                                          const core::OMRuntimeShape &input2_shape, const T *input2_data,
                                          const core::OMRuntimeShape &output_shape, T *output_data);

template <typename T>
OMStatus QuantizedBroadcastSquaredDifference4DSlow(
  const core::BinaryArithmeticBroadcastParams &params, const core::OMRuntimeShape &input1_shape,
  const onert_micro::core::QuantizationParams &input1_qparams, const T *input1_data,
  const core::OMRuntimeShape &input2_shape,
  const onert_micro::core::QuantizationParams &input2_qparams, const T *input2_data,
  const core::OMRuntimeShape &output_shape,
  const onert_micro::core::QuantizationParams &output_qparams, T *output_data);

template <typename T>
OMStatus QuantizedSquaredDifference(const core::BinaryArithmeticBroadcastParams &params,
                                    const int flat_size,
                                    const onert_micro::core::QuantizationParams &input1_qparams,
                                    const T *input1_data,
                                    const onert_micro::core::QuantizationParams &input2_qparams,
                                    const T *input2_data,
                                    const onert_micro::core::QuantizationParams &output_qparams,
                                    T *output_data);
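
// A minimal sketch of what the element-wise quantized kernel could look like, given the
// QuantizedSquaredDifference signature above. It assumes QuantizationParams exposes
// `scale` and `zero_point` members, that OMStatus has an `Ok` value, and that
// <algorithm>, <cmath> and <limits> are available. This is an illustration of the
// dequantize -> square -> requantize idea, not the actual PAL implementation.
template <typename T>
OMStatus QuantizedSquaredDifference(const core::BinaryArithmeticBroadcastParams &params,
                                    const int flat_size,
                                    const onert_micro::core::QuantizationParams &input1_qparams,
                                    const T *input1_data,
                                    const onert_micro::core::QuantizationParams &input2_qparams,
                                    const T *input2_data,
                                    const onert_micro::core::QuantizationParams &output_qparams,
                                    T *output_data)
{
  for (int i = 0; i < flat_size; ++i)
  {
    // Dequantize both operands to float.
    const float x = (static_cast<int32_t>(input1_data[i]) - input1_qparams.zero_point) *
                    static_cast<float>(input1_qparams.scale);
    const float y = (static_cast<int32_t>(input2_data[i]) - input2_qparams.zero_point) *
                    static_cast<float>(input2_qparams.scale);

    // Squared difference, clamped to the float activation range carried in params.
    const float diff = x - y;
    const float result =
      std::min(std::max(diff * diff, params.float_activation_min), params.float_activation_max);

    // Requantize with the output scale/zero-point and clamp to the output type's range.
    const int32_t unclamped =
      static_cast<int32_t>(std::round(result / static_cast<float>(output_qparams.scale))) +
      output_qparams.zero_point;
    const int32_t clamped =
      std::max<int32_t>(std::numeric_limits<T>::min(),
                        std::min<int32_t>(std::numeric_limits<T>::max(), unclamped));
    output_data[i] = static_cast<T>(clamped);
  }
  return Ok;
}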