// Tensor metadata for the two operands and the result
const circle::Tensor *input1;
const circle::Tensor *input2;
const circle::Tensor *output;

// Read the kernel's tensor descriptors from the runtime context
status = runtime_kernel.readKernel(op_index, runtime_context);

input1 = runtime_kernel.inputs[input1TensorIdx];
input2 = runtime_kernel.inputs[input2TensorIdx];
output = runtime_kernel.outputs[outputTensorIdx];
assert(input1 != nullptr);
assert(input2 != nullptr);
assert(output != nullptr);

// Fetch the raw tensor buffers from runtime storage
status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);

input1_data = runtime_kernel.inputs_data[input1TensorIdx];
input2_data = runtime_kernel.inputs_data[input2TensorIdx];
output_data = runtime_kernel.outputs_data[outputTensorIdx];
assert(input1_data != nullptr);
assert(input2_data != nullptr);
assert(output_data != nullptr);
switch (input1->type())
{
  case circle::TensorType_FLOAT32:
  {
    // Tail of the activation-range setup; the call head is elided in this excerpt
    // ...
      circle::ActivationFunctionType::ActivationFunctionType_NONE, &params.float_activation_min,
      &params.float_activation_max);

    // Broadcast float path: arguments match BroadcastSquaredDifference4DSlow below
    // ...
      params, input1_shape, core::utils::castInputData<float>(input1_data), input2_shape,
      core::utils::castInputData<float>(input2_data), output_shape,
      core::utils::castOutputData<float>(output_data));

    // Non-broadcast float path (element-wise over the flattened tensors)
    // ...
      core::utils::castInputData<float>(input1_data),
      core::utils::castInputData<float>(input2_data),
      core::utils::castOutputData<float>(output_data));
  }
  break;

  case circle::TensorType_INT8:
  {
    // Activation-range setup for the quantized path; call head elided in this excerpt
    // ...
      circle::ActivationFunctionType::ActivationFunctionType_NONE, &params.float_activation_min,
      &params.float_activation_max);

    // Per-tensor quantization parameters (scale, zero point) for both inputs and the output;
    // the in1_qparams / in2_qparams / out_qparams declarations are elided in this excerpt
    // ...
      (*input1->quantization()->scale())[0],
      static_cast<int32_t>((*input1->quantization()->zero_point())[0])};
    // ...
      (*input2->quantization()->scale())[0],
      static_cast<int32_t>((*input2->quantization()->zero_point())[0])};
    // ...
      (*output->quantization()->scale())[0],
      static_cast<int32_t>((*output->quantization()->zero_point())[0])};

    // Broadcast int8 path: arguments match QuantizedBroadcastSquaredDifference4DSlow below
    // ...
      params, input1_shape, in1_qparams, core::utils::castInputData<int8_t>(input1_data),
      input2_shape, in2_qparams, core::utils::castInputData<int8_t>(input2_data), output_shape,
      out_qparams, core::utils::castOutputData<int8_t>(output_data));

    // Non-broadcast int8 path: arguments match QuantizedSquaredDifference below
    // ...
      params, input1_shape.flatSize(), in1_qparams,
      core::utils::castInputData<int8_t>(input1_data), in2_qparams,
      core::utils::castInputData<int8_t>(input2_data), out_qparams,
      core::utils::castOutputData<int8_t>(output_data));
  }
  break;

  default:
    assert(false && "Unsupported type.");
}
template <typename T>
OMStatus BroadcastSquaredDifference4DSlow(const core::BinaryArithmeticBroadcastParams &params,
                                          const core::OMRuntimeShape &input1_shape, const T *input1_data,
                                          const core::OMRuntimeShape &input2_shape, const T *input2_data,
                                          const core::OMRuntimeShape &output_shape, T *output_data)

template <typename T>
OMStatus QuantizedBroadcastSquaredDifference4DSlow(
  const core::BinaryArithmeticBroadcastParams &params, const core::OMRuntimeShape &input1_shape,
  const onert_micro::core::QuantizationParams &input1_qparams, const T *input1_data,
  const core::OMRuntimeShape &input2_shape,
  const onert_micro::core::QuantizationParams &input2_qparams, const T *input2_data,
  const core::OMRuntimeShape &output_shape,
  const onert_micro::core::QuantizationParams &output_qparams, T *output_data)

template <typename T>
OMStatus QuantizedSquaredDifference(const core::BinaryArithmeticBroadcastParams &params,
                                    const int flat_size,
                                    const onert_micro::core::QuantizationParams &input1_qparams,
                                    const T *input1_data,
                                    const onert_micro::core::QuantizationParams &input2_qparams,
                                    const T *input2_data,
                                    const onert_micro::core::QuantizationParams &output_qparams,
                                    T *output_data)
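// The sketch below illustrates what the element-wise quantized path could look like.
// It is NOT the onert-micro implementation: it assumes QuantizationParams is just a
// {float scale; int32_t zero_point;} pair (matching how in1_qparams / in2_qparams /
// out_qparams are built above), dequantizes to float, squares the difference, and
// requantizes with saturation to the int8 range. The real kernel may instead use
// fixed-point arithmetic and apply the activation range carried in params.
#include <algorithm>
#include <cmath>
#include <cstdint>

namespace sketch
{

struct QuantizationParams // stand-in for onert_micro::core::QuantizationParams
{
  float scale;
  int32_t zero_point;
};

inline void QuantizedSquaredDifferenceSketch(int flat_size,
                                             const QuantizationParams &in1_q, const int8_t *in1,
                                             const QuantizationParams &in2_q, const int8_t *in2,
                                             const QuantizationParams &out_q, int8_t *out)
{
  for (int i = 0; i < flat_size; ++i)
  {
    // Dequantize: real_value = scale * (quantized_value - zero_point)
    const float a = in1_q.scale * static_cast<float>(in1[i] - in1_q.zero_point);
    const float b = in2_q.scale * static_cast<float>(in2[i] - in2_q.zero_point);
    const float diff = a - b;

    // Requantize the squared difference and saturate to the int8 range
    const int32_t q =
      static_cast<int32_t>(std::round(diff * diff / out_q.scale)) + out_q.zero_point;
    out[i] = static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, q)));
  }
}

} // namespace sketch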