#ifndef __NNFW_CKER_PORTABLE_TENSOR_UTILS_H__
#define __NNFW_CKER_PORTABLE_TENSOR_UTILS_H__

#include "cker/Types.h"

#include <ruy/context.h>

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>

namespace nnfw
{
namespace cker
{
class ActivationFunctor
{
public:
  explicit ActivationFunctor(FusedActivationFunctionType act) : act_(act) {}

  float operator()(float a) const
  {
    switch (act_)
    {
      case FusedActivationFunctionType::kNone:
        return a;
      case FusedActivationFunctionType::kRelu:
        return a < 0.f ? 0.f : a;
      case FusedActivationFunctionType::kRelu6:
        return std::max(0.f, std::min(a, 6.f));
      case FusedActivationFunctionType::kSigmoid:
        return 1.0f / (1.0f + std::exp(-a));
      default:
        // Unsupported fused activation; abort rather than return a bogus value.
        exit(1);
    }
  }

private:
  FusedActivationFunctionType act_;
};
template <typename T>
void PortableCwiseClipping(T *vector, const int v_size, const T clipping_value)
{
  for (int i = 0; i < v_size; i++)
  {
    vector[i] = std::max(std::min(clipping_value, vector[i]), static_cast<T>(-clipping_value));
  }
}
inline void PortableVectorBatchVectorAssign(const float *vector, int v_size, int n_batch,
                                            float *batch_vector)
{
  for (int b = 0; b < n_batch; b++)
  {
    memcpy(batch_vector + b * v_size, vector, v_size * sizeof(float));
  }
}
inline void PortableVectorBatchVectorAdd(const float *vector, int v_size, int n_batch,
                                         float *batch_vector)
{
  for (int b = 0; b < n_batch; b++)
  {
    for (int i = 0; i < v_size; ++i)
    {
      batch_vector[i] += vector[i];
    }
    batch_vector += v_size;
  }
}
inline bool PortableIsZeroVector(const float *vector, int v_size)
{
  for (int i = 0; i < v_size; ++i)
  {
    if (*vector++ != 0.0f)
      return false;
  }
  return true;
}
inline void PortableApplyActivationToVector(const float *vector, int v_size,
                                            FusedActivationFunctionType activation, float *result)
{
  auto activation_func = ActivationFunctor(activation);
  for (int v = 0; v < v_size; v++)
  {
    *result++ = (activation_func)(*vector++);
  }
}
inline void PortableSub1Vector(const float *vector, int v_size, float *result)
{
  for (int v = 0; v < v_size; v++)
  {
    *result++ = 1.0f - *vector++;
  }
}
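// Symmetric per-tensor quantization: the scale is max(|min|, |max|) / 127, so float 0.0
// maps exactly to int8 0 and the quantized values are clamped to [-127, 127].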
inline void PortableSymmetricQuantizeFloats(const float *values, const int size,
                                            int8_t *quantized_values, float *min_value,
                                            float *max_value, float *scaling_factor)
{
  auto minmax = std::minmax_element(values, values + size);
  *min_value = *minmax.first;
  *max_value = *minmax.second;
  const int kScale = 127;
  const float range = std::max(std::abs(*min_value), std::abs(*max_value));
  if (range == 0)
  {
    // All-zero input: nothing to quantize.
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    return;
  }
  *scaling_factor = range / kScale;
  const float scaling_factor_inv = kScale / range;
  for (int i = 0; i < size; ++i)
  {
    const int32_t quantized_value =
      static_cast<int32_t>(std::round(values[i] * scaling_factor_inv));
    // Clamp, just in case of a rounding overshoot.
    quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
  }
}
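// Asymmetric per-tensor quantization: uses the full int8 range [-128, 127] and nudges the
// zero point into that range so that real 0.0 is exactly representable.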
inline void PortableAsymmetricQuantizeFloats(const float *values, const int size,
                                             int8_t *quantized_values, float *scaling_factor,
                                             int32_t *offset)
{
  const int32_t kMinScale = -128;
  const int32_t kMaxScale = 127;
  const double qmin_double = kMinScale;
  const double qmax_double = kMaxScale;
  const auto minmax = std::minmax_element(values, values + size);
  const double rmin = static_cast<double>(std::min(0.0f, *minmax.first));
  const double rmax = static_cast<double>(std::max(0.0f, *minmax.second));
  if (rmin == rmax)
  {
    // Degenerate range: all inputs are zero.
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    *offset = 0;
    return;
  }
  else
  {
    double scale = (rmax - rmin) / (qmax_double - qmin_double);
    const double zero_point_from_min = qmin_double - rmin / scale;
    const double zero_point_from_max = qmax_double - rmax / scale;
    const double zero_point_from_min_error = std::abs(qmin_double) + std::abs(rmin / scale);
    const double zero_point_from_max_error = std::abs(qmax_double) + std::abs(rmax / scale);
    const double zero_point_double = zero_point_from_min_error < zero_point_from_max_error
                                       ? zero_point_from_min
                                       : zero_point_from_max;
    int8_t nudged_zero_point = 0;
    if (zero_point_double <= qmin_double)
    {
      nudged_zero_point = kMinScale;
    }
    else if (zero_point_double >= qmax_double)
    {
      nudged_zero_point = kMaxScale;
    }
    else
    {
      nudged_zero_point = static_cast<int8_t>(std::round(zero_point_double));
    }
    *scaling_factor = scale;
    *offset = nudged_zero_point;
  }
  const float scaling_factor_inv = 1.0f / *scaling_factor;
  for (int i = 0; i < size; ++i)
  {
    const int32_t quantized_value =
      static_cast<int32_t>(std::round(*offset + values[i] * scaling_factor_inv));
    quantized_values[i] = std::min(kMaxScale, std::max(kMinScale, quantized_value));
  }
}
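// Hybrid matrix * batch-vector multiply-accumulate: the int8 weight matrix is multiplied
// with quantized input vectors, accumulating in int32, and each row result is scaled back
// to float with the per-batch scaling factor before being added into `result`.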
inline void PortableMatrixBatchVectorMultiplyAccumulate(const int8_t *__restrict__ matrix,
                                                        const int m_rows, const int m_cols,
                                                        const int8_t *__restrict__ vectors,
                                                        const float *scaling_factors, int n_batch,
                                                        float *__restrict__ result,
                                                        int result_stride)
{
  int batch, row, col;
  for (batch = 0; batch < n_batch; ++batch, vectors += m_cols)
  {
    const float batch_scaling_factor = scaling_factors[batch];
    // Get the address of the first row.
    const int8_t *row_ptr = matrix;
    for (row = 0; row < m_rows; ++row, result += result_stride)
    {
      // Initialize the dot product sum for the row to 0.
      int32_t dotprod = 0;
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */, 3 /* temporal locality */);
#endif
      for (col = 0; col < m_cols; ++col, ++row_ptr)
      {
        dotprod += (*row_ptr) * (vectors[col]);
      }
      *result += (dotprod * batch_scaling_factor);
    }
  }
}
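// Overload matching the signature used by the accelerated path; the unnamed int32_t scratch
// pointer and ruy::Context are unused here, and the call forwards to the portable kernel above.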
inline void PortableMatrixBatchVectorMultiplyAccumulate(const int8_t *__restrict__ matrix,
                                                        const int m_rows, const int m_cols,
                                                        const int8_t *__restrict__ vector,
                                                        const float *scaling_factors, int n_batch,
                                                        int32_t *, float *__restrict__ result,
                                                        int result_stride, ruy::Context *)
{
  PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, scaling_factors,
                                              n_batch, result, result_stride);
}
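// Float reference kernel: for each batch, accumulates the matrix-vector dot products into
// `result`, advancing by `result_stride` elements between rows.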
inline void PortableMatrixBatchVectorMultiplyAccumulate(const float *matrix, int m_rows,
                                                        int m_cols, const float *vector,
                                                        int n_batch, float *result,
                                                        int result_stride)
{
  float *result_in_batch = result;
  for (int b = 0; b < n_batch; b++)
  {
    const float *matrix_ptr = matrix;
    for (int r = 0; r < m_rows; r++)
    {
      float dot_prod = 0.0f;
      const float *vector_in_batch = vector + b * m_cols;
      for (int c = 0; c < m_cols; c++)
      {
        dot_prod += *matrix_ptr++ * *vector_in_batch++;
      }
      *result_in_batch += dot_prod;
      result_in_batch += result_stride;
    }
  }
}
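// Mean/stddev normalization: each batch slice of v_size elements is shifted to zero mean and
// scaled to unit variance; the 1e-8 constant keeps the inverse stddev finite for constant input.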
inline void PortableMeanStddevNormalization(const float *input_vector, float *output_vector,
                                            int v_size, int n_batch)
{
  for (int batch = 0; batch < n_batch; ++batch)
  {
    float sum = 0.0f;
    for (int i = 0; i < v_size; ++i)
    {
      sum += input_vector[i];
    }
    const float mean = sum / v_size;
    float sum_diff_sq = 0.0f;
    for (int i = 0; i < v_size; ++i)
    {
      const float diff = input_vector[i] - mean;
      sum_diff_sq += diff * diff;
    }
    const float variance = sum_diff_sq / v_size;
    constexpr float kNormalizationConstant = 1e-8f;
    const float stddev_inv = 1.0f / std::sqrt(variance + kNormalizationConstant);
    for (int i = 0; i < v_size; ++i)
    {
      output_vector[i] = (input_vector[i] - mean) * stddev_inv;
    }
    input_vector += v_size;
    output_vector += v_size;
  }
}
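// Minimal sketch of PortableZeroVector (assumed implementation): zero-fill the whole buffer.
inline void PortableZeroVector(float *vector, int v_size) { std::fill_n(vector, v_size, 0.0f); }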
} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_PORTABLE_TENSOR_UTILS_H__