ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Loss.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2016 The TensorFlow Authors. All Rights Reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#ifndef __NNFW_CKER_TRAIN_OPERATION_LOSS_H__
19#define __NNFW_CKER_TRAIN_OPERATION_LOSS_H__
20
#include <cassert>
#include <numeric>

#include "cker/Shape.h"
#include "cker/eigen/EigenSupport.h"
#include "cker/eigen/Utils.h"
#include "cker/eigen/xent_op.h"
#include "cker/operation/Helper/BCast.h"
#include "cker/train/Types.h"
29
30namespace nnfw
31{
32namespace cker
33{
34namespace train
35{
36
/// Returns the square of @p v.
template <typename T> inline T square(T v) { return v * v; }
/// Smallest value fed into log(); operands are clamped to this to avoid -inf on zeros.
template <typename T> inline T log_threshold() { return static_cast<T>(1e-20); }
39
40template <typename T>
41inline void MSE(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape,
42 const T *y_true_data, const Shape &output_shape, T *output_data)
43{
44 if (output_shape.DimensionsCount() != 1)
45 throw std::runtime_error("cker::MSE: output dimension count should be 1");
46 if (output_shape.Dims(0) != y_pred_shape.Dims(0))
47 throw std::runtime_error("cker::MSE: output and y_pred do not have the same batch");
48 if (y_pred_shape != y_true_shape)
49 throw std::runtime_error("cker::MSE: y_pred_shape != y_true_shape");
50
51 const auto batch = y_pred_shape.Dims(0);
52 const auto size = FlatSizeSkipDim(y_pred_shape, 0);
53
54 for (int b = 0; b < batch; ++b)
55 {
56 float sum = 0.f;
57 for (int i = 0; i < size; ++i)
58 {
59 sum += square(y_pred_data[b * size + i] - y_true_data[b * size + i]);
60 }
61 output_data[b] = static_cast<T>(sum / size);
62 }
63}
64
65template <typename T>
66inline void MSEGrad(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape,
67 const T *y_true_data, const Shape &grad_shape, T *grad_data,
68 LossReductionType reduction_type)
69{
70 if (y_pred_shape != y_true_shape)
71 throw std::runtime_error("cker::MSEGrad: y_pred_shape != y_true_shape");
72 if (y_pred_shape != grad_shape)
73 throw std::runtime_error("cker::MSEGrad: y_pred_shape != grad_shape");
74
75 const int batch_size = grad_shape.Dims(0);
76 const auto flat_size = FlatSizeSkipDim(grad_shape, 0);
77 auto reduction_size = 1;
78 switch (reduction_type)
79 {
81 reduction_size = batch_size * flat_size;
82 break;
84 reduction_size = flat_size;
85 break;
86 default:
87 throw std::runtime_error("Unsupported reduction type");
88 }
89
90 for (int b = 0; b < batch_size; ++b)
91 {
92 for (int i = 0; i < flat_size; ++i)
93 {
94 const int offset = b * flat_size + i;
95 assert(offset >= 0);
96 grad_data[offset] =
97 static_cast<T>(-2 * (y_true_data[offset] - y_pred_data[offset]) / reduction_size);
98 }
99 }
100}
101
102template <typename T>
103inline void CategoricalCrossEntropy(const Shape &y_pred_shape, const T *y_pred_data,
104 const Shape &y_true_shape, const T *y_true_data,
105 const Shape &output_shape, T *output_data)
106{
107 if (output_shape.DimensionsCount() != 1)
108 throw std::runtime_error("cker::CategoricalCrossEntropy: output dimension count should be 1");
109 if (y_pred_shape != y_true_shape)
110 throw std::runtime_error(
111 "cker::CategoricalCrossEntropy: y_pred and y_true do not have the same shape");
112 if (output_shape.Dims(0) != y_pred_shape.Dims(0))
113 throw std::runtime_error(
114 "cker::CategoricalCrossEntropy: output and y_pred do not have the same batch");
115
116 const auto y_pred = MapAsMatrixWithLastDimAsRows(y_pred_data, y_pred_shape);
117 const auto y_true = MapAsMatrixWithLastDimAsRows(y_true_data, y_true_shape);
118 auto output = MapAsVector(output_data, output_shape);
119
120 output = -(y_true.array() * y_pred.array().cwiseMax(log_threshold<T>()).log()).colwise().sum();
121}
122
123template <typename T>
124inline void CategoricalCrossEntropyGrad(const Shape &y_pred_shape, const T *y_pred_data,
125 const Shape &y_true_shape, const T *y_true_data,
126 const Shape &grad_shape, T *grad_data,
127 LossReductionType reduction_type)
128{
129 if (y_pred_shape != y_true_shape)
130 throw std::runtime_error(
131 "cker::CategoricalCrossEntropyGrad: y_pred and y_true do not have the same shape");
132 if (y_pred_shape != grad_shape)
133 throw std::runtime_error(
134 "cker::CategoricalCrossEntropyGrad: y_pred and grad do not have the same shape");
135
136 const auto y_pred = MapAsMatrixWithLastDimAsRows(y_pred_data, y_pred_shape);
137 const auto y_true = MapAsMatrixWithLastDimAsRows(y_true_data, y_true_shape);
138 auto grad = MapAsMatrixWithLastDimAsRows(grad_data, grad_shape);
139
140 const int32_t batch_size = grad_shape.Dims(0);
141 int32_t reduction_size = 1;
142 switch (reduction_type)
143 {
145 reduction_size = batch_size;
146 break;
148 reduction_size = 1;
149 break;
150 default:
151 throw std::runtime_error("Unsupported reduction type");
152 }
153 assert(reduction_size > 0);
154
155 grad = -(y_true.array() / y_pred.array().cwiseMax(log_threshold<T>())) /
156 static_cast<T>(reduction_size);
157}
158
159template <typename T>
160void CategoricalCrossEntropyWithLogits(const Shape &logits_shape, const T *logits_data,
161 const Shape &y_true_shape, const T *y_true_data,
162 const Shape &loss_out_shape, T *loss_out_data,
163 const Shape &grad_shape, T *grad_data,
164 LossReductionType reduction_type)
165{
166 // TODO Enable broadcast shapes
167 if (loss_out_shape.DimensionsCount() != 1)
168 throw std::runtime_error(
169 "cker::CategoricalCrossEntropyWithLogits: loss output dimension count should be 1");
170 if (logits_shape != y_true_shape)
171 throw std::runtime_error(
172 "cker::CategoricalCrossEntropyWithLogits: logits and y_true do not have the same shape");
173 if (loss_out_shape.Dims(0) != logits_shape.Dims(0))
174 throw std::runtime_error(
175 "cker::CategoricalCrossEntropyWithLogits: loss_out and logits do not have the same batch");
176 if (logits_shape != grad_shape)
177 throw std::runtime_error(
178 "cker::CategoricalCrossEntropyWithLogits: logits and grad do not have the same shape");
179
180 auto shape_in = logits_shape;
181
182 BCast bcast(BCast::FromShape(shape_in), BCast::FromShape(y_true_shape),
183 /*fewer_dims_optimization=*/false);
184
185 // loss is 1-D (one per example), and size is batch_size.
186
187 Tensor logits_in;
188 Tensor labels_in;
189 Tensor scratch;
190 Tensor loss_out;
191 Tensor back_out;
192
193 logits_in.shape.ReplaceWith(shape_in.DimensionsCount(), shape_in.DimsData());
194 logits_in.buffer = const_cast<T *>(logits_data);
195
196 labels_in.shape.ReplaceWith(y_true_shape.DimensionsCount(), y_true_shape.DimsData());
197 labels_in.buffer = const_cast<T *>(y_true_data);
198
199 scratch.shape.ReplaceWith(shape_in.DimensionsCount(), shape_in.DimsData());
200 std::vector<T> scratch_vec(shape_in.Dims(0) * shape_in.Dims(1), static_cast<T>(0));
201 scratch.buffer = scratch_vec.data();
202
203 Shape shape_loss_out{shape_in.Dims(0)};
204 loss_out.shape.ReplaceWith(shape_loss_out.DimensionsCount(), shape_loss_out.DimsData());
205 loss_out.buffer = loss_out_data;
206
207 back_out.shape.ReplaceWith(shape_in.DimensionsCount(), shape_in.DimsData());
208 back_out.buffer = grad_data;
209
210 if (shape_in.Dims(0) > 0)
211 {
212 const int32_t batch_size = grad_shape.Dims(0);
213 int32_t reduction_size = 1;
214 switch (reduction_type)
215 {
217 reduction_size = batch_size;
218 break;
220 reduction_size = 1;
221 break;
222 default:
223 throw std::runtime_error("Unsupported reduction type");
224 }
225 assert(reduction_size > 0);
226
229 const Eigen::DSizes<Eigen::DenseIndex, 2> shape{shape_in.Dims(0), shape_in.Dims(1)};
230
231 functor(device, shape, BCast::ToIndexArray<2>(bcast.x_bcast()),
232 BCast::ToIndexArray<2>(bcast.y_bcast()),
233 logits_in.template shaped<const T, 2>(bcast.x_reshape()),
234 labels_in.template shaped<const T, 2>(bcast.y_reshape()), scratch.matrix<T>(),
235 loss_out.vec<T>(), back_out.matrix<T>(), static_cast<T>(reduction_size));
236 }
237}
238
239} // namespace train
240} // namespace cker
241} // namespace nnfw
242
243#endif // __NNFW_CKER_TRAIN_OPERATION_LOSS_H__
static Vec FromShape(const Shape &shape)
Definition BCast.h:444
const Vec & y_bcast() const
Definition BCast.h:408
const Vec & x_reshape() const
Definition BCast.h:405
const Vec & y_reshape() const
Definition BCast.h:407
const Vec & x_bcast() const
Definition BCast.h:406
int32_t DimensionsCount() const
Definition Shape.h:91
void ReplaceWith(int dimensions_count, const int32_t *dims_data)
Definition Shape.h:130
int32_t Dims(int i) const
Definition Shape.h:92
int32_t * DimsData()
Definition Shape.h:112
__global uchar * offset(const Image *img, int x, int y)
Definition helpers.h:540
const luci_interpreter::RuntimeShape output_shape
const Eigen::ThreadPoolDevice * GetThreadPoolDevice()
void CategoricalCrossEntropyGrad(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)
Definition Loss.h:124
T log_threshold()
Definition Loss.h:38
void CategoricalCrossEntropy(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data)
Definition Loss.h:103
T square(T value)
Definition Loss.h:37
void MSE(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data)
Definition Loss.h:41
void MSEGrad(const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)
Definition Loss.h:66
void CategoricalCrossEntropyWithLogits(const Shape &logits_shape, const T *logits_data, const Shape &y_true_shape, const T *y_true_data, const Shape &loss_out_shape, T *loss_out_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)
Definition Loss.h:160
Eigen::ThreadPoolDevice CPUDevice
Definition xent_op.h:69
MatrixMap< Scalar > MapAsMatrixWithLastDimAsRows(Scalar *data, const Shape &shape)
Definition Utils.h:60
int FlatSizeSkipDim(const Shape &shape, int skip_dim)
Definition Shape.h:253
VectorMap< Scalar > MapAsVector(Scalar *data, const Shape &shape)
Definition Utils.h:43
Definition topk_v2.h:30
int32_t size[5]
Definition Slice.cpp:35
TTypes< T >::Vec vec()
Definition Tensor.h:161
TTypes< T >::Matrix matrix()
Definition Tensor.h:163