ONE - On-device Neural Engine
OperationUtils.cc
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OperationUtils.h"

#include <cker/train/operation/ReLU.h>
#include <cker/train/operation/ReLU6.h>

namespace onert::backend::train::ops
{
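
// Convert an IPortableTensor's static shape to the nnfw::cker::Shape used by
// the compute kernels; a null tensor maps to an empty shape.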
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
{
  if (tensor == nullptr)
    return nnfw::cker::Shape();

  assert(!tensor->is_dynamic() && "Dynamic tensor is not supported yet");

  const ir::Shape &shape = tensor->get_info().shape();
  auto rank = shape.rank();
  nnfw::cker::Shape ret(rank);
  auto data = ret.DimsData();
  for (int i = 0; i < rank; ++i)
  {
    data[i] = shape.dim(i);
  }
  return ret;
}
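
// Backpropagate the incoming gradient through the layer's fused activation.
// For Activation::NONE the incoming gradient is simply passed through and
// input_backprop itself is returned; otherwise the activation gradient is
// written into output_backprop and that tensor is returned. For ReLU this
// computes output_backprop[i] = output[i] > 0 ? input_backprop[i] : 0, and
// ReLU6 additionally zeroes positions where the forward output saturated at 6.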
const IPortableTensor *backpropActivation(const ir::Activation &activation,
                                          const IPortableTensor *output,
                                          const IPortableTensor *input_backprop,
                                          IPortableTensor *output_backprop)
{
  assert(output != nullptr);
  assert(input_backprop != nullptr);

  // handle NONE - just propagate incoming gradient
  if (activation == ir::Activation::NONE)
  {
    return input_backprop;
  }

  assert(output_backprop != nullptr);

  // handle other activation
  switch (activation)
  {
    case ir::Activation::RELU:
      nnfw::cker::train::ReLUGrad(getShape(output), getBuffer<float>(output),
                                  getShape(input_backprop), getBuffer<float>(input_backprop),
                                  getShape(output_backprop), getBuffer<float>(output_backprop));
      break;
    case ir::Activation::RELU6:
      nnfw::cker::train::ReLU6Grad(getShape(output), getBuffer<float>(output),
                                   getShape(input_backprop), getBuffer<float>(input_backprop),
                                   getShape(output_backprop), getBuffer<float>(output_backprop));
      break;
    // TODO: Add other activation backpropagation here
    default:
      throw std::runtime_error("Unsupported activation type yet");
  }
  return output_backprop;
}
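
// Reduce the incoming gradient into the bias gradient. The bias is broadcast
// over every non-channel axis in the forward pass, so its gradient is the sum
// of input_backprop over all axes except the trailing bias axis.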
void biasGrad(const IPortableTensor *input_backprop, IPortableTensor *bias_grad)
{
  assert(bias_grad);

  nnfw::cker::Shape input_backprop_shape = getShape(input_backprop);
  float *input_backprop_buffer = reinterpret_cast<float *>(input_backprop->buffer());

  nnfw::cker::Shape bias_grad_shape = getShape(bias_grad);
  float *bias_grad_buffer = getBuffer<float>(bias_grad);

  nnfw::cker::functor::biasReductionHelper(input_backprop_buffer, input_backprop_shape,
                                           bias_grad_buffer, bias_grad_shape);
}
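
// Map the IR-level loss reduction type to the equivalent cker enumerator.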
nnfw::cker::train::LossReductionType convertLossReductionType(ir::train::LossReductionType type)
{
  switch (type)
  {
    case ir::train::LossReductionType::SUM_OVER_BATCH_SIZE:
      return nnfw::cker::train::LossReductionType::SUM_OVER_BATCH_SIZE;
    case ir::train::LossReductionType::SUM:
      return nnfw::cker::train::LossReductionType::SUM;
    default:
      throw std::runtime_error("Unsupported loss reduction type");
  }
}

} // namespace onert::backend::train::ops
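
For reference, here is one way a training kernel in this backend might chain these helpers in its backward pass. This sketch is not part of the file above; the function name and parameters are illustrative assumptions:

// Hypothetical sketch: backward step for a layer with a fused activation and
// a bias, expressed with the helpers defined in OperationUtils.cc.
void backwardBiasAndActivation(const ir::Activation &activation,
                               const IPortableTensor *output,
                               const IPortableTensor *grad_input,
                               IPortableTensor *grad_activation,
                               IPortableTensor *grad_bias)
{
  // Undo the fused activation first: for Activation::NONE the incoming
  // gradient is returned unchanged, otherwise it is written into
  // grad_activation and that tensor is returned.
  const IPortableTensor *backprop =
      backpropActivation(activation, output, grad_input, grad_activation);

  // Reduce whichever tensor came back into the per-channel bias gradient.
  if (grad_bias != nullptr)
    biasGrad(backprop, grad_bias);
}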