ONE - On-device Neural Engine
Loading...
Searching...
No Matches
ElementwiseActivationLayer.cc
Go to the documentation of this file.
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <stdexcept>
27
namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{
36
38 : _input(nullptr), _output(nullptr), _kernel()
39{
40 // DO NOTHING
41}
42
44{
45 const auto input_scale = static_cast<double>(_input->data_scale());
46 const auto input_zero_point = static_cast<int32_t>(_input->data_zero_point());
47 const auto output_scale = static_cast<double>(_output->data_scale());
48 const auto output_zero_point = static_cast<int32_t>(_output->data_zero_point());
49 const float inverse_scale = 1 / output_scale;
50 int32_t maxval = std::numeric_limits<uint8_t>::max();
51 int32_t minval = std::numeric_limits<uint8_t>::min();
52 for (int32_t val = minval; val <= maxval; ++val)
53 {
54 const float dequantized = input_scale * (val - input_zero_point);
55 float transformed = 0.f;
57 {
58 transformed = std::tanh(dequantized);
59 }
60 else if (op_type == ElementwiseActivationType::kLogistic)
61 {
62 transformed = 1.0f / (1.0f + std::exp(-dequantized));
63 }
64 else
65 {
66 throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
67 }
68 const float rescaled = std::round(transformed * inverse_scale);
69 const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
70 _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
71 }
72}
73
75 IPortableTensor *output)
76{
77 const int size = MatchingFlatSize(getShape(input), getShape(output));
78 const uint8_t *input_data = getBuffer<uint8_t>(input);
79 uint8_t *output_data = getBuffer<uint8_t>(output);
80
81 for (int i = 0; i < size; ++i)
82 {
83 output_data[i] = _table[input_data[i]];
84 }
85}
86
88 float alpha, float beta,
90{
91 _input = input;
92 _output = output;
93
94 switch (op_type)
95 {
97 if (input->data_type() == OperandType::FLOAT32)
98 {
99 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
100 nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
101 getBuffer<float>(output));
102 };
103 }
104 else
105 {
106 throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
107 }
108 break;
110 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
111 {
112 PopulateLookupTable(op_type);
114 std::placeholders::_1, std::placeholders::_2);
115 }
116 else if (_input->data_type() == OperandType::FLOAT32)
117 {
118 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
119 nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
120 getBuffer<float>(output));
121 };
122 }
123 else
124 {
125 throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
126 }
127 break;
129 if (_input->data_type() == OperandType::FLOAT32)
130 {
131 if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
132 {
133 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
134 nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
135 getBuffer<float>(output));
136 };
137 }
138 else if (alpha == 6.f && beta == 0.f)
139 {
140 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
141 nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getShape(output),
142 getBuffer<float>(output));
143 };
144 }
145 else
146 {
147 throw std::runtime_error(
148 "ElementwiseActivationLayer : This layer suppports only ReLU(0-inf) and ReLU6(0-6)");
149 }
150 }
151 else
152 {
153 throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
154 }
155 break;
157 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
158 {
159 PopulateLookupTable(op_type);
161 std::placeholders::_1, std::placeholders::_2);
162 }
163 else if (_input->data_type() == OperandType::FLOAT32)
164 {
165 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
166 nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
167 getBuffer<float>(output));
168 };
169 }
170 else
171 {
172 throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
173 }
174 break;
176 if (_input->data_type() == OperandType::FLOAT32)
177 {
178 _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
180 getBuffer<float>(input), getShape(output),
181 getBuffer<float>(output));
182 };
183 }
184 else
185 {
186 throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
187 }
188 break;
189 default:
190 throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
191 }
192}
193
195
} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert
int MatchingFlatSize(const Dims< N > &dims, const Dims< N > &check_dims_0)
Definition Dims.h:108
A tensor class that is portable for other backends.
float data_scale() const override final
int32_t data_zero_point() const override final
ir::DataType data_type() const override final
void PopulateLookupTable(const ElementwiseActivationType op_type)
std::function< void(const IPortableTensor *input, IPortableTensor *output)> _kernel
void configure(const IPortableTensor *input, IPortableTensor *output, float alpha, float beta, const ElementwiseActivationType op_type)
void EvalUsingLookupTable(const IPortableTensor *input, IPortableTensor *output)
void LeakyReLU(const LeakyReluParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition LeakyReLU.h:31
void Logistic(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition Logistic.h:32
void Tanh(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition Tanh.h:31
void ReLU6(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition ReLU6.h:32
void ELU(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition ELU.h:30
void ReLU(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition ReLU.h:32
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
int32_t size[5]
Definition Slice.cpp:35