ONE - On-device Neural Engine
ElementwiseActivationLayer.cc
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"
#include "../KernelGenerator.h"
#include "../Validator.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>
#include <cker/operation/GELU.h>

namespace onert::backend::cpu
{

void Validator::visit(const ir::operation::ElementwiseActivation &) { _supported = true; }

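// Map the IR-level activation type onto the CPU backend's kernel-level enum.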
ops::ElementwiseActivationType
convertElementwiseActivationType(ir::operation::ElementwiseActivation::Type type_ir)
{
  switch (type_ir)
  {
    case ir::operation::ElementwiseActivation::Type::ELU:
      return ops::ElementwiseActivationType::kElu;
    case ir::operation::ElementwiseActivation::Type::LOGISTIC:
      return ops::ElementwiseActivationType::kLogistic;
    case ir::operation::ElementwiseActivation::Type::RELU:
      return ops::ElementwiseActivationType::kReLU;
    case ir::operation::ElementwiseActivation::Type::TANH:
      return ops::ElementwiseActivationType::kTanh;
    case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
      return ops::ElementwiseActivationType::kLeakyReLU;
    case ir::operation::ElementwiseActivation::Type::GELU:
      return ops::ElementwiseActivationType::kGELU;
    default:
      throw std::runtime_error("ElementwiseActivation: unsupported type");
  }
}

void KernelGenerator::visit(const ir::operation::ElementwiseActivation &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::ElementwiseActivation::Input::INPUT)};

  auto output_tensor = _tensor_reg->getPortableTensor(output_index);
  auto input_tensor = _tensor_reg->getPortableTensor(input_index);

  auto fn = std::make_unique<ops::ElementwiseActivationLayer>();

  fn->configure(input_tensor, output_tensor, node.param().alpha, node.param().beta,
                node.param().approximate, convertElementwiseActivationType(node.param().op_type));

  _return_fn = std::move(fn);
}

} // namespace onert::backend::cpu

namespace onert::backend::cpu::ops
{

ElementwiseActivationLayer::ElementwiseActivationLayer()
  : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}

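// Precompute a 256-entry table mapping every quantized uint8 input value to its
// activated and re-quantized output value (used for Tanh/Logistic on QUANT_UINT8_ASYMM).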
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  const auto input_scale = static_cast<double>(_input->data_scale());
  const auto input_zero_point = static_cast<int32_t>(_input->data_zero_point());
  const auto output_scale = static_cast<double>(_output->data_scale());
  const auto output_zero_point = static_cast<int32_t>(_output->data_zero_point());
  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}

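// Evaluate the activation by table lookup over the flat uint8 buffer.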
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getShape(input), getShape(output));
  const uint8_t *input_data = getBuffer<uint8_t>(input);
  uint8_t *output_data = getBuffer<uint8_t>(output);

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}

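// Select the kernel lambda according to the activation type and the operand data type;
// the chosen lambda is stored in _kernel and invoked later by run().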
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta, bool approximate,
                                           const ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kElu:
      if (input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
                          getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
                               getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
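    // Only two (alpha, beta) combinations are handled for ReLU below:
    // alpha == infinity with beta == 0 (plain ReLU) and alpha == 6 with beta == 0 (ReLU6).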
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
                             getBuffer<float>(output));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getShape(output),
                              getBuffer<float>(output));
          };
        }
        else
        {
          throw std::runtime_error(
            "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
                           getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLeakyReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::LeakyReluParams params;
          params.alpha = alpha;
          nnfw::cker::LeakyReLU(params, getShape(input), getBuffer<float>(input), getShape(output),
                                getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kGELU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [approximate](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::GELUParams params;
          params.approximate = approximate;
          nnfw::cker::GELU(params, getShape(input), getBuffer<float>(input), getShape(output),
                           getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(GELU): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}

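// Invoked at execution time: dispatch to the kernel chosen in configure().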
void ElementwiseActivationLayer::run() { _kernel(_input, _output); }

} // namespace onert::backend::cpu::ops