ONE - On-device Neural Engine
SoftMaxLayer.h
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__
#define __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__

#include <backend/IPortableTensor.h>

#include <exec/IFunction.h>

#include <cstdint> // for uint8_t

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

class SoftMaxLayer : public ::onert::exec::IFunction
{
public:
  SoftMaxLayer();

public:
  // Softmax over float32 tensors.
  void softmaxFloat32();

  // Softmax over quantized tensors; T is the quantized element type (e.g. uint8_t, int8_t).
  template <typename T> void softmaxQuant8();

  // Binds the input/output tensors and the beta factor applied to the logits.
  void configure(const IPortableTensor *input, const float beta, IPortableTensor *output);

  void run() override;

protected:
  const IPortableTensor *_input;
  IPortableTensor *_output;

private:
  float _beta;

  // Look-up tables used by the quantized softmax path.
  float _table[256];
  uint8_t _uint8_table1[256];
  uint8_t _uint8_table2[256];
};

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__
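SoftMaxLayer follows the usual onert CPU-kernel lifecycle: default-construct the kernel, call configure() with the input tensor, the beta factor, and the output tensor, and let the executor invoke run(), which dispatches to the float or quantized path according to the input's data type. The sketch below is illustrative only and not taken from the ONE sources; makeSoftmax, inTensor and outTensor are hypothetical stand-ins for whatever the backend's kernel generator and tensor registry actually provide.

// Illustrative sketch only (not from the ONE repository).
// makeSoftmax, inTensor and outTensor are hypothetical names standing in for
// what a CPU-backend kernel generator and its tensor registry would supply.
#include <memory>

#include "SoftMaxLayer.h"

std::unique_ptr<onert::exec::IFunction>
makeSoftmax(const onert::backend::IPortableTensor *inTensor,
            onert::backend::IPortableTensor *outTensor, float beta)
{
  auto fn = std::make_unique<onert::backend::cpu::ops::SoftMaxLayer>();
  // beta scales the logits before normalization:
  //   y_i = exp(beta * x_i) / sum_j exp(beta * x_j)
  fn->configure(inTensor, beta, outTensor);
  return fn; // the executor takes ownership and later calls fn->run()
}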