ONE - On-device Neural Engine
SoftMaxLayer.cc
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SoftMaxLayer.h"

#include "OperationUtils.h"

#include <cker/train/operation/SoftMax.h>

namespace onert
{
namespace backend
{
namespace train
{
namespace ops
{

SoftMaxLayer::SoftMaxLayer()
  : cpu::ops::SoftMaxLayer(), _back_prop_input{nullptr}, _back_prop_output{nullptr}
{
  // DO NOTHING
}

void SoftMaxLayer::configureBackward(IPortableTensor *back_prop_input,
                                     const IPortableTensor *back_prop_output)
{
  _back_prop_input = back_prop_input;
  _back_prop_output = back_prop_output;
}

void SoftMaxLayer::forward(bool) { cpu::ops::SoftMaxLayer::run(); }

void SoftMaxLayer::backward()
{
  assert(_back_prop_output->data_type() == _input->data_type());
  switch (_back_prop_output->data_type())
  {
    case OperandType::FLOAT32:
    {
      nnfw::cker::train::SoftMaxGrad(
        getShape(_output), getBuffer<float>(_output), getShape(_back_prop_output),
        getBuffer<float>(_back_prop_output), getShape(_back_prop_input),
        getBuffer<float>(_back_prop_input));
      break;
    }
    default:
      throw std::runtime_error("train SoftMaxLayer: unsupported data type");
  }
}

} // namespace ops
} // namespace train
} // namespace backend
} // namespace onert