ONE - On-device Neural Engine
L2Normalization.cc
/*
 * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../KernelGenerator.h"
#include "../Validator.h"

#include <AclKernelGen.h>

namespace onert::backend::acl_neon
{

void Validator::visit(const ir::operation::L2Normalization &) { _supported = true; }

void KernelGenerator::visit(const ir::operation::L2Normalization &node)
{
  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)};

  // {CL|Neon}L2Normalization performs the reduction only along dimension 0
  // L2 Normalization always performs the reduction along the depth axis
  // Thus, we repurpose {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by
  // choosing normalization parameters as below

  const auto &ifm_shape = _ctx.at(ifm_index).shape();
  // TODO Support optional constant dimension that normalization would be performed on
  const auto normalization_axis = _ctx.at(ifm_index).shape().rank() - 1;
  int32_t radius =
    2 * ifm_shape.dim(normalization_axis) + 1; // normSize = depth(last dimension) * 2 + 1
  float alpha = 1.0f;                          // In the implementation to make alpha_ become 1
  float beta = 0.5f;                           // pow(reduction, -0.5) = 1 / sqrt(reduction)
  float bias = 0.0f;                           // Don't offset the reduction.
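  // Taken together (and with is_scaled turned off below, so alpha is used unscaled), the
  // cross-map normalization out = in / (bias + alpha * sum(in^2))^beta collapses to
  // out = in / sqrt(sum of squares along the depth axis), i.e. an L2 normalization over depth.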

  auto ofm_tensor = _tensor_reg->getAclTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getAclTensor(ifm_index);

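  // The trailing 'false' is NormalizationLayerInfo's is_scaled flag: alpha is passed through
  // as-is instead of being divided by normSize, which keeps the effective alpha_ at 1.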
  const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP,
                                                               radius, alpha, beta, bias, false);

  auto fn = acl_common::generateLayer<arm_compute::NENormalizationLayer>(
    ifm_tensor->handle(), ofm_tensor->handle(), norm_info);

  _return_fn = asAclFunction(std::move(fn));
}

} // namespace onert::backend::acl_neon
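For reference, below is a minimal standalone sketch (an illustration only, not code from this
backend; the function name is hypothetical) of the depth-wise L2 normalization that the repurposed
NENormalizationLayer above is expected to compute, assuming the input is laid out with the depth
(last) axis contiguous in memory:

#include <cmath>
#include <cstddef>
#include <vector>

// out[c] = in[c] / sqrt(sum_k in[k]^2) within every contiguous slice of size `depth`.
std::vector<float> l2_normalize_depthwise(const std::vector<float> &in, std::size_t depth)
{
  std::vector<float> out(in.size());
  for (std::size_t base = 0; base < in.size(); base += depth)
  {
    float sum_sq = 0.0f; // the "reduction" referred to in the comments above
    for (std::size_t c = 0; c < depth; ++c)
      sum_sq += in[base + c] * in[base + c];
    const float inv_norm = 1.0f / std::sqrt(sum_sq); // bias = 0, beta = 0.5
    for (std::size_t c = 0; c < depth; ++c)
      out[base + c] = in[base + c] * inv_norm;
  }
  return out;
}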