ONE - On-device Neural Engine
Pad.cpp
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Pad.h"
#include "Common.h"

#include "mir/ShapeRange.h"
#include "mir/Tensor.h"

namespace mir_interpreter
{

using namespace mir;

template <typename T> struct PadImpl
{
  static void run(const mir::TensorVariant &inputv, const mir::ops::PadOp &op,
                  mir::TensorVariant &result);
};

template <typename T>
void PadImpl<T>::run(const TensorVariant &inputv, const ops::PadOp &op, TensorVariant &result)
{
  Tensor<T> result_accessor(result);
  Tensor<T> input(inputv);

  Shape out_shape = result_accessor.getShape();

  ShapeRange out_range(out_shape);
  const int rank = op.getInputShape(0).rank();
  const auto &padding_before = op.getPaddingBefore();
  const auto &padding_after = op.getPaddingAfter();

  Index temp_index;
  temp_index.resize(rank);

  bool index_on_padding(false);
  // Visit every index of the output tensor and decide whether it maps back into
  // the input region or lies on the padded border.
  for (const Index &ind : out_range)
  {
    index_on_padding = false;

    for (int32_t i = 0; i < rank; i++)
    {
      // index on input values
      if (ind.at(i) >= padding_before[i] && ind.at(i) < out_shape.dim(i) - padding_after[i])
      {
        temp_index.at(i) = ind.at(i) - padding_before[i];
      }
      else
      { // not in input
        index_on_padding = true;
        break;
      }
    }
    if (index_on_padding)
    {
      result_accessor.at(ind) = op.getPaddingValue();
    }
    else
    {
      result_accessor.at(ind) = input.at(temp_index);
    }
  }
}

void Pad(const mir::TensorVariant &input, const mir::ops::PadOp &op, mir::TensorVariant &result)
{
  dispatch<PadImpl>(input.getElementType(), input, op, result);
}

} // namespace mir_interpreter
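
The core of PadImpl<T>::run is the mapping from each output index to either an input index (shifted back by padding_before) or the constant padding value. The following standalone sketch reproduces that mapping in one dimension using plain std::vector and hypothetical example values; it is an illustration only and does not use the mir Tensor/Index types.

// Standalone illustration (not part of the mir interpreter): the same
// output-to-input index mapping as PadImpl<T>::run, reduced to one dimension.
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<float> input = {1.0f, 2.0f, 3.0f}; // hypothetical example data
  const int padding_before = 2;
  const int padding_after = 1;
  const float padding_value = 0.0f;

  const int out_size = padding_before + static_cast<int>(input.size()) + padding_after;
  std::vector<float> output(out_size);

  for (int i = 0; i < out_size; ++i)
  {
    if (i >= padding_before && i < out_size - padding_after)
      output[i] = input[i - padding_before]; // inside the copied region: shift back by padding_before
    else
      output[i] = padding_value; // on the padded border: write the constant value
  }

  for (float v : output)
    std::printf("%g ", v); // prints: 0 0 1 2 3 0
  std::printf("\n");
  return 0;
}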
Referenced symbols:
- Index &mir::Index::resize(int32_t size): resize the index to the given number of dimensions (Index.cpp:24)
- int32_t &mir::Index::at(int32_t axis): return the position on the given axis (Index.h:64)
- const Shape &mir::Operation::getInputShape(std::size_t index) const (Operation.h:161)
- int32_t &mir::Shape::dim(int32_t axis) noexcept (Shape.h:47)
- int32_t mir::Shape::rank() const (Shape.h:43)
- const Shape &mir::Tensor<T>::getShape() const (Tensor.h:48)
- T mir::Tensor<T>::at(const Index &id) const (Tensor.h:31)
- float mir::ops::PadOp::getPaddingValue() const (PadOp.h:47)
- const std::vector<std::int32_t> &mir::ops::PadOp::getPaddingBefore() const (PadOp.h:43)
- const std::vector<std::int32_t> &mir::ops::PadOp::getPaddingAfter() const (PadOp.h:45)
- void mir_interpreter::Pad(const mir::TensorVariant &input, const mir::ops::PadOp &op, mir::TensorVariant &result): implements PadOp for the interpreter backend (Pad.cpp:79)
- static void mir_interpreter::PadImpl<T>::run(const mir::TensorVariant &inputv, const mir::ops::PadOp &op, mir::TensorVariant &result) (Pad.cpp:35)
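
The Pad entry point delegates to dispatch<PadImpl> from Common.h, which selects the PadImpl<T> specialization matching the tensor's element type. Below is a minimal sketch of that dispatch pattern, assuming a simplified stand-in DataType enum with only two element types; the actual helper in Common.h may differ in signature and in the types it covers.

#include <cstdint>
#include <stdexcept>
#include <utility>

enum class DataType { FLOAT32, INT32 }; // assumed stand-in for the mir element-type enum

// Forwards the call to Impl<T>::run with T chosen from the runtime element type.
template <template <typename> class Impl, typename... Args>
void dispatch(DataType dt, Args &&...args)
{
  switch (dt)
  {
    case DataType::FLOAT32:
      Impl<float>::run(std::forward<Args>(args)...);
      break;
    case DataType::INT32:
      Impl<int32_t>::run(std::forward<Args>(args)...);
      break;
    default:
      throw std::runtime_error("unsupported element type");
  }
}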