ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Pad.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright (C) 2017 The Android Open Source Project
4 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19#include "Pad.h"
20#include "Assert.h"
21#include "Logging.h"
22
23#include "internal/Dims.h"
24
25#include <vector>
26#include <cstring> // For 'memset'
27
28bool padPrepare(const Shape& input, const int32_t* paddingsData, const Shape& paddingsShape,
29 Shape* output)
30{
31 // Currently only 4D tensors are supported.
32 uint32_t numInputDims = getNumberOfDimensions(input);
33 ASSERT(numInputDims == 4);
34
35 // paddings need to be provided as a 2-D int32 tensor.
36 ASSERT(paddingsShape.type == OperandType::TENSOR_INT32);
37 ASSERT(getNumberOfDimensions(paddingsShape) == 2);
38 ASSERT(getSizeOfDimension(paddingsShape, 0) == numInputDims);
39 ASSERT(getSizeOfDimension(paddingsShape, 1) == 2);
40
41 std::vector<uint32_t> outDims(numInputDims);
42 for (uint32_t i = 0; i < numInputDims; ++i)
43 {
44 int32_t beforePadding = *paddingsData++;
45 int32_t afterPadding = *paddingsData++;
46 // Pad value has to be greater than equal to 0.
47 ASSERT(beforePadding >= 0 && afterPadding >= 0);
48 outDims[i] = beforePadding + getSizeOfDimension(input, i) + afterPadding;
49 }
50 output->type = input.type;
51 output->dimensions = outDims;
52 output->offset = input.offset;
53 output->scale = input.scale;
54
55 return true;
56}
57
58namespace
59{
60
// From optimized_ops.h in TensorFlow Lite
//
// Zero-pads a 4-D tensor. Dimension indices follow the Dims<4> convention
// used by ArraySize/Offset: index 0 is the innermost (depth) dimension and
// index 3 the outermost (batch). left_paddings/right_paddings are indexed
// the same way and give the number of zero elements inserted before/after
// each dimension. The interior (unpadded) region is copied row-by-row with
// memcpy of a full depth slice; all padding regions are filled with memset,
// so the zero pattern is the all-zero byte pattern of T (correct for
// float 0.0f and uint8 0).
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims) {
  const int output_batch = ArraySize(output_dims, 3);
  const int output_height = ArraySize(output_dims, 2);
  const int output_width = ArraySize(output_dims, 1);
  const int output_depth = ArraySize(output_dims, 0);

  // Unpack the per-dimension padding amounts (b = batch, h = height,
  // w = width, d = depth).
  const int left_b_padding = left_paddings[3];
  const int left_h_padding = left_paddings[2];
  const int left_w_padding = left_paddings[1];
  const int left_d_padding = left_paddings[0];

  const int right_b_padding = right_paddings[3];
  const int right_h_padding = right_paddings[2];
  const int right_w_padding = right_paddings[1];
  const int right_d_padding = right_paddings[0];

  const int input_depth = ArraySize(input_dims, 0);

  // Leading batch padding: left_b_padding full batches of zeros at the very
  // start of the (contiguous) output buffer.
  if (left_b_padding != 0)
  {
    memset(output_data, 0, left_b_padding * output_height * output_width * output_depth *
           sizeof(T));
  }
  // Walk only the batches that contain real data; everything before/after
  // was (or will be) zero-filled in bulk.
  for (int out_b = left_b_padding; out_b < output_batch - right_b_padding; ++out_b)
  {
    // Leading height padding: left_h_padding full rows of zeros at the top
    // of this batch.
    if (left_h_padding != 0)
    {
      memset(output_data + Offset(output_dims, 0, 0, 0, out_b), 0,
             left_h_padding * output_width * output_depth * sizeof(T));
    }
    for (int out_h = left_h_padding; out_h < output_height - right_h_padding; ++out_h)
    {
      // Leading width padding: zeros at the start of this row.
      if (left_w_padding != 0)
      {
        memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), 0,
               left_w_padding * output_depth * sizeof(T));
      }
      for (int out_w = left_w_padding; out_w < output_width - right_w_padding; ++out_w)
      {
        // Leading depth padding for this (w, h, b) position.
        if (left_d_padding != 0)
        {
          memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b), 0,
                 left_d_padding * sizeof(T));
        }

        // Copy one full depth slice of real input data; the input index is
        // the output index shifted back by the leading paddings.
        T* out = output_data +
                 Offset(output_dims, left_d_padding, out_w, out_h, out_b);
        const T* in =
            input_data + Offset(input_dims, 0, out_w - left_w_padding,
                                out_h - left_h_padding, out_b - left_b_padding);
        memcpy(out, in, input_depth * sizeof(T));

        // Trailing depth padding after the copied slice.
        if (right_d_padding != 0)
        {
          memset(
              output_data + Offset(output_dims, output_depth - right_d_padding,
                                   out_w, out_h, out_b),
              0, right_d_padding * sizeof(T));
        }
      }
      // Trailing width padding: zeros at the end of this row.
      if (right_w_padding != 0)
      {
        memset(
            output_data + Offset(output_dims, 0, output_width - right_w_padding,
                                 out_h, out_b),
            0, right_w_padding * output_depth * sizeof(T));
      }
    }
    // Trailing height padding: zero rows at the bottom of this batch.
    if (right_h_padding != 0)
    {
      memset(output_data + Offset(output_dims, 0, 0,
                                  output_height - right_h_padding, out_b),
             0, right_h_padding * output_width * output_depth * sizeof(T));
    }
  }
  // Trailing batch padding: full batches of zeros at the end of the output.
  if (right_b_padding != 0)
  {
    memset(output_data +
               Offset(output_dims, 0, 0, 0, output_batch - right_b_padding),
           0,
           right_b_padding * output_height * output_width * output_depth *
               sizeof(T));
  }
}
150
151} // namespace
152
153bool padGeneric(const uint8_t* inputData, const Shape& inputShape, const int32_t* paddings,
154 uint8_t* outputData, const Shape& outputShape)
155{
156 int32_t numInputDims = static_cast<int32_t>(getNumberOfDimensions(inputShape));
157
158 std::vector<int> beforePadding;
159 std::vector<int> afterPadding;
160 // The lower level implementation expects the paddings in the reverse order.
161 for (int32_t i = numInputDims - 1; i >= 0; --i)
162 {
163 beforePadding.push_back(paddings[i * 2]);
164 afterPadding.push_back(paddings[i * 2 + 1]);
165 }
166
167 if (inputShape.type == OperandType::TENSOR_FLOAT32)
168 {
169 ::Pad(reinterpret_cast<const float*>(inputData),
170 convertShapeToDims(inputShape),
171 beforePadding, afterPadding,
172 reinterpret_cast<float*>(outputData),
173 convertShapeToDims(outputShape));
174 }
175 else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM)
176 {
177 ::Pad(reinterpret_cast<const uint8_t*>(inputData),
178 convertShapeToDims(inputShape),
179 beforePadding, afterPadding,
180 reinterpret_cast<uint8_t*>(outputData),
181 convertShapeToDims(outputShape));
182 }
183 else
184 {
185 LOG(ERROR) << "Unsupported data type";
186 return false;
187 }
188 return true;
189}
#define ASSERT(v)
Definition Assert.h:24
int Offset(const Dims< 4 > &dims, int i0, int i1, int i2, int i3)
Definition Dims.h:64
int ArraySize(const Dims< N > &array, int index)
Definition Dims.h:76
Dims< 4 > convertShapeToDims(const Shape &shape)
Definition Dims.h:31
#define LOG(...)
Definition Logging.h:36
uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx)
Definition Shape.cpp:60
uint32_t getNumberOfDimensions(const Shape &shape)
Definition Shape.cpp:58
bool padGeneric(const uint8_t *inputData, const Shape &inputShape, const int32_t *paddings, uint8_t *outputData, const Shape &outputShape)
Definition Pad.cpp:153
bool padPrepare(const Shape &input, const int32_t *paddingsData, const Shape &paddingsShape, Shape *output)
Definition Pad.cpp:28
list input_data
Definition infer.py:29
void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &input_shape, const T *input_data, const Shape &output_shape, T *output_data, const T *constant_value_data)
Definition Pad.h:30
Definition Dims.h:26
Definition Shape.h:28
OperandType type
Definition Shape.h:29