ONE - On-device Neural Engine
Squeeze.cpp
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "kernels/Squeeze.h"

#include "kernels/Utils.h"

#include <stdexcept>

namespace luci_interpreter
{
namespace kernels
{

Squeeze::Squeeze(const Tensor *input, Tensor *output, const SqueezeParams &params)
  : KernelWithParams<SqueezeParams>({input}, {output}, params)
{
}

void Squeeze::configure()
{
  int input_num_dims = input()->shape().num_dims();
  int num_squeeze_dims = params().squeeze_dims.size();
  assert(input_num_dims <= 8);
  bool should_squeeze[8] = {false};
  int num_squeezed_dims = 0;
  if (num_squeeze_dims == 0)
  {
    for (int idx = 0; idx < input_num_dims; ++idx)
    {
      if (input()->shape().dim(idx) == 1)
      {
        should_squeeze[idx] = true;
        ++num_squeezed_dims;
      }
    }
  }
  else
  {
    for (int idx = 0; idx < num_squeeze_dims; ++idx)
    {
      int current = params().squeeze_dims[idx] < 0 ? params().squeeze_dims[idx] + input_num_dims
                                                   : params().squeeze_dims[idx];
      assert(current >= 0 && current < input_num_dims && input()->shape().dim(current) == 1);
      if (!should_squeeze[current])
        ++num_squeezed_dims;
      should_squeeze[current] = true;
    }
  }
  Shape output_shape(input_num_dims - num_squeezed_dims);
  for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx)
  {
    if (!should_squeeze[in_idx])
    {
      output_shape.dim(out_idx++) = input()->shape().dim(in_idx);
    }
  }
  output()->resize(output_shape);
}

void Squeeze::execute() const
{
  assert(input()->shape().num_elements() == output()->shape().num_elements());

  const auto *input_data = input()->data<void>();
  auto *output_data = output()->data<void>();
  std::memcpy(output_data, input_data,
              getDataTypeSize(input()->element_type()) * input()->shape().num_elements());
}

} // namespace kernels
} // namespace luci_interpreter
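Squeeze::configure() removes size-1 dimensions from the input shape: all of them when squeeze_dims is empty, otherwise only the listed axes, with negative axes counted from the back. The sketch below reproduces that shape rule on plain std::vector<int32_t> values outside the luci_interpreter API; the helper squeezed_shape() and the example shapes are illustrative assumptions, not part of the library.

// Minimal standalone sketch (not luci_interpreter code): the output-shape rule
// used by Squeeze::configure(), written against plain std::vector<int32_t>.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Drop size-1 dimensions: every one of them when `squeeze_dims` is empty,
// otherwise only the listed axes (negative axes count from the back).
std::vector<int32_t> squeezed_shape(const std::vector<int32_t> &input_shape,
                                    const std::vector<int32_t> &squeeze_dims)
{
  const int num_dims = static_cast<int>(input_shape.size());
  std::vector<bool> should_squeeze(num_dims, false);
  if (squeeze_dims.empty())
  {
    for (int i = 0; i < num_dims; ++i)
      should_squeeze[i] = (input_shape[i] == 1);
  }
  else
  {
    for (int32_t d : squeeze_dims)
    {
      const int axis = d < 0 ? d + num_dims : d;
      assert(axis >= 0 && axis < num_dims && input_shape[axis] == 1);
      should_squeeze[axis] = true;
    }
  }
  std::vector<int32_t> output_shape;
  for (int i = 0; i < num_dims; ++i)
    if (!should_squeeze[i])
      output_shape.push_back(input_shape[i]);
  return output_shape;
}

int main()
{
  // {1, 3, 1, 5} with empty squeeze_dims -> "3 5": every size-1 axis is removed.
  for (int32_t d : squeezed_shape({1, 3, 1, 5}, {}))
    std::cout << d << ' ';
  std::cout << '\n';

  // {1, 3, 1, 5} with squeeze_dims = {-2} -> "1 3 5": -2 wraps to axis 2 only.
  for (int32_t d : squeezed_shape({1, 3, 1, 5}, {-2}))
    std::cout << d << ' ';
  std::cout << '\n';
  return 0;
}

Because squeezing only drops size-1 axes, the element count and row-major layout are unchanged, which is why Squeeze::execute() can copy the raw buffer with a single std::memcpy.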