ONE - On-device Neural Engine
Loading...
Searching...
No Matches
ReshapeLayer.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "ReshapeLayer.h"

#include "../KernelGenerator.h"
#include "../Validator.h"

#include <cstring>
21
22namespace onert::backend::cpu
23{
24
25void Validator::visit(const ir::operation::Reshape &) { _supported = true; }
26
27void KernelGenerator::visit(const ir::operation::Reshape &node)
28{
29 const auto output_index{node.getOutputs().at(0)};
30 const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)};
31
32 auto output_tensor = _tensor_reg->getPortableTensor(output_index);
33 auto input_tensor = _tensor_reg->getPortableTensor(input_index);
34
35 // optional 2nd input
36 IPortableTensor *shape_tensor = nullptr;
37
38 if (node.getInputs().size() == 2)
39 {
40 const auto shape_index{node.getInputs().at(ir::operation::Reshape::Input::SHAPE)};
41 shape_tensor = _tensor_reg->getPortableTensor(shape_index);
42 }
43
44 auto fn = std::make_unique<ops::ReshapeLayer>();
45
46 fn->configure(input_tensor, shape_tensor, output_tensor);
47 _return_fn = std::move(fn);
48}
49
50} // namespace onert::backend::cpu
51
53{
54
55ReshapeLayer::ReshapeLayer() : _input(nullptr), _shape(nullptr), _output(nullptr)
56{
57 // DO NOTHING
58}
59
61{
62 // output buffer equals to input buffer means that copy is not needed
63 if (_output->buffer() != _input->buffer())
64 {
65 size_t count = _input->total_size();
66 memcpy(_output->buffer(), _input->buffer(), count);
67 }
68}
69
71 IPortableTensor *output)
72{
73 _input = input;
74 /* note : shape is optional. If not provided from model, _shape is nullptr. */
75 _shape = shape;
76 _output = output;
77}
78
80
81} // namespace onert::backend::cpu::ops
A tensor class that is portable for other backends.
size_t total_size() const override final
virtual uint8_t * buffer() const =0
std::unique_ptr< exec::IFunction > _return_fn
void configure(const IPortableTensor *input, const IPortableTensor *shape, IPortableTensor *output)