ONE - On-device Neural Engine
Loading...
Searching...
No Matches
PermuteLayer.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef __ONERT_BACKEND_BUILTIN_KERNEL_PERMUTELAYER_H__
18#define __ONERT_BACKEND_BUILTIN_KERNEL_PERMUTELAYER_H__
19
20#include "../ExternalContext.h"
21#include "../../../exec/IPermuteFunction.h"
22
23#include <ruy/thread_pool.h> // from @ruy
24
25namespace onert
26{
27namespace backend
28{
29namespace builtin
30{
31namespace kernel
32{
33
35{
36public:
// Builds a batched permute/copy function: the i-th src tensor is permuted into
// the i-th dst tensor according to types[i]. external_context supplies the ruy
// thread pool used to run PermuteWorkerTask instances in parallel.
37 PermuteLayer(const std::vector<ITensor *> &src_tensors, const std::vector<ITensor *> &dst_tensors,
38 const std::vector<ir::PermuteType> &types,
39 const std::shared_ptr<ExternalContext> &external_context);
40
// NOTE(review): presumably prepares the per-tensor worker tasks ahead of run()
// (see appendPermuteTasks and _tasks_map) — confirm against PermuteLayer.cc.
41 void optimize() override;
42
43 void run() override;
44
45private:
// Provides access to the shared ruy thread pool for task execution.
46 std::shared_ptr<ExternalContext> _external_context;
47
48private:
// Splits the permutation of src_tensor -> dst_tensor over loop_shape into
// PermuteWorkerTask entries; `size` is the byte count copied per element.
49 void appendPermuteTasks(const ITensor *src_tensor, ITensor *dst_tensor,
50 const ir::Shape &loop_shape, size_t size,
51 const ir::PermuteType &permute_type);
52
// Executes the prepared tasks for `src`, writing into dst_buffer.
53 void runPermuteTasks(backend::ITensor *src, uint8_t *dst_buffer);
54
55 struct PermuteWorkerTask : ruy::Task
56 {
57 using Strides = ir::Coordinates;
58
59 PermuteWorkerTask(const ITensor &src_tensor, ITensor &dst_tensor,
60 const ir::Coordinates &start_coords, const ir::Shape &loop_shape, size_t size,
61 const ir::PermuteType &permute_type)
62 : _src_buffer{src_tensor.buffer()}, _dst_buffer{dst_tensor.buffer()},
63 _src_start_offset{src_tensor.calcOffset(start_coords)},
64 _dst_start_offset{dst_tensor.calcOffset(start_coords)}, _src_strides{}, _dst_strides{},
65 _loop_shape{loop_shape}, _size{size}, _permute_type{permute_type}
66 {
67 // Set strides
68 setStrides(src_tensor, &_src_strides);
69 setStrides(dst_tensor, &_dst_strides);
70 }
71 // Constructor for a copy
72 PermuteWorkerTask(const uint8_t *src_buffer, uint8_t *dst_buffer, uint32_t src_start_offset,
73 uint32_t dst_start_offset, size_t size)
74 : _src_buffer{src_buffer}, _dst_buffer{dst_buffer}, _src_start_offset{src_start_offset},
75 _dst_start_offset{dst_start_offset}, _src_strides{0}, _dst_strides{0}, _loop_shape{1},
76 _size{size}, _permute_type{ir::PermuteType::COPY}
77 {
78 // DO NOTHING
79 }
80 void setBuffers(const uint8_t *src_buffer, uint8_t *dst_buffer)
81 {
82 _src_buffer = src_buffer;
83 _dst_buffer = dst_buffer;
84 }
85 void Run() override
86 {
87 ShapeLoop(_loop_shape, [&](const onert::ir::Coordinates &coords) {
88 size_t src_offset = _src_start_offset;
89 size_t dst_offset = _dst_start_offset;
90 assert(static_cast<size_t>(_loop_shape.rank()) == coords.size());
91 ir::Coordinates dst_coords = coords;
92 if (_permute_type != ir::PermuteType::COPY && _loop_shape.rank() == 4)
93 {
94 dst_coords = ir::convertCoordinates(coords, _permute_type);
95 }
96 for (auto i = 0; i < _loop_shape.rank(); ++i)
97 {
98 assert(coords[i] >= 0 && dst_coords[i] >= 0);
99 src_offset += coords[i] * _src_strides[i];
100 dst_offset += dst_coords[i] * _dst_strides[i];
101 }
102 memcpy(_dst_buffer + dst_offset, _src_buffer + src_offset, _size);
103 });
104 }
105
106 private:
107 void setStrides(const ITensor &tensor, Strides *strides)
108 {
109 auto shape = tensor.getShape();
110 const size_t rank = shape.rank();
111 for (size_t i = 0; i < rank; ++i)
112 {
113 ir::Coordinates no_step(rank), one_step(rank);
114 one_step.set(i, 1);
115 if (shape.dim(i) > 1)
116 {
117 strides->set(i, tensor.calcOffset(one_step) - tensor.calcOffset(no_step));
118 }
119 else
120 {
121 // If dimension value is 0 or 1, the stride of the dimension will be not used
122 // Do not call calcOffset() with coordinate value that is greater than dimension value
123 strides->set(i, 0);
124 }
125 assert((*strides)[i] >= 0);
126 }
127 }
128
129 private:
130 const uint8_t *_src_buffer;
131 uint8_t *_dst_buffer;
132 size_t _src_start_offset;
133 size_t _dst_start_offset;
134 Strides _src_strides;
135 Strides _dst_strides;
136 const ir::Shape _loop_shape;
137 const size_t _size;
138 const ir::PermuteType _permute_type;
139 };
140 std::unordered_map<const ITensor *, std::vector<PermuteWorkerTask>> _tasks_map;
141};
142
143} // namespace kernel
144} // namespace builtin
145} // namespace backend
146} // namespace onert
147
148#endif // __ONERT_BACKEND_BUILTIN_KERNEL_PERMUTELAYER_H__
Array< CornerBox > coords
virtual size_t calcOffset(const ir::Coordinates &coords) const =0
virtual uint8_t * buffer() const =0
Class to represent the position (offset) of a tensor. Assumes that the front is the higher dimension....
Definition Coordinates.h:37
void set(size_t dimension, int32_t coordinate)
Set the coordinate of one of the coordinates.
Definition Coordinates.h:79
Coordinates convertCoordinates(const Coordinates &coords, const PermuteType &type)
Convert coordinate for layout change.
PermuteType
Definition Layout.h:37
int32_t size[5]
Definition Slice.cpp:35
void ShapeLoop(const onert::ir::Shape &shape, L lambda_function)
Definition Utils.h:60