ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Reshape.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "OMStatus.h"
18
19#include "core/OMUtils.h"
20#include "core/OMDataType.h"
21
24
25using namespace onert_micro;
26using namespace onert_micro::train;
27
namespace
{

// Slot indices into the runtime kernel's tensor arrays for CircleReshape:
// the data tensor is kernel input 0 and the (single) output is index 0.
// (Reshape's "new shape" tensor, if present, is not touched by this kernel.)
constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
35
36/*
37 * - Calculate input gradient - Optional (not required if it is last op)
38 */
39OMStatus onert_micro::train::train_kernel_CircleReshape(const OMBackpropExecuteArgs &args)
40{
41 // Check is it last layer for training
42 if (args.is_last_layer)
43 return Ok;
44
45 core::OMRuntimeContext &runtime_context = args.backward_context;
46 core::OMRuntimeStorage &runtime_storage = args.backward_storage;
47 uint16_t op_index = args.kernel_index;
48
49 execute::OMRuntimeKernel runtime_kernel;
50 runtime_kernel.readKernel(op_index, runtime_context);
51
52 const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
53 const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
54
55 assert(input != nullptr);
56 assert(output != nullptr);
57
58 OMStatus status = Ok;
59
60 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
61 if (status != Ok)
62 return status;
63
64 uint8_t *input_data = runtime_kernel.inputs_data[inputTensorIdx];
65 uint8_t *output_data = runtime_kernel.outputs_data[outputTensorIdx];
66
67 assert(input_data != nullptr);
68 assert(output_data != nullptr);
69
70 // Check is it inplace kernel
71 if (input_data == output_data)
72 return Ok;
73
74 const core::OMRuntimeShape shape(input);
75
76 const size_t element_size =
77 static_cast<uint32_t>(getOMDataTypeSize(core::onertMicroDatatype(input->type())));
78 const int32_t num_elements = shape.flatSize();
79 std::memcpy(input_data, output_data, num_elements * element_size);
80
81 return status;
82}
uint8_t * outputs_data[maxOutputSize]
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
constexpr uint32_t outputTensorIdx
args
Definition infer.py:21
list input_data
Definition infer.py:29
uint32_t num_elements(const Shape &shape)
The number of elements of a feature map of a given shape.
Definition Shape.h:59
OMDataType onertMicroDatatype(const circle::TensorType type)
size_t getOMDataTypeSize(OMDataType data_type)
Definition OMDataType.h:179