ONE - On-device Neural Engine
Loading...
Searching...
No Matches
ConvBackend.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "ConvBackend.h"
18
21
22#include <tensorflow/contrib/lite/kernels/register.h>
23#include <tensorflow/contrib/lite/model.h>
24#include <tensorflow/contrib/lite/builtin_op_data.h>
25
26#include <cstdlib>
27#include <stdexcept>
28
29using namespace ::tflite;
30using namespace ::tflite::ops::builtin;
31
/// @brief Allocate raw storage for a single T with malloc and return a typed pointer.
///
/// malloc (rather than new) is intentional: the caller hands the pointer to code
/// that releases it with free(). Throws std::bad_alloc when allocation fails.
template <typename T> T *typed_malloc(void)
{
  T *ptr = reinterpret_cast<T *>(malloc(sizeof(T)));
  if (ptr == nullptr)
  {
    throw std::bad_alloc{};
  }
  return ptr;
}
45
46// Comment from 'context.h'
47//
48// Parameters for asymmetric quantization. Quantized values can be converted
49// back to float using:
50// real_value = scale * (quantized_value - zero_point);
51static inline TfLiteQuantizationParams make_default_quantization(void)
52{
53 return TfLiteQuantizationParams{1.0f, 0};
54}
55
56static inline std::vector<int> as_dims(const nncc::core::ADT::feature::Shape &shape)
57{
58 const int N = 1;
59 const int C = static_cast<int>(shape.depth());
60 const int H = static_cast<int>(shape.height());
61 const int W = static_cast<int>(shape.width());
62
63 return std::vector<int>{N, H, W, C};
64}
65
66static inline std::vector<int> as_dims(const nncc::core::ADT::kernel::Shape &shape)
67{
68 const int N = static_cast<int>(shape.count());
69 const int C = static_cast<int>(shape.depth());
70 const int H = static_cast<int>(shape.height());
71 const int W = static_cast<int>(shape.width());
72
73 return std::vector<int>{N, H, W, C};
74}
75
77 : _ifm_name{model.ifm_name()}, _ofm_name{model.ofm_name()}
78{
// NOTE(review): this is a Doxygen-extracted listing; the constructor signature
// line and several original lines (79-80, 82-83 — presumably using-declarations
// that bring NHWCLayout/make_overlay into scope) are not visible here. Confirm
// against the upstream ConvBackend.cpp before editing.
81
84
85 // Set kernel data
86 const auto &ker_shape = model.ker_shape();
87
// One float per kernel element (num_elements is computed from ker_shape)
88 _kernel.resize(num_elements(ker_shape));
89
// The overlay lets the flat _kernel buffer be addressed as (n, ch, row, col)
// laid out NHWC, matching the dims reported to TfLite below
90 auto kernel_overlay = make_overlay<float, NHWCLayout>(ker_shape, _kernel.data());
91
// Copy every kernel coefficient from the model into our own buffer, so the
// read-only tensor registered below points at storage this object controls
92 for (uint32_t n = 0; n < ker_shape.count(); ++n)
93 {
94 for (uint32_t ch = 0; ch < ker_shape.depth(); ++ch)
95 {
96 for (uint32_t row = 0; row < ker_shape.height(); ++row)
97 {
98 for (uint32_t col = 0; col < ker_shape.width(); ++col)
99 {
100 kernel_overlay.at(n, ch, row, col) = model.ker_data().at(n, ch, row, col);
101 }
102 }
103 }
104 }
105
106 // Set bias data
// All-zero bias, one entry per output channel (kernel count)
107 _bias.resize(ker_shape.count(), 0.0f);
108
109 // Initialize interpreter
// scale 1.0 / zero_point 0 (see make_default_quantization): effectively no
// (de)quantization for these float tensors
110 auto quantization = make_default_quantization();
111
112 // Create Tensors
113 // 0 -> OFM
114 // 1 -> IFM
115 // 2 -> Kernel
116 // 3 -> Bias
117 _interp.AddTensors(4);
118
119 _interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, _ofm_name.c_str(),
120 as_dims(model.ofm_shape()), quantization);
121
122 _interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, _ifm_name.c_str(),
123 as_dims(model.ifm_shape()), quantization);
124
// Read-only tensors alias _kernel/_bias storage; presumably both are members
// of ConvBackend and therefore outlive _interp — verify in ConvBackend.h
125 _interp.SetTensorParametersReadOnly(
126 2, kTfLiteFloat32 /* type */, "kernel" /* name */, as_dims(model.ker_shape()), quantization,
127 reinterpret_cast<const char *>(_kernel.data()), _kernel.size() * sizeof(float));
128
129 _interp.SetTensorParametersReadOnly(
130 3, kTfLiteFloat32 /* type */, "bias" /* name */, {static_cast<int>(_bias.size())}, quantization,
131 reinterpret_cast<const char *>(_bias.data()), _bias.size() * sizeof(float));
132
// malloc'd on purpose: NOTE(review) the interpreter presumably takes ownership
// of 'param' and releases it with free() — confirm against the TfLite
// AddNodeWithParameters contract before changing the allocation strategy
133 auto param = typed_malloc<TfLiteConvParams>();
134
// VALID padding, unit stride, no fused activation
135 param->padding = kTfLitePaddingValid;
136 param->stride_width = 1;
137 param->stride_height = 1;
138 param->activation = kTfLiteActNone;
139
// Single CONV_2D node: inputs {1,2,3} = IFM/kernel/bias, output {0} = OFM
// (tensor indices per the table above)
140 _interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
141 BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D));
142
// Only the IFM is an external input; kernel/bias are baked-in constants
143 _interp.SetInputs({1});
144 _interp.SetOutputs({0});
145}
uint32_t depth(void) const
Definition Shape.h:44
uint32_t width(void) const
Definition Shape.h:46
uint32_t height(void) const
Definition Shape.h:45
uint32_t width(void) const
Definition Shape.h:47
uint32_t height(void) const
Definition Shape.h:46
uint32_t count(void) const
Definition Shape.h:44
uint32_t depth(void) const
Definition Shape.h:45
C
Definition infer.py:52
Dims< uint32_t > as_dims(const nncc::core::ADT::tensor::Shape &)
Definition dims.cpp:24
uint32_t num_elements(const Shape &shape)
Return the number of elements in a kernel of a given shape.
Definition Shape.h:61
Overlay< T, T * > make_overlay(const Shape &shape, T *base)
Definition Overlay.h:71
T * typed_malloc(void)
Allocate memory with malloc and return a typed pointer.
ConvBackend(const nnsuite::conv::Model &model)