ONE - On-device Neural Engine
Utils.cpp
/*
 * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Utils.h"
#include "StringUtils.h"

#include <luci_interpreter/core/Tensor.h>
#include <luci/IR/CircleOpcode.h>
#include <luci/IR/CircleNodeDecl.h>

#include <pybind11/numpy.h>

#include <cassert>
#include <stdexcept>
#include <vector>

using Tensor = luci_interpreter::Tensor;

namespace py = pybind11;
using namespace py::literals;

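// Throw std::runtime_error with MSG when COND does not hold.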
#define THROW_UNLESS(COND, MSG) \
  if (not(COND))                \
    throw std::runtime_error(MSG);

namespace
{

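// Wrap a tensor's contents in a C-contiguous numpy array matching its shape and
// element type. Empty (zero-sized) tensors are returned as py::none().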
py::array numpyArray(const Tensor *tensor)
{
  assert(tensor != nullptr); // FIX_CALLER_UNLESS

  const auto tensor_shape = tensor->shape();

  uint32_t size = 1;
  std::vector<uint32_t> shape(tensor_shape.num_dims());
  for (int i = 0; i < tensor_shape.num_dims(); i++)
  {
    THROW_UNLESS(tensor_shape.dim(i) >= 0, "Negative dimension detected in " + tensor->name());

    shape[i] = tensor_shape.dim(i);
    size *= shape[i];
  }

  if (size == 0)
    return py::none();

  switch (tensor->element_type())
  {
    case loco::DataType::FLOAT32:
      return py::array_t<float, py::array::c_style>(shape, tensor->data<float>());
    case loco::DataType::S16:
      return py::array_t<int16_t, py::array::c_style>(shape, tensor->data<int16_t>());
    case loco::DataType::S32:
      return py::array_t<int32_t, py::array::c_style>(shape, tensor->data<int32_t>());
    case loco::DataType::S64:
      return py::array_t<int64_t, py::array::c_style>(shape, tensor->data<int64_t>());
    case loco::DataType::U8:
      return py::array_t<uint8_t, py::array::c_style>(shape, tensor->data<uint8_t>());
    case loco::DataType::BOOL:
      return py::array_t<bool, py::array::c_style>(shape, tensor->data<bool>());
    default:
      throw std::runtime_error("Unsupported data type");
  }
}

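// Package a tensor's (per-channel) quantization parameters into a Python dict
// with "scale", "zero_point", and "quantized_dimension" entries.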
py::dict quantparam(const Tensor *tensor)
{
  assert(tensor != nullptr); // FIX_CALLER_UNLESS

  auto scale = tensor->scales();
  auto zp = tensor->zero_points();

  py::list py_scale;
  for (auto s : scale)
  {
    py_scale.append(s);
  }

  py::list py_zp;
  for (auto z : zp)
  {
    py_zp.append(z);
  }

  auto quantparam = py::dict("scale"_a = py_scale, "zero_point"_a = py_zp,
                             "quantized_dimension"_a = tensor->quantized_dimension());
  return quantparam;
}

} // namespace

namespace dalgona
{

py::object none() { return py::none(); }

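// Collect every input of `node` as a Python dict holding the input's name, its
// data as a numpy array, its quantization parameters, and whether it is a constant.
// Inputs produced by CircleOutputExclude (e.g., an omitted bias) are skipped.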
std::vector<py::dict> inputsPyArray(const luci::CircleNode *node,
                                    luci_interpreter::Interpreter *interpreter)
{
  assert(node != nullptr);        // FIX_CALLER_UNLESS
  assert(interpreter != nullptr); // FIX_CALLER_UNLESS

  std::vector<py::dict> inputs;
  for (uint32_t i = 0; i < node->arity(); ++i)
  {
    const auto input_tensor = interpreter->getTensor(node->arg(i));
    auto circle_node = static_cast<luci::CircleNode *>(node->arg(i));

    // skip invalid inputs (e.g., non-existing bias in TCONV)
    if (circle_node->opcode() == luci::CircleOpcode::CIRCLEOUTPUTEXCLUDE)
      continue;

    auto py_input =
      py::dict("name"_a = circle_node->name(), "data"_a = numpyArray(input_tensor),
               "quantparam"_a = quantparam(input_tensor),
               "is_const"_a = circle_node->opcode() == luci::CircleOpcode::CIRCLECONST);
    inputs.push_back(py_input);
  }
  return inputs;
}

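// Collect the outputs of a multi-output node. Each successor is expected to be a
// virtual "*Out" node whose tensor is wrapped into a Python dict (name, data,
// quantparam, is_const).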
std::vector<py::dict> outputsPyArray(const luci::CircleNode *node,
                                     luci_interpreter::Interpreter *interpreter)
{
  std::vector<py::dict> outputs;
  for (auto succ : loco::succs(node))
  {
    const auto output_tensor = interpreter->getTensor(succ);
    auto circle_node = static_cast<luci::CircleNode *>(succ);

    auto opcode_str = toString(circle_node->opcode());
    // Check if node is a multi-output node
    // Assumption: Multi-output virtual nodes have 'Out' suffix
    // TODO Fix this if the assumption changes
    THROW_UNLESS(opcode_str.substr(opcode_str.length() - 3) == "Out",
                 "Invalid output detected in " + node->name());

    auto py_output =
      py::dict("name"_a = circle_node->name(), "data"_a = numpyArray(output_tensor),
               "quantparam"_a = quantparam(output_tensor),
               "is_const"_a = circle_node->opcode() == luci::CircleOpcode::CIRCLECONST);
    outputs.push_back(py_output);
  }
  return outputs;
}

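// Wrap the output tensor of `node` itself into a Python dict (name, data,
// quantparam, is_const).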
// Note: Only returns 1 output
py::dict outputPyArray(const luci::CircleNode *node, luci_interpreter::Interpreter *interpreter)
{
  assert(node != nullptr);        // FIX_CALLER_UNLESS
  assert(interpreter != nullptr); // FIX_CALLER_UNLESS

  const auto tensor = interpreter->getTensor(node);

  THROW_UNLESS(tensor != nullptr, "Null tensor detected in " + node->name());

  auto py_output = py::dict("name"_a = node->name(), "data"_a = numpyArray(tensor),
                            "quantparam"_a = quantparam(tensor),
                            "is_const"_a = node->opcode() == luci::CircleOpcode::CIRCLECONST);
  return py_output;
}

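// Return true if `node` is an operator that produces multiple outputs (the kind
// whose results are exposed through virtual "*Out" nodes).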
bool multi_out_node(const luci::CircleNode *node)
{
  switch (node->opcode())
  {
    // TODO Update this list when new Op is added
    // Tip: grep "public GraphBuilderMultiOutput" in luci/import
    case luci::CircleOpcode::BIDIRECTIONAL_SEQUENCE_LSTM:
    case luci::CircleOpcode::CUSTOM:
    case luci::CircleOpcode::IF:
    case luci::CircleOpcode::NON_MAX_SUPPRESSION_V4:
    case luci::CircleOpcode::NON_MAX_SUPPRESSION_V5:
    case luci::CircleOpcode::SPLIT:
    case luci::CircleOpcode::SPLIT_V:
    case luci::CircleOpcode::TOPK_V2:
    case luci::CircleOpcode::UNIQUE:
    case luci::CircleOpcode::UNPACK:
      return true;
    default:
      return false;
  }
}

} // namespace dalgona

#undef THROW_UNLESS