void ModelAnalyzer::appendOperationToInference(Operation *op, const string &function_name,
                                               std::vector<size_t> aux_args)
{
  vector<size_t> node_output_tensors;

  // process operation outputs
  if (op->getType() == Operation::Type::input)
  {
    // register input tensor
    const string &tensor_name = op->getOutput(0)->getName();
    const auto tensor_id = declareInputTensor(tensor_name, op->getOutputShape(0));
    node_output_tensors.push_back(tensor_id);
  }
  else if (op->getType() == Operation::Type::constant)
  {
    // register constant tensor; its data is deserialized into this tensor at runtime
    const auto tensor_id = declareTemporaryTensor();
    node_output_tensors.push_back(tensor_id);
  }
  else if (op->getType() == Operation::Type::output)
  {
    assert(!op->getInput(0)->getName().empty());
  }
  else
  {
    // named outputs become persistent tensors, unnamed ones are temporaries
    for (const auto &output : op->getOutputs())
    {
      const auto &tensor_name = output.getName();
      const auto tensor_id =
          tensor_name.empty() ? declareTemporaryTensor() : declarePersistentTensor(tensor_name);
      node_output_tensors.push_back(tensor_id);
    }
  }

  // process operation inputs: every input is an output of an already processed operation
  vector<size_t> node_input_tensors;
  for (const Operation::Output *input : op->getInputs())
  {
    size_t idx = input->getIndex();
    const Operation *prev_op = input->getNode();
    assert(_opToDescr.find(prev_op) != _opToDescr.end());
    auto call = dynamic_cast<const CallFunction *>(_opToDescr[prev_op]);
    assert(call);
    const size_t &in_tensor_id = call->outputs[idx];
    node_input_tensors.push_back(in_tensor_id);
  }

  std::copy(aux_args.begin(), aux_args.end(), std::back_inserter(node_input_tensors));
  unique_ptr<Action> operation_call(new CallFunction(
      op, function_name, std::move(node_input_tensors), std::move(node_output_tensors)));
  _inferenceSequence.push_back(std::move(operation_call));
  _opToDescr[op] = _inferenceSequence.back().get();
}

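// Illustration (hypothetical tensor ids, not produced by real code): appending a conv
// whose single data input lives in tensor 0, with function_name "conv2d" and
// aux_args {_temp_tensor_id}, records a CallFunction with inputs {0, _temp_tensor_id}
// and one fresh output tensor id; aux args always follow the real operation inputs.
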
void ModelAnalyzer::updateMaxTemporarySize(const size_t size)
{
  _max_temp_size = std::max(_max_temp_size, size);
}

size_t ModelAnalyzer::declareInputTensor(const std::string &name, const mir::Shape &shape)
{
  assert(!name.empty() && "Input tensor must have name");
  size_t id = _allocatedTensors++;
  _tensors.push_back({id, TensorDescriptor::Type::input, name, shape});
  _inputs.push_back(id);
  return id;
}

size_t ModelAnalyzer::declarePersistentTensor(const std::string &name)
{
  assert(!name.empty());
  size_t id = _allocatedTensors++;
  _tensors.push_back({id, TensorDescriptor::Type::persistent, name, {}});
  _persistent_tensors.push_back(id);
  return id;
}

size_t ModelAnalyzer::declareTemporaryTensor()
{
  size_t id = _allocatedTensors++;
  _tensors.push_back({id, TensorDescriptor::Type::temporary, "", {}});
  return id;
}

void ModelAnalyzer::gatherDefUseInfo(const vector<unique_ptr<Action>> &post_order,
                                     map<size_t, size_t> &first_def, map<size_t, size_t> &last_use)
{
  for (size_t pos = 0; pos < post_order.size(); ++pos)
  {
    const unique_ptr<Action> &action = post_order[pos];
    const CallFunction *call = dynamic_cast<CallFunction *>(action.get());
    assert(call);

    // update def info: remember where each temporary tensor is first written
    for (size_t output_tensor_id : call->outputs)
    {
      const TensorDescriptor &td = _tensors[output_tensor_id];
      if (td.type != TensorDescriptor::Type::temporary)
        continue;

      if (!first_def.count(output_tensor_id))
        first_def[output_tensor_id] = pos;
    }

    // update use info: remember where each temporary tensor is last read
    for (size_t input_tensor_id : call->inputs)
    {
      const TensorDescriptor &td = _tensors[input_tensor_id];
      if (td.type != TensorDescriptor::Type::temporary)
        continue;

      last_use[input_tensor_id] = pos;
    }
  }
}

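// Worked example (assumed three-call sequence with temporaries t1 and t2):
//   pos 0: outputs {t1}              -> first_def[t1] = 0
//   pos 1: inputs {t1}, outputs {t2} -> last_use[t1] = 1, first_def[t2] = 1
//   pos 2: inputs {t2}               -> last_use[t2] = 2
// i.e. t1 must be alive over positions [0, 1] and t2 over [1, 2].
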
void ModelAnalyzer::constructInferenceSequence(const vector<Operation *> &post_order)
{
  // walk the collected operations in reverse post order and emit a call for each
  for (auto it = post_order.rbegin(); it != post_order.rend(); ++it)
  {
    Operation *node = *it;
    node->accept(this);
  }

  // map every temporary tensor id to the positions in the sequence
  // where it is defined first and used last
  map<size_t, size_t> first_def;
  map<size_t, size_t> last_use;

  gatherDefUseInfo(_inferenceSequence, first_def, last_use);

  // Rebuild the sequence, inserting memory management actions.
  // Every iteration performs three steps:
  // 1) construct the temporary tensors first defined by the current operation,
  // 2) insert the operation call itself,
  // 3) destroy the temporary tensors not used after the current operation.
  std::vector<unique_ptr<Action>> old_inference_seq;
  old_inference_seq.swap(_inferenceSequence);
  _inferenceSequence.reserve(old_inference_seq.size());

  for (size_t pos = 0; pos < old_inference_seq.size(); ++pos)
  {
    unique_ptr<Action> &action = old_inference_seq[pos];
    const CallFunction *call = dynamic_cast<CallFunction *>(action.get());
    assert(call);

    // construct required temporary tensors
    for (size_t output_tensor_id : call->outputs)
    {
      const TensorDescriptor &td = _tensors[output_tensor_id];
      assert(td.id == output_tensor_id);
      if (td.type != TensorDescriptor::Type::temporary)
        continue;

      if (first_def[output_tensor_id] == pos)
      {
        unique_ptr<Action> tmp_constructor(new CreateTmp(output_tensor_id));
        _inferenceSequence.push_back(std::move(tmp_constructor));
      }
    }

    // insert the operation call
    _inferenceSequence.push_back(std::move(action));

    // destroy temporary tensors that are no longer needed
    for (size_t input_tensor_id : call->inputs)
    {
      const TensorDescriptor &td = _tensors[input_tensor_id];
      assert(td.id == input_tensor_id);
      if (td.type != TensorDescriptor::Type::temporary)
        continue;

      if (last_use[input_tensor_id] == pos)
      {
        unique_ptr<Action> tmp_destructor(new DestroyTmp(input_tensor_id));
        _inferenceSequence.push_back(std::move(tmp_destructor));
      }
    }
  }
}

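// Continuing the def/use example above, the rebuilt sequence becomes:
//   CreateTmp(t1), call#0, CreateTmp(t2), call#1, DestroyTmp(t1), call#2, DestroyTmp(t2)
// so each temporary exists exactly over its live range.
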
void ModelAnalyzer::collectOutputs(const mir::Graph *g)
{
  for (ops::OutputOp *out_op : g->getOutputs())
  {
    auto op_call = dynamic_cast<const CallFunction *>(_opToDescr[out_op]);
    assert(op_call->inputs.size() == 1);
    _outputs.push_back(op_call->inputs[0]);
  }
}

void ModelAnalyzer::analyze(const mir::Graph *g)
{
  // DFS path: (node, index of the next outgoing edge to follow)
  stack<pair<Operation *, size_t>> s;
  // nodes in DFS post order
  vector<Operation *> post_order;
  set<Operation *> visited;

  // entry points of the graph: operations without inputs
  vector<Operation *> init_ops;
  for (Operation *op : g->getNodes())
    if (op->getNumInputs() == 0)
      init_ops.emplace_back(op);

  // register the temporary tensor that backs the shared im2col buffer
  _temp_tensor_id = declareTemporaryTensor();

  // iterative DFS starting from every entry point
  for (Operation *in : init_ops)
  {
    if (!visited.count(in))
    {
      visited.insert(in);
      s.push({in, 0});
    }
    while (!s.empty())
    {
      auto &top = s.top();
      Operation *node = top.first;
      auto edge = top.second++;
      // collect all users of this node's outputs
      std::vector<Operation *> next_nodes;
      for (const auto &out : node->getOutputs())
      {
        const auto &uses = out.getUses();
        std::transform(uses.begin(), uses.end(), std::back_inserter(next_nodes),
                       [](Operation::Use use) { return use.getNode(); });
      }
      if (edge == next_nodes.size())
      {
        // every outgoing edge is processed: the node itself is done
        s.pop();
        post_order.push_back(node);
      }
      else
      {
        Operation *successor = next_nodes[edge];
        if (!visited.count(successor))
        {
          visited.insert(successor);
          s.push({successor, 0});
        }
      }
    }
  }

  constructInferenceSequence(post_order);
  collectOutputs(g);
}

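// Usage sketch (hypothetical driver code, not part of this file; the accessor
// names below are assumptions about the ModelAnalyzer interface):
//
//   ModelAnalyzer ma;
//   ma.analyze(graph);                           // graph: const mir::Graph *
//   const auto &seq = ma.getInferenceSequence(); // planned Actions, in order
//   size_t buf = ma.getMaxTemporarySize();       // size of the shared temporary buffer
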
void ModelAnalyzer::visit(ops::Conv2DOp &op)
{
  assert(op.getNumGroups() == 1); // grouped convolution is not supported here
  const auto &kernel_shape = op.getInputShape(1); // kernel is assumed to be input #1
  const auto &out_shape = op.getOutputShape(0);
  const int32_t tmp_size = kernel_shape.dim(1) * kernel_shape.dim(2) * kernel_shape.dim(3) *
                           out_shape.dim(0) * out_shape.dim(1) * out_shape.dim(2);
  updateMaxTemporarySize(static_cast<size_t>(tmp_size));
  appendOperationToInference(&op, "conv2d", {_temp_tensor_id});
}

void ModelAnalyzer::visit(ops::DepthwiseConv2DOp &op) { appendOperationToInference(&op, "depthwiseConv2d"); }

void ModelAnalyzer::visit(ops::FullyConnectedOp &op) { appendOperationToInference(&op, "fullConnect"); }

void ModelAnalyzer::visit(ops::InputOp &op) { appendOperationToInference(&op, "in"); }

void ModelAnalyzer::visit(ops::ConstantOp &op)
{
  // a constant that no operation consumes does not need to be materialized
  const auto *output = op.getOutput(0);
  if (output->getUses().empty())
    return;
  appendOperationToInference(&op, "constant");
}

void ModelAnalyzer::visit(ops::ResizeOp &op)
{
  const auto &in_shape = op.getInputShape(0);
  const auto &out_shape = op.getOutputShape(0);

  assert(in_shape.rank() == 4);
  assert(in_shape.rank() == out_shape.rank());

  if (in_shape.dim(0) != out_shape.dim(0) || in_shape.dim(3) != out_shape.dim(3))
    throw std::runtime_error("Resize is supported only on the height and width dimensions!");

  switch (op.getMode())
  {
    case ops::ResizeOp::ResizeMethod::nearestNeighbor:
      appendOperationToInference(&op, "resize");
      break;
    default:
      assert(false && "Not implemented!");
  }
}

void ModelAnalyzer::visit(ops::TanhOp &op) { appendOperationToInference(&op, "tanhActivation"); }

void ModelAnalyzer::visit(ops::DeConv2DOp &op)
{
  const auto &kernel_shape = op.getInputShape(1); // kernel is assumed to be input #1
  const auto &out_shape = op.getOutputShape(0);
  const int32_t tmp_size = kernel_shape.dim(0) * kernel_shape.dim(1) * kernel_shape.dim(3) *
                           out_shape.dim(0) * out_shape.dim(1) * out_shape.dim(2);
  updateMaxTemporarySize(static_cast<size_t>(tmp_size));
  appendOperationToInference(&op, "convTransposed2d", {_temp_tensor_id});
}

void ModelAnalyzer::visit(ops::ReduceMeanOp &op) { appendOperationToInference(&op, "reduceMean"); }

void ModelAnalyzer::visit(ops::TransposeOp &op) { appendOperationToInference(&op, "transpose"); }

void ModelAnalyzer::visit(ops::LeakyReluOp &op) { appendOperationToInference(&op, "leakyRelu"); }

void ModelAnalyzer::visit(mir::ops::AddOp &op) { appendOperationToInference(&op, "ElementWise<Add>"); }

void ModelAnalyzer::visit(mir::ops::DivOp &op) { appendOperationToInference(&op, "ElementWise<Div>"); }

void ModelAnalyzer::visit(mir::ops::MaxOp &op) { appendOperationToInference(&op, "ElementWise<Max>"); }

void ModelAnalyzer::visit(mir::ops::MulOp &op) { appendOperationToInference(&op, "ElementWise<Mul>"); }

void ModelAnalyzer::visit(mir::ops::SubOp &op) { appendOperationToInference(&op, "ElementWise<Sub>"); }