ONE - On-device Neural Engine
Loading...
Searching...
No Matches
KernelGenerator.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "KernelGenerator.h"
18
19#include "ops/BulkLayer.h"
20
21#include <backend/Backend.h>
22#include <backend/IConfig.h>
23#include <memory>
24#include <util/Utils.h>
25#include <util/logging.h>
27
28#include <stdexcept>
29
30namespace onert
31{
32namespace backend
33{
34namespace trix
35{
36
38 const std::shared_ptr<TensorBuilder> &tensor_builder,
39 const std::shared_ptr<basic::TensorRegistry> &tensor_reg,
40 const std::shared_ptr<DevContext> &dev_context)
41 : basic::KernelGeneratorBase{graph}, _ctx(graph.operands()), _operations_ctx{graph.operations()},
42 _tensor_builder(tensor_builder), _tensor_reg{tensor_reg}, _dev_context{dev_context}
43{
44 // DO NOTHING
45}
46
47std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex ind)
48{
49 auto ret = std::make_unique<exec::FunctionSequence>();
50 ret->enableDynamicShapeInferer(false);
51
52 const auto &op = _graph.operations().at(ind);
53 op.accept(*this);
54 ret->append(releaseFunction());
55 return ret;
56}
57
/**
 * @brief Generate a BulkLayer kernel for a Bulk operation.
 *
 * Gathers the portable tensors for every input and output of the node,
 * reads the compiled-binary path from the node's parameters, and configures
 * a BulkLayer that is handed back through _return_fn.
 *
 * NOTE(review): one original source line at the top of this body is missing
 * from this extracted view; verify against the repository before editing.
 */
void KernelGenerator::visit(const ir::operation::Bulk &node)
{
  // Look up the registered portable tensor for each output index.
  std::vector<IPortableTensor *> output_tensors;
  for (const auto &ofm_idx : node.getOutputs())
    output_tensors.emplace_back(_tensor_reg->getPortableTensor(ofm_idx));

  // Inputs are read-only, hence const-qualified element type.
  std::vector<const IPortableTensor *> input_tensors;
  for (const auto &ifm_idx : node.getInputs())
    input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));

  // parameters: path to the pre-compiled binary the bulk layer executes
  const auto &binary_path = node.param().binary_path;

  auto fn = std::make_unique<ops::BulkLayer>();

  fn->configure(input_tensors, output_tensors, binary_path, _dev_context);

  // Hand the configured kernel back to generate() via releaseFunction().
  _return_fn = std::move(fn);
}
79
80} // namespace trix
81} // namespace backend
82} // namespace onert
std::unique_ptr< exec::IFunction > _return_fn
std::unique_ptr< exec::IFunction > releaseFunction()
KernelGenerator(const ir::Graph &graph, const std::shared_ptr< TensorBuilder > &tensor_builder, const std::shared_ptr< basic::TensorRegistry > &tensor_reg, const std::shared_ptr< DevContext > &dev_context)
std::unique_ptr< exec::FunctionSequence > generate(ir::OperationIndex op_ind) override
const Operations & operations() const override
Definition Graph.h:114
const Param & param() const
Definition Bulk.h:45
const Object & at(const Index &index) const
Get the object that is associated with the given index.
This file contains utility macro.