ONE - On-device Neural Engine
minimal.cc
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnfw.h"
#include <vector>
#include <iostream>

// Get the total number of elements in nnfw_tensorinfo->dims (product of all dimensions).
uint64_t num_elems(const nnfw_tensorinfo *ti)
{
  uint64_t n = 1;
  for (uint32_t i = 0; i < ti->rank; ++i)
  {
    n *= ti->dims[i];
  }
  return n;
}

int main(const int argc, char **argv)
{
  nnfw_session *session = nullptr;
  nnfw_create_session(&session);

  // Load the nnpackage given as the first command-line argument
  nnfw_load_model_from_file(session, argv[1]);

  // Use the acl_neon backend for CONV_2D and acl_cl for everything else.
  // Note that the default backend is acl_cl.
  nnfw_set_op_backend(session, "CONV_2D", "acl_neon");

  // Compile the model
  nnfw_prepare(session);

  // Prepare input. Here we just allocate a dummy input array.
  std::vector<float> input;
  nnfw_tensorinfo ti;
  nnfw_input_tensorinfo(session, 0, &ti); // get the first input's info
  uint32_t input_elements = num_elems(&ti);
  input.resize(input_elements);
  // TODO: Please add initialization for your input.
  nnfw_set_input(session, 0, ti.dtype, input.data(), sizeof(float) * input_elements);

  // Prepare output
  std::vector<float> output;
  nnfw_output_tensorinfo(session, 0, &ti); // get the first output's info
  uint32_t output_elements = num_elems(&ti);
  output.resize(output_elements);
  nnfw_set_output(session, 0, ti.dtype, output.data(), sizeof(float) * output_elements);

  // Do inference
  nnfw_run(session);

  // TODO: Please print or compare the output value in your way.

  nnfw_close_session(session);

  std::cout << "nnpackage " << argv[1] << " runs successfully." << std::endl;
  return 0;
}
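For brevity, minimal.cc ignores the NNFW_STATUS return codes. The sketch below shows the same create/load/prepare flow with basic error checking; it assumes NNFW_STATUS_NO_ERROR is the success value declared in nnfw.h, and run_package is a hypothetical helper name, not part of the API.

// Sketch only: return-code checking around session setup, assuming
// NNFW_STATUS_NO_ERROR is the success value from nnfw.h.
// run_package is a hypothetical helper, not part of the runtime API.
#include "nnfw.h"
#include <iostream>

int run_package(const char *package_path)
{
  nnfw_session *session = nullptr;
  if (nnfw_create_session(&session) != NNFW_STATUS_NO_ERROR)
  {
    std::cerr << "failed to create session" << std::endl;
    return 1;
  }
  if (nnfw_load_model_from_file(session, package_path) != NNFW_STATUS_NO_ERROR ||
      nnfw_prepare(session) != NNFW_STATUS_NO_ERROR)
  {
    std::cerr << "failed to load or prepare " << package_path << std::endl;
    nnfw_close_session(session);
    return 1;
  }
  // ... set inputs/outputs and call nnfw_run(session) as in the listing above ...
  nnfw_close_session(session);
  return 0;
}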
Referenced symbols:

uint64_t num_elems(const nnfw_tensorinfo *ti)
    Get the total number of elements in nnfw_tensorinfo->dims. Definition: minimal.cc:21

int main(const int argc, char **argv)
    Entry point of the example.

nnfw.h
    This file describes the runtime API.

NNFW_STATUS nnfw_create_session(nnfw_session **session)
    Create a new session instance.

NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path)
    Load model from nnpackage file or directory.

NNFW_STATUS nnfw_set_op_backend(nnfw_session *session, const char *op, const char *backend)
    Set the operation's backend. Definition: nnfw_api.cc:173

NNFW_STATUS nnfw_prepare(nnfw_session *session)
    Prepare session to be ready for inference. Definition: nnfw_api.cc:72

NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
    Get i-th input tensor info. Definition: nnfw_api.cc:134

NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
    Get i-th output tensor info. Definition: nnfw_api.cc:141

NNFW_STATUS nnfw_set_input(nnfw_session *session, uint32_t index, NNFW_TYPE type, const void *buffer, size_t length)
    Set input buffer. Definition: nnfw_api.cc:96

NNFW_STATUS nnfw_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
    Set output buffer. Definition: nnfw_api.cc:103

NNFW_STATUS nnfw_run(nnfw_session *session)
    Run inference. Definition: nnfw_api.cc:78

NNFW_STATUS nnfw_close_session(nnfw_session *session)
    Close a session instance. Definition: nnfw_api.cc:60

nnfw_tensorinfo
    Tensor info describes the type and shape of tensors.
    Members: NNFW_TYPE dtype; int32_t rank; int32_t dims[NNFW_MAX_RANK]
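As a short usage sketch of the nnfw_tensorinfo struct above, the following prints the dtype, rank, and dims of the first input tensor. It assumes a session that has already been created, loaded, and prepared as in minimal.cc; print_first_input_info is a hypothetical helper name.

// Sketch only: inspect the first input tensor's info. Assumes the session was
// already created, loaded, and prepared as in minimal.cc.
#include "nnfw.h"
#include <iostream>

void print_first_input_info(nnfw_session *session)
{
  nnfw_tensorinfo ti;
  nnfw_input_tensorinfo(session, 0, &ti); // fills dtype, rank, and dims
  std::cout << "dtype=" << ti.dtype << " rank=" << ti.rank << " dims=[";
  for (int32_t i = 0; i < ti.rank; ++i)
  {
    std::cout << ti.dims[i] << (i + 1 < ti.rank ? ", " : "");
  }
  std::cout << "]" << std::endl;
}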