ONE - On-device Neural Engine
Loading...
Searching...
No Matches
model_dump.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "support/CommandLine.h"
18#include "mir/IrDotDumper.h"
19
20#include <caffe_importer.h>
21
22#include <exception>
23#include <iostream>
24
25using namespace nnc;
26using namespace mir;
27
28int main(int argc, const char **argv)
29{
30 cli::Option<std::string> model_path(cli::optname("--model"), cli::overview("Path to the model"));
32
33 try
34 {
35 auto graph = mir_caffe::loadModel(model_path);
36 dumpGraph(graph.get(), std::cout);
37 }
38 catch (std::exception &e)
39 {
40 std::cout << "Error: " << e.what() << std::endl;
41 return -1;
42 }
43
44 return 0;
45}
int main(void)
static CommandLine * getParser()
Returns the singleton CommandLine parser instance.
void parseCommandLine(int argc, const char **argv, bool check_nonoptional=true)
Parses the command-line options registered with the parser.
this class describes command line option
std::unique_ptr< mir::Graph > loadModel(const std::string &filename)
void dumpGraph(const Graph *graph, std::ostream &stream)