// lite_interpreter_model_load.cc
#include "ATen/ATen.h"
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/serialization/import.h>
#include "torch/script.h"
C10_DEFINE_string(
    model,
    "",
    "The given bytecode model to check if it is supported by lite_interpreter.");

int main(int argc, char** argv) {
  c10::SetUsageMessage(
      "Check if an exported bytecode model is runnable by lite_interpreter.\n"
      "Example usage:\n"
      "./lite_interpreter_model_load"
      " --model=<model_file>");

  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  if (FLAGS_model.empty()) {
    std::cerr << "Model file is not provided. Use --model=<model_file>.\n";
    return 1;
  }

  // TODO: avoid having to set this guard for custom mobile build with mobile
  // interpreter.
  c10::InferenceMode mode;

  // _load_for_mobile() throws a c10::Error if the bytecode model cannot be
  // loaded by lite_interpreter, so returning 0 means the model is supported.
  torch::jit::mobile::Module bc = torch::jit::_load_for_mobile(FLAGS_model);
  return 0;
}
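
// A minimal usage sketch, assuming the binary was built against a PyTorch
// build with the mobile interpreter enabled; the model path below is
// illustrative, standing in for any model exported for the lite interpreter:
//
//   ./lite_interpreter_model_load --model=mobile_model.ptl
//
// A zero exit status means the bytecode model loaded successfully; an
// unsupported model raises a c10::Error, which terminates the process with a
// nonzero status.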