Commit

Add option for saving inference model in pruning demo. (PaddlePaddle#196)
wanghaoshuang authored Mar 30, 2020
1 parent e4c6ae5 commit 96f645a
Showing 1 changed file with 8 additions and 0 deletions: demo/prune/train.py
@@ -38,6 +38,7 @@
 add_arg('model_path', str, "./models", "The path to save model.")
 add_arg('pruned_ratio', float, None, "The ratios to be pruned.")
 add_arg('criterion', str, "l1_norm", "The prune criterion to be used, support l1_norm and batch_norm_scale.")
+add_arg('save_inference', bool, False, "Whether to save inference model.")
 # yapf: enable

 model_list = models.__all__
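
Note: save_inference defaults to False, so existing runs are unchanged. Turning it on would look something like python train.py --save_inference True, assuming the demo's add_arg helper exposes each option as an argparse flag of the same name (an illustrative invocation, not part of this commit).
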
@@ -230,6 +231,13 @@ def train(epoch, program):
 test(i, pruned_val_program)
 save_model(exe, pruned_val_program,
            os.path.join(args.model_path, str(i)))
+if args.save_inference:
+    infer_model_path = os.path.join(args.model_path, "infer_models",
+                                    str(i))
+    fluid.io.save_inference_model(infer_model_path, ["image"], [out],
+                                  exe, pruned_val_program)
+    _logger.info("Saved inference model into [{}]".format(
+        infer_model_path))


 def main():
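
For context, a model exported this way can be reloaded with fluid.io.load_inference_model, which returns the pruned program together with the feed names (["image"]) and fetch targets recorded by save_inference_model. A minimal sketch, assuming PaddlePaddle 1.x and a 3x224x224 image input as in the ImageNet demos; the epoch directory and batch shape are illustrative:

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Directory written by train.py when save_inference is enabled;
    # "./models/infer_models/0" follows the layout in the diff above.
    infer_model_path = "./models/infer_models/0"

    # Returns the pruned program plus the feed names and fetch targets
    # that save_inference_model recorded at export time.
    infer_program, feed_names, fetch_targets = fluid.io.load_inference_model(
        infer_model_path, exe)

    # Run one forward pass on a dummy batch.
    image = np.random.random((1, 3, 224, 224)).astype("float32")
    results = exe.run(infer_program,
                      feed={feed_names[0]: image},
                      fetch_list=fetch_targets)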
