Skip to content

Commit

Permalink
fix ptq compatibility (PaddlePaddle#1208)
Browse files Browse the repository at this point in the history
* fix ptq compatibility

* touch off CI
  • Loading branch information
yghstill authored Jun 30, 2022
1 parent 919a9b1 commit 6a438d9
Showing 1 changed file with 50 additions and 24 deletions.
74 changes: 50 additions & 24 deletions paddleslim/quant/quanter.py
Original file line number Diff line number Diff line change
Expand Up @@ -453,30 +453,56 @@ def quant_post_static(
Returns:
None
"""
post_training_quantization = PostTrainingQuantization(
executor=executor,
sample_generator=sample_generator,
batch_generator=batch_generator,
data_loader=data_loader,
model_dir=model_dir,
model_filename=model_filename,
params_filename=params_filename,
batch_size=batch_size,
batch_nums=batch_nums,
scope=scope,
algo=algo,
round_type=round_type,
hist_percent=hist_percent,
bias_correction=bias_correction,
quantizable_op_type=quantizable_op_type,
is_full_quantize=is_full_quantize,
weight_bits=weight_bits,
activation_bits=activation_bits,
activation_quantize_type=activation_quantize_type,
weight_quantize_type=weight_quantize_type,
onnx_format=onnx_format,
skip_tensor_list=skip_tensor_list,
optimize_model=optimize_model)
# Build the PostTrainingQuantization object with a version-compatibility
# fallback: the `skip_tensor_list` keyword only exists in Paddle >= 2.3.1,
# so first attempt the full call and, if the installed Paddle rejects it,
# retry with the identical argument set minus `skip_tensor_list`.
try:
    post_training_quantization = PostTrainingQuantization(
        executor=executor,
        sample_generator=sample_generator,
        batch_generator=batch_generator,
        data_loader=data_loader,
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename,
        batch_size=batch_size,
        batch_nums=batch_nums,
        scope=scope,
        algo=algo,
        round_type=round_type,
        hist_percent=hist_percent,
        bias_correction=bias_correction,
        quantizable_op_type=quantizable_op_type,
        is_full_quantize=is_full_quantize,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        activation_quantize_type=activation_quantize_type,
        weight_quantize_type=weight_quantize_type,
        onnx_format=onnx_format,
        skip_tensor_list=skip_tensor_list,  # support in Paddle >= 2.3.1
        optimize_model=optimize_model)
# NOTE(review): bare `except:` is intentional best-effort version probing,
# but it is overly broad — it would also mask unrelated constructor errors
# (bad arguments, OOM, KeyboardInterrupt). Catching `TypeError` would be
# safer here; confirm which exception older Paddle actually raises.
except:
    # Fallback for Paddle < 2.3.1: same call without `skip_tensor_list`,
    # meaning the skip list is silently ignored on older versions.
    post_training_quantization = PostTrainingQuantization(
        executor=executor,
        sample_generator=sample_generator,
        batch_generator=batch_generator,
        data_loader=data_loader,
        model_dir=model_dir,
        model_filename=model_filename,
        params_filename=params_filename,
        batch_size=batch_size,
        batch_nums=batch_nums,
        scope=scope,
        algo=algo,
        round_type=round_type,
        hist_percent=hist_percent,
        bias_correction=bias_correction,
        quantizable_op_type=quantizable_op_type,
        is_full_quantize=is_full_quantize,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        activation_quantize_type=activation_quantize_type,
        weight_quantize_type=weight_quantize_type,
        onnx_format=onnx_format,
        optimize_model=optimize_model)

post_training_quantization.quantize()
post_training_quantization.save_quantized_model(
quantize_model_path,
Expand Down

0 comments on commit 6a438d9

Please sign in to comment.