support quantization in export model

Former-commit-id: f32500ae6edccab7d14df4c92467e15986866def
hiyouga
2023-12-15 23:44:50 +08:00
parent 9121722999
commit 296711d502
9 changed files with 120 additions and 32 deletions

@@ -38,10 +38,11 @@ def export_model(args: Optional[Dict[str, Any]] = None):
     model_args, _, finetuning_args, _ = get_infer_args(args)
     model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)
-    if getattr(model, "quantization_method", None) in ["gptq", "awq"]:
-        raise ValueError("Cannot export a GPTQ or AWQ quantized model.")
+    if getattr(model, "quantization_method", None):
+        raise ValueError("Cannot export a quantized model.")
     model.config.use_cache = True
     model = model.to("cpu")
     model.save_pretrained(finetuning_args.export_dir, max_shard_size="{}GB".format(finetuning_args.export_size))
     try:
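
For reference, a minimal usage sketch of the export entry point touched by this hunk. The import path (llmtuner) and the argument names (model_name_or_path, export_dir, export_size) are assumptions inferred from get_infer_args and finetuning_args in the diff, not confirmed by this commit.

# Hypothetical usage sketch: the import path and argument names below are
# assumptions, not confirmed by this commit.
from llmtuner import export_model  # assumed public entry point

if __name__ == "__main__":
    export_model({
        "model_name_or_path": "path/to/finetuned_model",  # model to export
        "export_dir": "path/to/exported_model",           # finetuning_args.export_dir in the diff
        "export_size": 2,                                  # shard size in GB, used as max_shard_size="{}GB"
    })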