support GPTQ tuning #729 #1481 #1545, fix chatglm template #1453 #1480 #1569

Former-commit-id: fdccc6cc9b68890199e9250cabdb996ff2f853b9
This commit is contained in:
hiyouga
2023-11-20 22:52:11 +08:00
parent 28258aecd2
commit 0105cd48f2
5 changed files with 43 additions and 4 deletions
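
For context (not part of this diff), "GPTQ tuning" generally means attaching LoRA adapters to an already GPTQ-quantized checkpoint while the quantized base weights stay frozen; that is also why such a model cannot be merged and exported, which the guard in the hunk below enforces. A minimal sketch with transformers + peft, where the model id, target modules, and LoRA hyperparameters are placeholders and the optimum/auto-gptq packages are assumed to be installed:

    # Context sketch, not from this commit: LoRA tuning on top of a GPTQ checkpoint.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import LoraConfig, get_peft_model

    model_id = "TheBloke/Llama-2-7B-GPTQ"  # placeholder GPTQ-quantized checkpoint
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # transformers tags quantized models with the backend name; the export guard
    # in this commit reads the same attribute via getattr(model, "quantization_method", None).
    print(getattr(model, "quantization_method", None) == "gptq")  # True for a GPTQ model

    peft_config = LoraConfig(
        r=16,
        lora_alpha=32,
        task_type="CAUSAL_LM",
        target_modules=["q_proj", "v_proj"],  # assumed module names for a LLaMA-style model
    )
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()  # only the LoRA adapters are trainable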


@@ -37,8 +37,13 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra
def export_model(args: Optional[Dict[str, Any]] = None, max_shard_size: Optional[str] = "10GB"):
    model_args, _, finetuning_args, _ = get_infer_args(args)
    model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)

    if getattr(model, "quantization_method", None) == "gptq":
        raise ValueError("Cannot export a GPTQ quantized model.")

    model.config.use_cache = True
    model.save_pretrained(finetuning_args.export_dir, max_shard_size=max_shard_size)

    try:
        tokenizer.padding_side = "left" # restore padding side
        tokenizer.init_kwargs["padding_side"] = "left"
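
A hedged usage sketch of the guard added above: exporting a GPTQ-quantized checkpoint now fails fast instead of writing out broken merged weights. The model id and export_dir are placeholders, the argument keys follow the repo's HfArgumentParser dataclasses, and a package-level export_model entry point is assumed:

    # Hypothetical usage sketch, not from this commit.
    from llmtuner import export_model  # assumes the package exposes export_model

    try:
        export_model({
            "model_name_or_path": "TheBloke/Llama-2-7B-GPTQ",  # placeholder GPTQ model
            "template": "default",
            "export_dir": "exported_model",
        })
    except ValueError as err:
        print(err)  # "Cannot export a GPTQ quantized model."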