Support AutoGPTQ in LLaMA Board (#246)

Former-commit-id: fea01226703d1534b5cf511bcb6a49e73bc86ce1
This commit is contained in:
hiyouga
2023-12-16 16:31:30 +08:00
parent 04dc3f4614
commit 9f77e8b025
12 changed files with 123 additions and 65 deletions

View File

@@ -76,8 +76,13 @@ def configure_quantization(
# GPTQ export path: post-training quantization of the exported model via
# AutoGPTQ, driven through transformers' GPTQConfig.
# NOTE(review): diff fragment — the enclosing configure_quantization signature
# is not visible here; finetuning_args / config / config_kwargs / tokenizer /
# model_args are bound by the surrounding function.
if finetuning_args.export_quantization_bit is not None: # gptq
# fail early with an actionable pip command if the backends are missing
require_version("optimum>=1.16.0", "To fix: pip install optimum>=1.16.0")
require_version("auto_gptq>=0.5.0", "To fix: pip install auto_gptq>=0.5.0")
# AutoGPTQ does not support the ChatGLM architecture, so reject it up front
if getattr(config, "model_type", None) == "chatglm":
raise ValueError("ChatGLM model is not supported.")
# build the quantization config; the calibration dataset is assembled by
# get_quantization_dataset from model/finetuning args — presumably a list of
# text samples, verify against that helper
config_kwargs["quantization_config"] = GPTQConfig(
bits=finetuning_args.export_quantization_bit,
tokenizer=tokenizer,
dataset=get_quantization_dataset(tokenizer, model_args, finetuning_args)
)
# let transformers/accelerate auto-place the quantized weights on load
config_kwargs["device_map"] = "auto"