Former-commit-id: 3b20c89b342a068356ffc29c3724b645775c65db
hiyouga
2024-04-15 15:32:58 +08:00
parent 106a0104da
commit 9338f878a3
4 changed files with 18 additions and 8 deletions

@@ -32,6 +32,9 @@ def init_adapter(
         logger.info("Adapter is not found at evaluation, load the base model.")
         return model
 
+    if finetuning_args.finetuning_type != "lora" and getattr(model, "quantization_method", None):
+        raise ValueError("You can only use lora for quantized models.")
+
     if finetuning_args.finetuning_type == "full" and is_trainable:
         logger.info("Fine-tuning method: Full")
         if not finetuning_args.pure_bf16:
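Note on the hunk above: the new guard rejects any non-LoRA fine-tuning method as soon as the loaded model reports a quantization method. A minimal, self-contained sketch of that behavior follows; the names `_FakeQuantizedModel` and `check_quantized_finetuning` are hypothetical and used only for illustration, not code from this repository.

    # Hypothetical sketch of the guard added in the hunk above.
    class _FakeQuantizedModel:
        """Stand-in for a model object carrying a quantization_method attribute."""
        quantization_method = "gptq"


    def check_quantized_finetuning(model, finetuning_type: str) -> None:
        # Mirrors the new check: any non-LoRA method on a quantized model is rejected.
        if finetuning_type != "lora" and getattr(model, "quantization_method", None):
            raise ValueError("You can only use lora for quantized models.")


    check_quantized_finetuning(_FakeQuantizedModel(), "lora")    # passes silently
    # check_quantized_finetuning(_FakeQuantizedModel(), "full")  # would raise ValueError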
@@ -129,9 +132,12 @@ def init_adapter(
             if finetuning_args.use_llama_pro:
                 target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable)
 
-            if finetuning_args.use_dora and getattr(model, "quantization_method", None) is not None:
-                if getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES:
-                    raise ValueError("DoRA is not compatible with PTQ-quantized models.")
+            if (
+                finetuning_args.use_dora
+                and getattr(model, "quantization_method", None) is not None
+                and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES
+            ):
+                raise ValueError("DoRA is not compatible with PTQ-quantized models.")
 
             peft_kwargs = {
                 "r": finetuning_args.lora_rank,