fix modelscope data hub
Former-commit-id: 5b63e8c22538a4788e4b6c8df50e6e6be93ceeac
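The title refers to LLaMA-Factory's optional ModelScope integration, which lets users without Hugging Face access pull models and datasets from the ModelScope hub instead. As a rough sketch of that pattern (assuming the USE_MODELSCOPE_HUB environment-variable convention and the public modelscope package APIs; the sketch below is illustrative, not taken from this commit):

# Sketch of a ModelScope fallback for dataset downloads.
# Assumptions: the USE_MODELSCOPE_HUB env var convention and the public
# modelscope APIs (MsDataset) -- illustrative only, not the commit's code.
import os


def use_modelscope() -> bool:
    # Gate ModelScope usage behind an environment variable.
    return os.environ.get("USE_MODELSCOPE_HUB", "0").lower() in ("true", "1")


def load_hub_dataset(name: str, split: str = "train"):
    if use_modelscope():
        from modelscope.msdatasets import MsDataset
        return MsDataset.load(name, split=split)  # ModelScope data hub
    from datasets import load_dataset
    return load_dataset(name, split=split)  # default: Hugging Face hub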
@@ -66,8 +66,8 @@ def init_adapter(
     if model_args.checkpoint_dir is not None:
         is_mergeable = True
-        if getattr(model, "quantization_method", None) == "gptq":
-            assert len(model_args.checkpoint_dir) == 1, "GPTQ quantized model only accepts a single checkpoint."
+        if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable
+            assert len(model_args.checkpoint_dir) == 1, "Quantized model only accepts a single checkpoint."
             is_mergeable = False
 
         if (is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable):
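The hunk above widens the single-checkpoint guard from GPTQ to any quantized backbone: packed low-bit weights cannot absorb LoRA deltas reliably, so the loader keeps adapters separate instead of merging them. A minimal sketch of that gating logic, with hypothetical names (plan_adapter_checkpoints is not a real helper in the repo):

# Sketch of the merge gate above. All names here are illustrative.
from typing import List, Optional, Tuple


def plan_adapter_checkpoints(
    checkpoint_dirs: List[str],
    quantization_method: Optional[str],
    is_trainable: bool,
    resume_lora_training: bool,
) -> Tuple[List[str], List[str]]:
    """Split checkpoints into (merge_now, keep_active)."""
    is_mergeable = True
    if quantization_method is not None:
        # merging LoRA into packed quantized weights is unstable
        assert len(checkpoint_dirs) == 1, "Quantized model only accepts a single checkpoint."
        is_mergeable = False

    if (is_trainable and resume_lora_training) or not is_mergeable:
        # keep the last adapter active (resume training on it) instead of merging
        return checkpoint_dirs[:-1], checkpoint_dirs[-1:]
    return checkpoint_dirs, []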
@@ -1,4 +1,3 @@
-import os
 import math
 import torch
 from types import MethodType
@@ -13,7 +12,6 @@ from transformers import (
     PreTrainedModel,
     PreTrainedTokenizerBase
 )
 from transformers.models.llama import modeling_llama as LlamaModule
 from transformers.utils.versions import require_version
 from trl import AutoModelForCausalLMWithValueHead
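The MethodType and modeling_llama-as-LlamaModule imports above belong to the project's monkey-patching machinery for LLaMA attention. A minimal sketch of that pattern, assuming a hypothetical patched_forward (the real patch body lives elsewhere in the repo):

# Sketch of instance-level monkey-patching via MethodType.
# `patched_forward` is a hypothetical stand-in for the project's patch.
from types import MethodType

from transformers.models.llama import modeling_llama as LlamaModule


def patched_forward(self, *args, **kwargs):
    # ... custom attention logic (e.g. shifted attention) would go here ...
    return LlamaModule.LlamaAttention.forward(self, *args, **kwargs)


def apply_llama_patch(model) -> None:
    for module in model.modules():
        if isinstance(module, LlamaModule.LlamaAttention):
            # rebind forward on the instance, leaving the class untouched
            module.forward = MethodType(patched_forward, module)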
@@ -44,12 +44,12 @@ def _verify_model_args(model_args: "ModelArguments", finetuning_args: "FinetuningArguments")
     if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
         raise ValueError("Quantization is only compatible with the LoRA method.")
 
-    if (
-        model_args.checkpoint_dir is not None
-        and len(model_args.checkpoint_dir) != 1
-        and finetuning_args.finetuning_type != "lora"
-    ):
-        raise ValueError("Multiple checkpoints are only available for LoRA tuning.")
+    if model_args.checkpoint_dir is not None and len(model_args.checkpoint_dir) != 1:
+        if finetuning_args.finetuning_type != "lora":
+            raise ValueError("Multiple checkpoints are only available for LoRA tuning.")
+
+        if model_args.quantization_bit is not None:
+            raise ValueError("Quantized model only accepts a single checkpoint. Merge them first.")
 
 
 def parse_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
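Read together, the new nested check makes the multi-checkpoint path reachable only for LoRA tuning, and never for a quantized model. A standalone sketch with illustrative dataclasses in place of the project's argument classes:

# Standalone sketch of the validation above; the dataclasses are illustrative.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ModelArgs:
    checkpoint_dir: Optional[List[str]] = None
    quantization_bit: Optional[int] = None


@dataclass
class FinetuningArgs:
    finetuning_type: str = "lora"


def verify_model_args(model_args: ModelArgs, finetuning_args: FinetuningArgs) -> None:
    if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
        raise ValueError("Quantization is only compatible with the LoRA method.")

    if model_args.checkpoint_dir is not None and len(model_args.checkpoint_dir) != 1:
        if finetuning_args.finetuning_type != "lora":
            raise ValueError("Multiple checkpoints are only available for LoRA tuning.")

        if model_args.quantization_bit is not None:
            raise ValueError("Quantized model only accepts a single checkpoint. Merge them first.")


# e.g. two adapter checkpoints on a 4-bit model are rejected:
# verify_model_args(ModelArgs(["ckpt_a", "ckpt_b"], 4), FinetuningArgs("lora"))  # raises ValueError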