support llama pro #2338, add rslora

Former-commit-id: 40d659b7f30dd5a004703c176ec1f22dc864e505
hiyouga
2024-02-15 02:27:36 +08:00
parent b403f8d8a8
commit 596b6828cb
24 changed files with 438 additions and 203 deletions

View File

@@ -1,5 +1,5 @@
 from .loader import load_model_and_tokenizer
-from .utils import dispatch_model, get_modelcard_args, load_valuehead_params
+from .utils import dispatch_model, load_valuehead_params
 
-__all__ = ["load_model_and_tokenizer", "dispatch_model", "get_modelcard_args", "load_valuehead_params"]
+__all__ = ["load_model_and_tokenizer", "dispatch_model", "load_valuehead_params"]

View File

@@ -1,8 +1,7 @@
-import inspect
 from typing import TYPE_CHECKING
 
 import torch
-from peft import LoraConfig, PeftModel, TaskType, get_peft_model
+from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model
 from transformers.integrations import is_deepspeed_zero3_enabled
 
 from ..extras.logging import get_logger
@@ -47,12 +46,22 @@ def init_adapter(
         if not num_layers:
             raise ValueError("Current model does not support freeze tuning.")
 
-        if finetuning_args.num_layer_trainable > 0:  # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = [num_layers - k - 1 for k in range(finetuning_args.num_layer_trainable)]
-        else:  # fine-tuning the first n layers if num_layer_trainable < 0
-            trainable_layer_ids = [k for k in range(-finetuning_args.num_layer_trainable)]  # noqa: C416
+        if finetuning_args.use_llama_pro:
+            if num_layers % finetuning_args.num_layer_trainable != 0:
+                raise ValueError(
+                    "`num_layers` {} should be divisible by `num_layer_trainable` {}.".format(
+                        num_layers, finetuning_args.num_layer_trainable
+                    )
+                )
+
+            stride = num_layers // finetuning_args.num_layer_trainable
+            trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
+        elif finetuning_args.num_layer_trainable > 0:  # fine-tuning the last n layers if num_layer_trainable > 0
+            trainable_layer_ids = range(num_layers - finetuning_args.num_layer_trainable, num_layers)
+        else:  # fine-tuning the first n layers if num_layer_trainable < 0
+            trainable_layer_ids = range(-finetuning_args.num_layer_trainable)
 
-        freeze_modules = set()
+        freeze_modules = {"all"}
         for name, _ in model.named_modules():
             if "0." in name:
                 freeze_modules.add(name.split("0.")[-1].split(".")[0])
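The new use_llama_pro branch picks every stride-th block, so the trainable layers are spread evenly across the depth of the expanded model instead of forming a contiguous prefix or suffix; each selected index is the last layer in a group of stride layers, which is where LLaMA Pro's block expansion inserts its new blocks. A minimal standalone sketch of the selection arithmetic, with made-up sizes (a hypothetical 32-layer model, 8 trainable layers):

# Sketch of the LLaMA Pro layer selection above, hypothetical values.
num_layers, num_layer_trainable = 32, 8
assert num_layers % num_layer_trainable == 0  # enforced by the ValueError above

stride = num_layers // num_layer_trainable  # 4
trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
print(list(trainable_layer_ids))  # [3, 7, 11, 15, 19, 23, 27, 31]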
@@ -65,13 +74,13 @@ def init_adapter(
                 )
 
             for idx in trainable_layer_ids:
-                trainable_layers.append("{:d}.{}".format(idx, module_name))
+                trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else ""))
 
         for name, param in model.named_parameters():
-            if not any(trainable_layer in name for trainable_layer in trainable_layers):
-                param.requires_grad_(False)
-            else:
+            if any(trainable_layer in name for trainable_layer in trainable_layers):
                 param.data = param.data.to(torch.float32)
+            else:
+                param.requires_grad_(False)
 
     if finetuning_args.finetuning_type == "lora":
         logger.info("Fine-tuning method: LoRA")
@@ -94,7 +103,7 @@ def init_adapter(
                 adapter_to_merge = model_args.adapter_name_or_path
 
             for adapter in adapter_to_merge:
-                model = PeftModel.from_pretrained(model, adapter)
+                model: "LoraModel" = PeftModel.from_pretrained(model, adapter)
                 model = model.merge_and_unload()
 
             if len(adapter_to_merge) > 0:
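This loop merges a chain of adapters one at a time: each LoRA adapter is wrapped around the current weights and immediately folded in, so the next adapter applies on top of the merged result; the commit only adds a "LoraModel" type annotation for clarity. A hedged sketch of the same pattern in isolation (model and adapter paths are placeholders):

# Merge several LoRA adapters into a base model sequentially.
from peft import PeftModel
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("path/to/base-model")
for adapter in ["path/to/adapter-a", "path/to/adapter-b"]:
    model = PeftModel.from_pretrained(model, adapter)  # wrap the current weights
    model = model.merge_and_unload()  # fold the LoRA deltas back into plain weights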
@@ -114,22 +123,14 @@ def init_adapter(
             "target_modules": target_modules,
             "lora_alpha": finetuning_args.lora_alpha,
             "lora_dropout": finetuning_args.lora_dropout,
+            "use_rslora": finetuning_args.use_rslora,
         }
 
         if model_args.use_unsloth:
-            from unsloth import FastLlamaModel, FastMistralModel  # type: ignore
+            from unsloth import FastLanguageModel  # type: ignore
 
             unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
-            if "loftq_config" in inspect.signature(FastLlamaModel.get_peft_model).parameters:
-                unsloth_peft_kwargs["loftq_config"] = {}
-
-            if getattr(model.config, "model_type", None) == "llama":
-                model = FastLlamaModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
-            elif getattr(model.config, "model_type", None) == "mistral":
-                model = FastMistralModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
-            else:
-                raise NotImplementedError
-
+            model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
         else:
             lora_config = LoraConfig(
                 task_type=TaskType.CAUSAL_LM,
@@ -142,7 +143,7 @@ def init_adapter(
         for param in filter(lambda p: p.requires_grad, model.parameters()):
             param.data = param.data.to(torch.bfloat16 if finetuning_args.lora_bf16_mode else torch.float32)
 
-        if model_args.adapter_name_or_path is not None:
-            logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))
+    if model_args.adapter_name_or_path is not None:
+        logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))
 
     return model
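The use_rslora flag added to peft_kwargs above switches peft to rank-stabilized LoRA (rsLoRA), which scales the adapter update by lora_alpha / sqrt(r) instead of the standard lora_alpha / r, so larger ranks do not shrink the effective learning signal. A minimal sketch of the flag in isolation, with hypothetical hyperparameters:

# Rank-stabilized LoRA: only the scaling rule changes, the adapter shape does not.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    use_rslora=True,  # scale by lora_alpha / sqrt(r) instead of lora_alpha / r
)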

View File

@@ -55,7 +55,7 @@ def load_model_and_tokenizer(
     model = None
     if is_trainable and model_args.use_unsloth:
-        from unsloth import FastLlamaModel, FastMistralModel  # type: ignore
+        from unsloth import FastLanguageModel  # type: ignore
 
         unsloth_kwargs = {
             "model_name": model_args.model_name_or_path,
@@ -63,14 +63,12 @@ def load_model_and_tokenizer(
             "dtype": model_args.compute_dtype,
             "load_in_4bit": model_args.quantization_bit == 4,
             "token": model_args.hf_hub_token,
-            "device_map": get_current_device(),
+            "device_map": {"": get_current_device()},
             "rope_scaling": getattr(config, "rope_scaling", None),
         }
-        if getattr(config, "model_type", None) == "llama":
-            model, _ = FastLlamaModel.from_pretrained(**unsloth_kwargs)
-        elif getattr(config, "model_type", None) == "mistral":
-            model, _ = FastMistralModel.from_pretrained(**unsloth_kwargs)
-        else:
+        try:
+            model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs)
+        except NotImplementedError:
             logger.warning("Unsloth does not support model type {}.".format(getattr(config, "model_type", None)))
             model_args.use_unsloth = False
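Replacing the per-architecture FastLlamaModel / FastMistralModel imports with FastLanguageModel defers model-type dispatch to unsloth itself, which raises NotImplementedError for unsupported architectures; the try/except above downgrades that to a warning and falls back to the regular loader. A hedged sketch of the unified entry point (the model name is a placeholder):

# Load a model and tokenizer through unsloth's single entry point.
from unsloth import FastLanguageModel  # type: ignore

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="some-org/some-llama-model",
    max_seq_length=4096,
    dtype=None,  # let unsloth choose the compute dtype
    load_in_4bit=True,
)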
@@ -87,17 +85,6 @@ def load_model_and_tokenizer(
             **config_kwargs,
         )
 
-    # Add llama-factory tag to push these tags on the Hub.
-    # the feature is available since 4.37.0 but adding the check
-    # just in case
-    if hasattr(model, "add_model_tags"):
-        model.add_model_tags(["llama-factory"])
-    else:
-        logger.warning_once(
-            "Was not able to properly tag the model, if you want to use the model tagging feature, make sure to "
-            "have transformers>=4.37.0 installed on your environment."
-        )
-
     patch_model(model, tokenizer, model_args, is_trainable)
     register_autoclass(config, model, tokenizer)
@@ -134,4 +121,12 @@ def load_model_and_tokenizer(
     if not is_trainable:
         logger.info("This IS expected that the trainable params is 0 if you are using model for inference only.")
 
+    if model_args.print_param_status:
+        for name, param in model.named_parameters():
+            print(
+                "name: {}, dtype: {}, device: {}, trainable: {}".format(
+                    name, param.dtype, param.device, param.requires_grad
+                )
+            )
+
     return model, tokenizer

View File

@@ -300,6 +300,11 @@ def patch_model(
     if is_trainable:
         patch_mixtral_replace_moe_impl()
 
+    try:
+        model.add_model_tags(["llama-factory"])
+    except Exception:
+        logger.warning("Cannot properly tag the model.")
+
 
 def patch_valuehead_model(model: "AutoModelForCausalLMWithValueHead") -> None:
     def tie_weights(self: "AutoModelForCausalLMWithValueHead") -> None:

View File

@@ -1,5 +1,5 @@
 import inspect
-from typing import TYPE_CHECKING, Any, Dict, List
+from typing import TYPE_CHECKING, Dict, List
 
 import torch
 from transformers import PreTrainedModel
@@ -13,7 +13,7 @@ from ..extras.misc import get_current_device
 if TYPE_CHECKING:
     from transformers import PretrainedConfig, PreTrainedTokenizer
 
-    from ..hparams import DataArguments, FinetuningArguments, ModelArguments
+    from ..hparams import ModelArguments
 
 
 logger = get_logger(__name__)
@@ -76,18 +76,6 @@ def find_all_linear_modules(model: "PreTrainedModel") -> List[str]:
     return list(module_names)
 
 
-def get_modelcard_args(
-    model_args: "ModelArguments", data_args: "DataArguments", finetuning_args: "FinetuningArguments"
-) -> Dict[str, Any]:
-    return {
-        "tasks": "text-generation",
-        "license": "other",
-        "finetuned_from": model_args.model_name_or_path,
-        "dataset": [dataset.strip() for dataset in data_args.dataset.split(",")],
-        "tags": ["llama-factory"] + (["lora"] if finetuning_args.finetuning_type == "lora" else []),
-    }
-
-
 def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") -> Dict[str, torch.Tensor]:
     r"""
     Loads value head parameters from Hugging Face Hub or local disk.