support llama pro #2338, add rslora

Former-commit-id: 40d659b7f30dd5a004703c176ec1f22dc864e505
Author: hiyouga
Date: 2024-02-15 02:27:36 +08:00
parent b403f8d8a8
commit 596b6828cb
24 changed files with 438 additions and 203 deletions
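
One of the two features named in the title is rsLoRA (rank-stabilized LoRA), which replaces the classic LoRA scaling alpha / r with alpha / sqrt(r) so the magnitude of the low-rank update does not collapse at higher ranks. Below is a minimal sketch of just the scaling rule; the use_rslora name mirrors the flag that PEFT's LoraConfig exposes, and the function itself is illustrative rather than code from this commit.

import math

def lora_scaling(lora_alpha: float, r: int, use_rslora: bool = False) -> float:
    # Classic LoRA scales the BA update by alpha / r; rsLoRA
    # (Kalajdzievski, 2023) uses alpha / sqrt(r), so the update's
    # magnitude stays stable as the rank r grows.
    return lora_alpha / math.sqrt(r) if use_rslora else lora_alpha / r

# With alpha = 16 and r = 64: classic scaling shrinks to 0.25,
# while rsLoRA keeps it at 2.0.
print(lora_scaling(16.0, 64))        # 0.25
print(lora_scaling(16.0, 64, True))  # 2.0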


@@ -9,10 +9,13 @@ class ModelArguments:
     """

     model_name_or_path: str = field(
-        metadata={"help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models."}
+        metadata={
+            "help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models."
+        },
     )
     adapter_name_or_path: Optional[str] = field(
-        default=None, metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."}
+        default=None,
+        metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."},
     )
     cache_dir: Optional[str] = field(
         default=None,
@@ -23,7 +26,8 @@ class ModelArguments:
         metadata={"help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)."},
     )
     resize_vocab: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."}
+        default=False,
+        metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."},
     )
     split_special_tokens: Optional[bool] = field(
         default=False,
@@ -34,60 +38,88 @@ class ModelArguments:
         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
     )
     quantization_bit: Optional[int] = field(
-        default=None, metadata={"help": "The number of bits to quantize the model."}
+        default=None,
+        metadata={"help": "The number of bits to quantize the model."},
     )
     quantization_type: Optional[Literal["fp4", "nf4"]] = field(
-        default="nf4", metadata={"help": "Quantization data type to use in int4 training."}
+        default="nf4",
+        metadata={"help": "Quantization data type to use in int4 training."},
     )
     double_quantization: Optional[bool] = field(
-        default=True, metadata={"help": "Whether or not to use double quantization in int4 training."}
+        default=True,
+        metadata={"help": "Whether or not to use double quantization in int4 training."},
     )
     rope_scaling: Optional[Literal["linear", "dynamic"]] = field(
-        default=None, metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."}
+        default=None,
+        metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."},
     )
     flash_attn: Optional[bool] = field(
-        default=False, metadata={"help": "Enable FlashAttention-2 for faster training."}
+        default=False,
+        metadata={"help": "Enable FlashAttention-2 for faster training."},
     )
     shift_attn: Optional[bool] = field(
-        default=False, metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}
+        default=False,
+        metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."},
     )
     use_unsloth: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."}
+        default=False,
+        metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."},
     )
     disable_gradient_checkpointing: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to disable gradient checkpointing."}
+        default=False,
+        metadata={"help": "Whether or not to disable gradient checkpointing."},
     )
     upcast_layernorm: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to upcast the layernorm weights in fp32."}
+        default=False,
+        metadata={"help": "Whether or not to upcast the layernorm weights in fp32."},
     )
     upcast_lmhead_output: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to upcast the output of lm_head in fp32."}
+        default=False,
+        metadata={"help": "Whether or not to upcast the output of lm_head in fp32."},
     )
-    hf_hub_token: Optional[str] = field(default=None, metadata={"help": "Auth token to log in with Hugging Face Hub."})
-    ms_hub_token: Optional[str] = field(default=None, metadata={"help": "Auth token to log in with ModelScope Hub."})
+    hf_hub_token: Optional[str] = field(
+        default=None,
+        metadata={"help": "Auth token to log in with Hugging Face Hub."},
+    )
+    ms_hub_token: Optional[str] = field(
+        default=None,
+        metadata={"help": "Auth token to log in with ModelScope Hub."},
+    )
     export_dir: Optional[str] = field(
-        default=None, metadata={"help": "Path to the directory to save the exported model."}
+        default=None,
+        metadata={"help": "Path to the directory to save the exported model."},
     )
     export_size: Optional[int] = field(
-        default=1, metadata={"help": "The file shard size (in GB) of the exported model."}
+        default=1,
+        metadata={"help": "The file shard size (in GB) of the exported model."},
     )
     export_quantization_bit: Optional[int] = field(
-        default=None, metadata={"help": "The number of bits to quantize the exported model."}
+        default=None,
+        metadata={"help": "The number of bits to quantize the exported model."},
     )
     export_quantization_dataset: Optional[str] = field(
-        default=None, metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."}
+        default=None,
+        metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."},
     )
     export_quantization_nsamples: Optional[int] = field(
-        default=128, metadata={"help": "The number of samples used for quantization."}
+        default=128,
+        metadata={"help": "The number of samples used for quantization."},
     )
     export_quantization_maxlen: Optional[int] = field(
-        default=1024, metadata={"help": "The maximum length of the model inputs used for quantization."}
+        default=1024,
+        metadata={"help": "The maximum length of the model inputs used for quantization."},
     )
     export_legacy_format: Optional[bool] = field(
-        default=False, metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."}
+        default=False,
+        metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."},
     )
     export_hub_model_id: Optional[str] = field(
-        default=None, metadata={"help": "The name of the repository if push the model to the Hugging Face hub."}
+        default=None,
+        metadata={"help": "The name of the repository if push the model to the Hugging Face hub."},
     )
+    print_param_status: Optional[bool] = field(
+        default=False,
+        metadata={"help": "For debugging purposes, print the status of the parameters in the model."},
+    )

     def __post_init__(self):
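
The other feature named in the title, LLaMA Pro, trains by block expansion: interleave zero-initialized copies of existing decoder layers so the expanded model computes the same function as the original at initialization, then update only the copies. The toy sketch below assumes bias-free, pre-norm Llama-style layers exposing self_attn.o_proj and mlp.down_proj; it illustrates the idea and is not this repository's expansion code.

import copy
from torch import nn

def expand_blocks(layers: nn.ModuleList, num_expand: int) -> nn.ModuleList:
    # Assumes len(layers) is divisible by num_expand, so one copy is
    # inserted after every `stride` original layers.
    stride = len(layers) // num_expand
    expanded = []
    for i, layer in enumerate(layers):
        layer.requires_grad_(False)  # original blocks stay frozen
        expanded.append(layer)
        if (i + 1) % stride == 0:
            new_layer = copy.deepcopy(layer)
            # Zeroing the output projections of both residual branches makes
            # the copy an identity map (new_layer(x) == x) for bias-free
            # pre-norm blocks, so expansion preserves the model's function.
            new_layer.self_attn.o_proj.weight.data.zero_()
            new_layer.mlp.down_proj.weight.data.zero_()
            new_layer.requires_grad_(True)  # only the copies are trained
            expanded.append(new_layer)
    return nn.ModuleList(expanded)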