[config] update args (#7231)

Former-commit-id: f71a901840811bf560df671ec63a146ff99140c6
Author: hoshi-hiyouga
Date: 2025-03-10 23:04:43 +08:00
Committed by: GitHub
Parent: cf58a6d860
Commit: 71a1c1321a
16 changed files with 89 additions and 74 deletions


@@ -20,6 +20,7 @@ import math
 from typing import TYPE_CHECKING
 
 from ...extras import logging
+from ...extras.constants import RopeScaling
 
 if TYPE_CHECKING:
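
The added import brings in the RopeScaling enum that the second hunk compares against in place of raw strings. A minimal sketch of what such an enum plausibly looks like (the real definition lives in extras/constants.py; only the DYNAMIC and LLAMA3 members are confirmed by this diff, the others and the str mixin are assumptions):

    from enum import Enum, unique

    @unique
    class RopeScaling(str, Enum):  # the str mixin is an assumption
        # Values must match the rope_type strings that transformers expects.
        LINEAR = "linear"    # assumed member
        DYNAMIC = "dynamic"  # used in the hunk below
        YARN = "yarn"        # assumed member
        LLAMA3 = "llama3"    # used in the hunk below

With such an enum, the getattr(x, "value", x) idiom in the next hunk extracts the underlying string when x is an enum member and passes a plain string through unchanged, so rope_scaling may arrive in either form.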
@@ -39,33 +40,32 @@ def configure_rope(config: "PretrainedConfig", model_args: "ModelArguments", is_
         logger.warning_rank0("Current model does not support RoPE scaling.")
         return
 
-    rope_kwargs = {}
+    rope_kwargs = {"rope_type": getattr(model_args.rope_scaling, "value", model_args.rope_scaling)}  # handle enum
     if model_args.model_max_length is not None:
-        if is_trainable and model_args.rope_scaling == "dynamic":
+        if is_trainable and model_args.rope_scaling == RopeScaling.DYNAMIC:
             logger.warning_rank0(
                 "Dynamic NTK scaling may not work well with fine-tuning. "
                 "See: https://github.com/huggingface/transformers/pull/24653"
             )
 
         current_max_length = getattr(config, "max_position_embeddings", None)
-        if current_max_length and model_args.model_max_length > current_max_length:
-            logger.info_rank0(f"Enlarge max model length from {current_max_length} to {model_args.model_max_length}.")
-            setattr(config, "max_position_embeddings", model_args.model_max_length)
-            rope_kwargs["factor"] = float(math.ceil(model_args.model_max_length / current_max_length))
-        else:
-            logger.warning_rank0("Input length is smaller than max length. Consider increase input length.")
-            rope_kwargs["factor"] = 1.0
+        if (not current_max_length) or model_args.model_max_length <= current_max_length:
+            logger.warning_rank0("Input length is smaller than max length. Disabling rope scaling.")
+            return
 
-        if model_args.rope_scaling == "dynamic":
+        logger.info_rank0(f"Enlarge max model length from {current_max_length} to {model_args.model_max_length}.")
+        setattr(config, "max_position_embeddings", model_args.model_max_length)
+        rope_kwargs["factor"] = float(math.ceil(model_args.model_max_length / current_max_length))
+        if model_args.rope_scaling == RopeScaling.DYNAMIC:
             rope_kwargs["original_max_position_embeddings"] = current_max_length
-        elif model_args.rope_scaling == "llama3":
+        elif model_args.rope_scaling == RopeScaling.LLAMA3:
             rope_kwargs["original_max_position_embeddings"] = current_max_length
             rope_kwargs["low_freq_factor"] = 1.0
             rope_kwargs["high_freq_factor"] = 4.0
     else:
         rope_kwargs["factor"] = 2.0
 
-    setattr(config, "rope_scaling", {"rope_type": model_args.rope_scaling, **rope_kwargs})
+    setattr(config, "rope_scaling", rope_kwargs)
     logger.info_rank0(
-        f"Using {model_args.rope_scaling} scaling strategy and setting scaling factor to {rope_kwargs['factor']}."
+        f"Using {rope_kwargs['rope_type']} scaling strategy and setting scaling factor to {rope_kwargs['factor']}."
     )
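
Taken together, the rewritten branch only applies scaling when the requested model_max_length actually exceeds the model's native max_position_embeddings, computes the factor as ceil(model_max_length / current_max_length), and writes a single dict to config.rope_scaling. A self-contained sketch of the post-patch behavior on that path (the helper name rope_scaling_dict is invented for illustration; the real code operates on the config and model_args objects):

    import math

    def rope_scaling_dict(rope_type: str, model_max_length: int, current_max_length: int) -> dict:
        # Mirrors the post-patch logic for the length-extending path.
        kwargs = {"rope_type": rope_type}
        kwargs["factor"] = float(math.ceil(model_max_length / current_max_length))
        if rope_type == "dynamic":
            kwargs["original_max_position_embeddings"] = current_max_length
        elif rope_type == "llama3":
            kwargs["original_max_position_embeddings"] = current_max_length
            kwargs["low_freq_factor"] = 1.0
            kwargs["high_freq_factor"] = 4.0
        return kwargs

    # E.g. stretching a 4096-token context to 16384 tokens with llama3 scaling:
    print(rope_scaling_dict("llama3", 16384, 4096))
    # {'rope_type': 'llama3', 'factor': 4.0, 'original_max_position_embeddings': 4096,
    #  'low_freq_factor': 1.0, 'high_freq_factor': 4.0}

Note the behavioral change: where the old code fell back to factor=1.0 when model_max_length did not exceed the native context, the new code warns and returns without setting config.rope_scaling at all.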