fix mrope

Former-commit-id: 55bee1d333549ca19858b3f5c1b7b86926e5fb09
hiyouga
2024-12-12 15:08:17 +00:00
parent cfff136b2a
commit fb22651faf
11 changed files with 32 additions and 9 deletions


@@ -27,7 +27,7 @@ from typing_extensions import override
 from ...extras import logging
 from ...extras.constants import IGNORE_INDEX
-from ...extras.packages import is_transformers_version_equal_to_4_46
+from ...extras.packages import is_transformers_version_equal_to_4_46, is_transformers_version_greater_than
 from ..callbacks import PissaConvertCallback, SaveProcessorCallback
 from ..trainer_utils import create_custom_optimizer, create_custom_scheduler
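
The hunk above imports a second helper, is_transformers_version_greater_than, alongside the existing equality check. As a rough sketch of what such a gate typically looks like (this body is an assumption for illustration, not the repository's actual implementation), one can compare the installed transformers distribution against a threshold with packaging:

```python
import importlib.metadata
from functools import lru_cache

from packaging import version


@lru_cache
def _package_version(name: str) -> "version.Version":
    # Read the installed distribution's version; treat a missing package as 0.0.0.
    try:
        return version.parse(importlib.metadata.version(name))
    except importlib.metadata.PackageNotFoundError:
        return version.parse("0.0.0")


def is_transformers_version_greater_than(content: str) -> bool:
    # True when the installed transformers is at or above the given version.
    return _package_version("transformers") >= version.parse(content)
```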
@@ -51,6 +51,9 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
     def __init__(
         self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
     ) -> None:
+        if is_transformers_version_greater_than("4.46"):
+            kwargs["processing_class"] = kwargs.pop("tokenizer")
+
         super().__init__(**kwargs)
         self.finetuning_args = finetuning_args
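
Why this gate exists: transformers v4.46 added a processing_class argument to Trainer.__init__ and deprecated tokenizer, so the patch re-keys the keyword before delegating to the parent constructor. A self-contained sketch of the same pattern (the subclass name and inline version check are illustrative, not the commit's code):

```python
import transformers
from packaging import version
from transformers import Seq2SeqTrainer


class PatchedSeq2SeqTrainer(Seq2SeqTrainer):  # illustrative subclass name
    def __init__(self, **kwargs) -> None:
        # transformers >= 4.46 expects `processing_class` instead of the
        # deprecated `tokenizer` kwarg; re-key it so old call sites still work.
        if "tokenizer" in kwargs and version.parse(transformers.__version__) >= version.parse("4.46"):
            kwargs["processing_class"] = kwargs.pop("tokenizer")

        super().__init__(**kwargs)
```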


@@ -56,6 +56,7 @@ def run_sft(
     data_collator = SFTDataCollatorWith4DAttentionMask(
         template=template,
         model=model,
+        pad_to_multiple_of=8 if training_args.do_train else None,  # for shift short attention
         label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id,
         block_diag_attn=model_args.block_diag_attn,
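
The new pad_to_multiple_of=8 rounds every training batch's sequence length up to a multiple of 8, which keeps sequences evenly divisible for the grouped shift used by shift short attention (LongLoRA's S²-Attn) and incidentally suits tensor-core kernels. A minimal sketch of the same knob on a stock Hugging Face collator (the checkpoint name and example data are placeholders, not from this commit):

```python
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
collator = DataCollatorForSeq2Seq(
    tokenizer,
    pad_to_multiple_of=8,     # round each batch's sequence length up to a multiple of 8
    label_pad_token_id=-100,  # ignored by the loss, analogous to IGNORE_INDEX
)

batch = collator([
    {"input_ids": [1, 2, 3], "labels": [4, 5]},
    {"input_ids": [1, 2, 3, 4, 5], "labels": [6]},
])
print(batch["input_ids"].shape)  # torch.Size([2, 8]): 5 tokens padded up to 8
```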