move configure_packing to llamafactory.model.patcher and fix constants

Former-commit-id: 9c5e972c9c81957f2e9e30bf284ef1c076de9fd0
author ancv
date 2024-06-21 00:45:06 +07:00
parent dd7a1dbfae
commit 6c185a2c57
6 changed files with 16 additions and 12 deletions


@@ -12,7 +12,6 @@ from ...model import load_model, load_tokenizer
 from ..trainer_utils import create_modelcard_and_push
 from .metric import ComputeMetrics
 from .trainer import CustomSeq2SeqTrainer
-from ...model.model_utils.packing import configure_packing

 if TYPE_CHECKING:
     from transformers import Seq2SeqTrainingArguments, TrainerCallback
@@ -33,9 +32,6 @@ def run_sft(
     dataset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
     model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)

-    if data_args.efficient_packing:
-        configure_packing(model.config, model_args)
-
     if training_args.predict_with_generate:
         tokenizer.padding_side = "left"  # use left-padding in generation
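
For reference, a minimal sketch of what the relocated call could look like inside llamafactory/model/patcher.py after this commit. The patch_config signature and the flag's new location are assumptions for illustration; only the shape of the configure_packing(config, model_args) call comes from the diff above.

# Sketch only: patch_config's real signature and the flag's new home are
# assumptions; the diff above shows just the configure_packing call itself.
from typing import TYPE_CHECKING

from .model_utils.packing import configure_packing

if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ..hparams import ModelArguments


def patch_config(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
    # ... existing config patching ...

    # Packing is now configured once while patching the model config, instead
    # of separately in each training workflow (run_sft previously gated this
    # on data_args.efficient_packing; the attribute shown here is assumed to
    # have moved to model_args).
    if is_trainable and getattr(model_args, "efficient_packing", False):
        configure_packing(config, model_args)

Centralizing the call in the patcher means every stage that loads a model picks up the packing configuration, rather than only the workflows (such as run_sft) that remembered to call it.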