Former-commit-id: 43a56cb331fae899ca35b0c312730d4ab79d0c42
Author: hiyouga
Date:   2024-07-15 01:04:56 +08:00
parent  68365045b4
commit  e4d11a117b
18 changed files with 46 additions and 41 deletions


@@ -83,11 +83,12 @@ def cal_ppl(
             train_on_prompt=train_on_prompt,
             output_dir="dummy_dir",
             overwrite_cache=True,
+            do_train=True,
         )
     )
     tokenizer_module = load_tokenizer(model_args)
     tokenizer = tokenizer_module["tokenizer"]
-    dataset_module = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)
+    trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)["train_dataset"]
     model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False)
     if stage == "pt":
         data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
@@ -100,7 +101,7 @@ def cal_ppl(
     else:
         raise NotImplementedError("Stage does not supported: {}.".format(stage))

-    dataloader = DataLoader(dataset_module["eval_dataset"], batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
+    dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
     criterion = torch.nn.CrossEntropyLoss(reduction="none")
     total_ppl = 0
     perplexities = []
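
Context for the change: with do_train=True passed to get_train_args, get_dataset returns a "train_dataset" split, and the rewritten DataLoader line consumes that split instead of the previous dataset_module["eval_dataset"]. Below is a minimal sketch (not taken from this commit) of how the variables set up here, dataloader, criterion, total_ppl, and perplexities, are typically consumed to compute a per-sample perplexity; the exact loop body, the -100 ignore label, and the tqdm progress bar are assumptions.

import torch
from tqdm import tqdm

# Per-sample perplexity loop (illustrative sketch, assumed rather than copied from cal_ppl.py).
device = next(model.parameters()).device
with torch.no_grad():
    for batch in tqdm(dataloader):
        batch = {k: v.to(device) for k, v in batch.items()}
        logits = model(**batch).logits
        # Shift logits and labels so each position predicts the next token.
        shift_logits = logits[:, :-1, :]
        shift_labels = batch["labels"][:, 1:]
        loss_mask = shift_labels != -100  # assume -100 marks prompt/padding tokens
        token_loss = criterion(
            shift_logits.contiguous().view(-1, shift_logits.size(-1)),
            shift_labels.contiguous().view(-1),
        ).view(shift_labels.size())
        for sample_loss, mask in zip(token_loss, loss_mask):
            # Average the loss over non-masked tokens, then exponentiate.
            ppl = torch.exp(sample_loss[mask].mean()).item()
            total_ppl += ppl
            perplexities.append(ppl)

print("Average perplexity: {:.2f}".format(total_ppl / len(perplexities)))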