fix up
Former-commit-id: 43a56cb331fae899ca35b0c312730d4ab79d0c42
@@ -61,11 +61,12 @@ def calculate_lr(
             packing=packing,
             output_dir="dummy_dir",
             overwrite_cache=True,
+            do_train=True,
         )
     )
     tokenizer_module = load_tokenizer(model_args)
     tokenizer = tokenizer_module["tokenizer"]
-    dataset_module = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)
+    trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)["train_dataset"]
     if stage == "pt":
         data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
     elif stage == "sft":
@@ -73,7 +74,7 @@ def calculate_lr(
     else:
         raise NotImplementedError("Stage does not supported: {}.".format(stage))
 
-    dataloader = DataLoader(dataset_module["eval_dataset"], batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
+    dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
     valid_tokens, total_tokens = 0, 0
     for batch in tqdm(dataloader):
         valid_tokens += torch.sum(batch["labels"] != IGNORE_INDEX).item()
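In short, the fix makes the script count tokens over the training split: passing `do_train=True` presumably makes `get_dataset` produce a `"train_dataset"` entry, and the dataloader now iterates that split instead of `dataset_module["eval_dataset"]`. Below is a minimal, self-contained sketch of the counting loop for context; the `count_tokens` wrapper is hypothetical, and the `IGNORE_INDEX = -100` value and the `total_tokens` update are assumptions based on the surrounding code, not shown in this diff.

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

IGNORE_INDEX = -100  # assumption: the project's ignored-label id

def count_tokens(trainset, data_collator, batch_size: int):
    # shuffle=False: we only count tokens, so order does not matter,
    # and a fixed order keeps runs reproducible.
    dataloader = DataLoader(
        trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True
    )
    valid_tokens, total_tokens = 0, 0
    for batch in tqdm(dataloader):
        # Labels equal to IGNORE_INDEX (padding/prompt positions) do not
        # contribute to the loss, so they are excluded from valid_tokens.
        valid_tokens += torch.sum(batch["labels"] != IGNORE_INDEX).item()
        total_tokens += torch.numel(batch["labels"])  # assumed completion
    return valid_tokens, total_tokens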