fix ppo dataset bug #4012

Former-commit-id: 7fc51b2e93698ae5e012566af8481f4d861c873d
This commit is contained in:
hiyouga
2024-06-06 19:03:20 +08:00
parent d5559461c1
commit ca95e98ca0
4 changed files with 4 additions and 4 deletions

View File

@@ -18,7 +18,7 @@ def preprocess_pretrain_dataset(
if data_args.template == "gemma":
text_examples = [tokenizer.bos_token + example for example in text_examples]
-result = tokenizer(text_examples, add_special_tokens=False, max_length=data_args.cutoff_len)
+result = tokenizer(text_examples, add_special_tokens=False, max_length=data_args.cutoff_len, truncation=True)
else:
tokenized_examples = tokenizer(text_examples, add_special_tokens=False)
concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}