Support Qwen-7B, fix InternLM-7B inference

Former-commit-id: 25d2ca29ecb70cbfd5206333c667042a0c4d2e5a
This commit is contained in:
hiyouga
2023-08-03 15:53:32 +08:00
parent da08fa7c63
commit 2e19afedb8
8 changed files with 89 additions and 25 deletions

View File

@@ -67,7 +67,7 @@ def load_model_and_tokenizer(
**config_kwargs
)
if tokenizer.pad_token_id is None or tokenizer.pad_token_id == 64000: # 64000 for baichuan model (older version)
tokenizer.pad_token_id = 0 # set as the <unk> token
tokenizer.pad_token = tokenizer.eos_token
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
is_mergeable = True