support Qwen-7B, fix InternLM-7B inference

Former-commit-id: 25d2ca29ecb70cbfd5206333c667042a0c4d2e5a
This commit is contained in:
hiyouga
2023-08-03 15:53:32 +08:00
parent da08fa7c63
commit 2e19afedb8
8 changed files with 89 additions and 25 deletions

View File

@@ -1,8 +1,7 @@
import torch
from typing import TYPE_CHECKING, List, Optional, Tuple
from transformers.generation.utils import LogitsProcessorList
from transformers.generation.logits_process import LogitsProcessor
from transformers import LogitsProcessor, LogitsProcessorList, StoppingCriteria, StoppingCriteriaList
from llmtuner.extras.constants import LAYERNORM_NAMES
@@ -46,6 +45,22 @@ def get_logits_processor() -> LogitsProcessorList:
return logits_processor
class StopWordsCriteria(StoppingCriteria):
    r"""
    Stopping criterion that halts generation once any stop token id appears
    as the most recently generated token.

    NOTE(review): with batch size > 1 this stops the *whole* batch as soon as
    any one sequence emits a stop id — confirm callers only use batch size 1.
    """

    def __init__(self, stop_ids: List[int]) -> None:
        super().__init__()
        self.stop_ids = stop_ids  # token ids that terminate generation

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # input_ids[:, -1] is the last generated token of each sequence in
        # the batch; use a generator (not a list) so we short-circuit on the
        # first matching stop id.
        return any(stop_id in input_ids[:, -1] for stop_id in self.stop_ids)
def get_stopwords_criteria(stop_ids: List[int]) -> StoppingCriteriaList:
    r"""
    Build a ``StoppingCriteriaList`` containing a single ``StopWordsCriteria``
    for the given stop token ids.
    """
    # StoppingCriteriaList is a list subclass, so it can be constructed
    # directly from an iterable of criteria instead of appending one by one.
    return StoppingCriteriaList([StopWordsCriteria(stop_ids)])
def count_parameters(model: torch.nn.Module) -> Tuple[int, int]:
r"""
Returns the number of trainable parameters and number of all parameters in the model.