fix llava config

Former-commit-id: b13d032325e45d401a9dbc64d4c73e308eff3288
Author: hiyouga
Date: 2024-05-12 00:02:49 +08:00
Parent: 436afcba57
Commit: 2bcd5b2b73
5 changed files with 15 additions and 15 deletions


@@ -8,7 +8,7 @@ from ...extras.logging import get_logger
 if TYPE_CHECKING:
-    from transformers import PretrainedConfig, PreTrainedModel
+    from transformers import PreTrainedModel

     from ...hparams import ModelArguments
@@ -16,11 +16,6 @@ if TYPE_CHECKING:
 logger = get_logger(__name__)


-def configure_valuehead(config: "PretrainedConfig") -> None:
-    if getattr(config, "model_type", None) == "llava":
-        setattr(config, "hidden_size", getattr(config.vision_config, "intermediate_size", None))
-
-
 def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") -> Dict[str, torch.Tensor]:
     r"""
     Loads value head parameters from Hugging Face Hub or local disk.
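The commit message does not explain the fix, but the diff suggests the motivation: the deleted configure_valuehead() forced config.hidden_size to the vision tower's MLP width. Below is a minimal sketch against the public transformers API (not code from this commit) of why that override is unreliable.

# Hedged sketch (public transformers API only; this code is not part of the
# commit). The removed override copied vision_config.intermediate_size into
# config.hidden_size. For a 7B-scale LLaVA the two values happen to coincide
# (both 4096), but for larger text backbones they do not, so the override
# set the wrong width for the value head.
from transformers import CLIPVisionConfig, LlamaConfig, LlavaConfig

config = LlavaConfig(
    vision_config=CLIPVisionConfig(intermediate_size=4096),  # CLIP ViT-L MLP width
    text_config=LlamaConfig(hidden_size=5120),               # 13B-scale LM width
)

print(config.vision_config.intermediate_size)  # 4096 -> what the removed code used
print(config.text_config.hidden_size)          # 5120 -> what a value head must match

With the override gone, callers that need the value head width can read config.text_config.hidden_size directly, which is presumably what the other changed files in this commit handle.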