fix llava config
Former-commit-id: b13d032325e45d401a9dbc64d4c73e308eff3288
@@ -8,7 +8,7 @@ from ...extras.logging import get_logger
 
 
 if TYPE_CHECKING:
-    from transformers import PretrainedConfig, PreTrainedModel
+    from transformers import PreTrainedModel
 
     from ...hparams import ModelArguments
 
@@ -16,11 +16,6 @@ if TYPE_CHECKING:
 logger = get_logger(__name__)
 
 
-def configure_valuehead(config: "PretrainedConfig") -> None:
-    if getattr(config, "model_type", None) == "llava":
-        setattr(config, "hidden_size", getattr(config.vision_config, "intermediate_size", None))
-
-
 def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") -> Dict[str, torch.Tensor]:
     r"""
     Loads value head parameters from Hugging Face Hub or local disk.
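
For context, a minimal sketch of what the removed override affected. A trl-style value head is typically a linear layer sized by config.hidden_size, and for a LLaVA model the head consumes the language model's hidden states, whose width lives under config.text_config.hidden_size rather than config.vision_config.intermediate_size. The helper and checkpoint id below are illustrative assumptions, not code from this commit:

import torch.nn as nn
from transformers import AutoConfig


def value_head_input_size(config) -> int:
    # Hypothetical helper: pick the hidden size the value head should match.
    # For multimodal configs such as LLaVA, the value head sits on top of the
    # language model, so the text sub-config is the relevant source.
    if getattr(config, "model_type", None) == "llava":
        return config.text_config.hidden_size
    return config.hidden_size


config = AutoConfig.from_pretrained("llava-hf/llava-1.5-7b-hf")  # example checkpoint id
v_head = nn.Linear(value_head_input_size(config), 1)  # maps hidden states to scalar values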
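
The load_valuehead_params docstring above describes loading from the Hub or local disk; one plausible implementation, sketched under assumptions (the value_head.safetensors filename is this project's convention, and the returned keys follow trl's v_head naming), is:

from typing import Dict

import torch
from safetensors.torch import load_file
from transformers.utils import cached_file


def load_valuehead_sketch(path_or_repo_id: str) -> Dict[str, torch.Tensor]:
    # cached_file resolves both Hub repo ids and local directories to a file path.
    vhead_file = cached_file(path_or_repo_id=path_or_repo_id, filename="value_head.safetensors")
    # Typically yields tensors such as v_head.summary.weight and v_head.summary.bias.
    return load_file(vhead_file)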