try to pass tests

Former-commit-id: 3b6bfae0e5fe795a70d530b2765f27d95c5862f8
Author: BUAADreamer
Date: 2024-09-10 13:12:51 +08:00
Parent: 66b870fd08
Commit: 514f976cc1
4 changed files with 54 additions and 43 deletions

View File

@@ -296,11 +296,11 @@ class LlavaPlugin(BasePlugin):
 class LlavaNextPlugin(BasePlugin):
     @override
     def process_messages(
-            self,
-            messages: Sequence[Dict[str, str]],
-            images: Sequence["ImageInput"],
-            videos: Sequence["VideoInput"],
-            processor: Optional["ProcessorMixin"],
+        self,
+        messages: Sequence[Dict[str, str]],
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        processor: Optional["ProcessorMixin"],
     ) -> List[Dict[str, str]]:
         self._validate_input(images, videos)
         num_image_tokens = 0
@@ -318,13 +318,13 @@ class LlavaNextPlugin(BasePlugin):
     @override
     def get_mm_inputs(
-            self,
-            images: Sequence["ImageInput"],
-            videos: Sequence["VideoInput"],
-            imglens: Sequence[int],
-            vidlens: Sequence[int],
-            seqlens: Sequence[int],
-            processor: Optional["ProcessorMixin"],
+        self,
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        imglens: Sequence[int],
+        vidlens: Sequence[int],
+        seqlens: Sequence[int],
+        processor: Optional["ProcessorMixin"],
     ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
         self._validate_input(images, videos)
         return _get_mm_inputs(images, videos, processor)
@@ -379,6 +379,7 @@ class LlavaNextVideoPlugin(BasePlugin):
         res.update(video_res)
         return res

+
 class PaliGemmaPlugin(BasePlugin):
     @override
     def process_messages(
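
Note: the hunks above only re-indent the plugin method signatures. For reference, a minimal self-contained stub following those signatures (an illustration only; LLaMA-Factory's real BasePlugin and the ImageInput/VideoInput aliases do considerably more work):

from typing import Any, Dict, List, Optional, Sequence


class DemoPlugin:
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence[Any],
        videos: Sequence[Any],
        processor: Optional[Any],
    ) -> List[Dict[str, str]]:
        # real plugins expand "<image>" placeholders into the model's
        # image-token sequence; this stub just passes messages through
        return [dict(message) for message in messages]


plugin = DemoPlugin()
print(plugin.process_messages([{"role": "user", "content": "<image>Hi"}], [], [], None))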

View File

@@ -120,9 +120,12 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
     """
     init_kwargs = _get_init_kwargs(model_args)
     if "LLaVA-NeXT-Video" in model_args.model_name_or_path:
-        from transformers import PretrainedConfig, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
+        from transformers import CLIPVisionConfig, LlamaConfig, LlavaNextVideoConfig, PretrainedConfig
+
         official_config = PretrainedConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
-        config = LlavaNextVideoConfig(CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config))
+        config = LlavaNextVideoConfig(
+            CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config)
+        )
         setattr(config, "visual_inputs", True)
         return config
     return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
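
For context, the rewritten branch builds a composite LLaVA-NeXT-Video config from explicit sub-configs. A minimal sketch of that pattern, assuming a transformers release that ships LlavaNextVideoConfig (4.42 or later); the default sub-configs here are illustrative:

from transformers import CLIPVisionConfig, LlamaConfig, LlavaNextVideoConfig

config = LlavaNextVideoConfig(
    CLIPVisionConfig(),  # vision tower defaults
    LlamaConfig(),  # language backbone defaults
)
setattr(config, "visual_inputs", True)  # flag consumed elsewhere in LLaMA-Factory
print(config.model_type)  # "llava_next_video"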
@@ -163,6 +166,7 @@ def load_model(
         load_class = AutoModelForCausalLM
         if "llava_next_video" == getattr(config, "model_type"):
+            from transformers import LlavaNextVideoForConditionalGeneration
             load_class = LlavaNextVideoForConditionalGeneration
         if model_args.train_from_scratch:
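
The added import is what lets the pre-existing assignment resolve: the concrete model class is picked from config.model_type, and the LLaVA-NeXT-Video class is imported lazily so older transformers versions can still import the module. A minimal sketch of the same dispatch, using a freshly constructed config as a stand-in:

from transformers import AutoModelForCausalLM, LlavaNextVideoConfig

config = LlavaNextVideoConfig()
load_class = AutoModelForCausalLM
if getattr(config, "model_type", None) == "llava_next_video":
    # imported lazily, as in the diff above
    from transformers import LlavaNextVideoForConditionalGeneration

    load_class = LlavaNextVideoForConditionalGeneration
print(load_class.__name__)  # LlavaNextVideoForConditionalGeneration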

View File

@@ -108,7 +108,13 @@ def configure_visual_model(config: "PretrainedConfig") -> None:
     Patches VLMs before loading them.
     """
     model_type = getattr(config, "model_type", None)
-    if model_type in ["llava", "llava_next", "video_llava", "idefics2", "llava_next_video"]:  # required for ds zero3 and valuehead models
+    if model_type in [
+        "llava",
+        "llava_next",
+        "video_llava",
+        "idefics2",
+        "llava_next_video",
+    ]:  # required for ds zero3 and valuehead models
         setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))

     if getattr(config, "is_yi_vl_derived_model", None):
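
The expanded list only reformats the condition; the behavior is unchanged: composite VLM configs keep hidden_size on config.text_config, while DeepSpeed ZeRO-3 and the valuehead wrapper read it from the top-level config, so it is mirrored upward. A minimal sketch with a default LLaVA config:

from transformers import LlavaConfig

config = LlavaConfig()  # defaults to a CLIP vision tower plus a Llama text backbone
setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
print(config.hidden_size)  # 4096 with the default Llama text config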