support LLaVA-NeXT (video)
Former-commit-id: 27e94593ac467e56e3a7f5c64f4ff6cee81f4b47
@@ -209,6 +209,50 @@ class BasePlugin:
        return {}


class Idefics2Plugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        processor: Optional["ProcessorMixin"],
    ) -> List[Dict[str, str]]:
        self._validate_input(images, videos)
        num_image_tokens = 0
        messages = deepcopy(messages)
        fake_image_token = processor.fake_image_token.content
        image_str = f"{fake_image_token}{self.image_token * processor.image_seq_len}{fake_image_token}"
        if getattr(processor.image_processor, "do_image_splitting", False):
            image_str = image_str * 5  # image splitting yields four crops plus the original image

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                num_image_tokens += 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
            content = content.replace("{{image}}", image_str)
            content = content.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}")
            message["content"] = content

        if len(images) != num_image_tokens:
            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))

        return messages

    @override
    def get_mm_inputs(
        self,
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        imglens: Sequence[int],
        vidlens: Sequence[int],
        seqlens: Sequence[int],
        processor: Optional["ProcessorMixin"],
    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
        self._validate_input(images, videos)
        return _get_mm_inputs(images, videos, processor)


class LlavaPlugin(BasePlugin):
    @override
    def process_messages(
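For orientation, here is a self-contained sketch (not part of the diff) of what the Idefics2 rewrite above produces; the wrapper token and image_seq_len are assumed values, not read from a real processor. The {{image}} intermediate matters: the expansion itself contains <image> tokens, so a direct replace would re-match its own output.

# Illustration only; token values are assumptions for the example.
IMAGE_PLACEHOLDER = "<image>"
fake_image_token = "<fake_token_around_image>"  # assumed Idefics2 wrapper token
image_seq_len = 64  # assumed value of processor.image_seq_len

image_str = f"{fake_image_token}{'<image>' * image_seq_len}{fake_image_token}"

content = "Compare <image><image> please."
while IMAGE_PLACEHOLDER in content:
    content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
content = content.replace("{{image}}", image_str)
# Back-to-back images end up sharing a single wrapper token between them:
content = content.replace(fake_image_token * 2, fake_image_token)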
@@ -249,6 +293,92 @@ class LlavaPlugin:
        return _get_mm_inputs(images, videos, processor)


class LlavaNextPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        processor: Optional["ProcessorMixin"],
    ) -> List[Dict[str, str]]:
        self._validate_input(images, videos)
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                num_image_tokens += 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)

        if len(images) != num_image_tokens:
            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))

        return messages

    @override
    def get_mm_inputs(
        self,
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        imglens: Sequence[int],
        vidlens: Sequence[int],
        seqlens: Sequence[int],
        processor: Optional["ProcessorMixin"],
    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
        self._validate_input(images, videos)
        return _get_mm_inputs(images, videos, processor)


class LlavaNextVideoPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        processor: Optional["ProcessorMixin"],
    ) -> List[Dict[str, str]]:
        self._validate_input(images, videos)
        num_image_tokens = 0
        num_video_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                num_image_tokens += 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
            while VIDEO_PLACEHOLDER in content:
                num_video_tokens += 1
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}", 1)

        if len(images) != num_image_tokens:
            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))

        if len(videos) != num_video_tokens:
            raise ValueError("The number of videos does not match the number of {} tokens".format(VIDEO_PLACEHOLDER))

        return messages

    @override
    def get_mm_inputs(
        self,
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        imglens: Sequence[int],
        vidlens: Sequence[int],
        seqlens: Sequence[int],
        processor: Optional["ProcessorMixin"],
    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
        self._validate_input(images, videos)
        video_processor = getattr(processor, "video_processor")
        res = _get_mm_inputs(images, [], processor)
        if len(videos) != 0:
            videos = _regularize_videos(videos, processor)
            video_res = video_processor(videos, return_tensors="pt")
            res.update(video_res)
        return res


class PaliGemmaPlugin(BasePlugin):
    @override
    def process_messages(
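Returning to the count-and-validate pattern used by the plugins above, a standalone toy version (illustrative placeholder values and a hypothetical helper name, not from the diff):

IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER = "<image>", "<video>"

def count_placeholders(content):
    # Mirrors the while-loops above: each placeholder is consumed once and
    # replaced with an inert marker so it cannot be counted twice.
    num_images = num_videos = 0
    while IMAGE_PLACEHOLDER in content:
        num_images += 1
        content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
    while VIDEO_PLACEHOLDER in content:
        num_videos += 1
        content = content.replace(VIDEO_PLACEHOLDER, "{{video}}", 1)
    return num_images, num_videos

assert count_placeholders("<image> What happens in <video>?") == (1, 1)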
@@ -380,11 +510,59 @@ class Qwen2vlPlugin:
        return _get_mm_inputs(images, videos, processor)


class VideoLlavaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        processor: Optional["ProcessorMixin"],
    ) -> List[Dict[str, str]]:
        self._validate_input(images, videos)
        num_image_tokens = 0
        num_video_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                num_image_tokens += 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
            while VIDEO_PLACEHOLDER in content:
                num_video_tokens += 1
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}", 1)

        if len(images) != num_image_tokens:
            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))

        if len(videos) != num_video_tokens:
            raise ValueError("The number of videos does not match the number of {} tokens".format(VIDEO_PLACEHOLDER))

        return messages

    @override
    def get_mm_inputs(
        self,
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        imglens: Sequence[int],
        vidlens: Sequence[int],
        seqlens: Sequence[int],
        processor: Optional["ProcessorMixin"],
    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
        self._validate_input(images, videos)
        return _get_mm_inputs(images, videos, processor)


PLUGINS = {
    "base": BasePlugin,
    "idefics2": Idefics2Plugin,
    "llava": LlavaPlugin,
    "llava_next": LlavaNextPlugin,
    "llava_next_video": LlavaNextVideoPlugin,
    "paligemma": PaliGemmaPlugin,
    "qwen2_vl": Qwen2vlPlugin,
    "video_llava": VideoLlavaPlugin,
}
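The templates below obtain their plugin through get_mm_plugin, whose body is outside this diff; a plausible minimal reading of the lookup, with the constructor signature assumed from the call sites, is:

def get_mm_plugin(name, image_token=None, video_token=None):
    # Assumed shape: resolve the class from PLUGINS and bind its special tokens.
    plugin_class = PLUGINS.get(name)
    if plugin_class is None:
        raise ValueError("Multimodal plugin {} not found.".format(name))
    return plugin_class(image_token, video_token)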
@@ -680,6 +680,16 @@ _register_template(
)


_register_template(
    name="idefics2",
    format_user=StringFormatter(slots=["User:{{content}}<end_of_utterance>\nAssistant:"]),
    format_separator=EmptyFormatter(slots=["\n"]),
    stop_words=["<end_of_utterance>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="idefics2", image_token="<image>"),
)


_register_template(
    name="intern",
    format_user=StringFormatter(slots=["<|User|>:{{content}}\n<|Bot|>:"]),
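Stepping back to the idefics2 template registered in this hunk: a single user turn renders as follows (the question text is illustrative):

prompt = "User:What is in <image>?<end_of_utterance>\nAssistant:"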
@@ -753,6 +763,28 @@ _register_template(
)


_register_template(
    name="llava_next",
    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)


_register_template(
    name="llava_next_video",
    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
)


_register_template(
    name="mistral",
    format_user=StringFormatter(slots=["[INST] {{content}} [/INST]"]),
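llava_next, llava_next_video, and video_llava (further down) all share this vicuna-style chat format; with the default system prompt, a first turn renders roughly as below. How the system text and user turn are joined is defined by the template engine elsewhere, so the spacing here is an assumption:

prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions."
    " USER: <video> What is happening in this video? ASSISTANT:"
)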
@@ -897,6 +929,17 @@ _register_template(
)


_register_template(
    name="video_llava",
    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="video_llava", image_token="<image>", video_token="<video>"),
)


_register_template(
    name="xuanyuan",
    format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]),
@@ -583,6 +583,23 @@ register_model_group(
)


register_model_group(
    models={
        "Idefics2-Base": {
            DownloadSource.DEFAULT: "HuggingFaceM4/idefics2-8b-base",
        },
        "Idefics2-Chat": {
            DownloadSource.DEFAULT: "HuggingFaceM4/idefics2-8b",
        },
        "Idefics2-Chatty": {
            DownloadSource.DEFAULT: "HuggingFaceM4/idefics2-8b-chatty",
        },
    },
    template="idefics2",
    vision=True,
)


register_model_group(
    models={
        "InternLM-7B": {

@@ -812,6 +829,49 @@ register_model_group(
)


register_model_group(
    models={
        "LLaVA-NeXT-7B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-vicuna-7b-hf",
        },
        "LLaVA-NeXT-13B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-vicuna-13b-hf",
        },
        "LLaVA-NeXT-34B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-34b-hf",
        },
        "LLaVA-NeXT-Mistral-7B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-mistral-7b-hf",
        },
    },
    template="llava_next",
    vision=True,
)


register_model_group(
    models={
        "LLaVA-NeXT-Video-7B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-hf",
        },
        "LLaVA-NeXT-Video-34B-Chat": {
            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-34B-hf",
        },
        "LLaVA-NeXT-Video-7B-32k-Chat": {
            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-32K-hf",
        },
        "LLaVA-NeXT-Video-7B-DPO": {
            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-DPO-hf",
        },
        "LLaVA-NeXT-Video-34B-DPO": {
            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-34B-DPO-hf",
        },
    },
    template="llava_next_video",
    vision=True,
)


register_model_group(
    models={
        "MiniCPM-2B-SFT-Chat": {

@@ -1475,6 +1535,17 @@ register_model_group(
)


register_model_group(
    models={
        "Video-LLaVA-7B-Chat": {
            DownloadSource.DEFAULT: "LanguageBind/Video-LLaVA-7B-hf",
        },
    },
    template="video_llava",
    vision=True,
)


register_model_group(
    models={
        "XuanYuan-6B": {
@@ -107,7 +107,8 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
            setattr(processor, "video_factor", 2)
        else:
            setattr(processor, "video_factor", 1)
-    except Exception:
+    except Exception as e:
+        print(e)
        processor = None

    # Avoid load tokenizer, see:
@@ -123,6 +124,12 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
    Loads model config.
    """
    init_kwargs = _get_init_kwargs(model_args)
    if "LLaVA-NeXT-Video" in model_args.model_name_or_path:
        from transformers import CLIPVisionConfig, LlamaConfig, LlavaNextVideoConfig, PretrainedConfig

        official_config = PretrainedConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
        config = LlavaNextVideoConfig(
            CLIPVisionConfig(**official_config.vision_config),
            LlamaConfig(**official_config.text_config),
        )
        setattr(config, "visual_inputs", True)
        return config

    return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
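The manual assembly above works around AutoConfig for these checkpoints; on a transformers release that registers the llava_next_video model type, the direct route would presumably suffice (sketch):

from transformers import AutoConfig

# Assumes transformers auto-maps model_type "llava_next_video" for this repo.
config = AutoConfig.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")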
@@ -159,6 +166,9 @@ def load_model(
            load_class = AutoModelForVision2Seq
        else:
            load_class = AutoModelForCausalLM
        if getattr(config, "model_type") == "llava_next_video":
            from transformers import LlavaNextVideoForConditionalGeneration

            load_class = LlavaNextVideoForConditionalGeneration

        if model_args.train_from_scratch:
            model = load_class.from_config(config)
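Outside this loader, the same dispatch reduces to a plain from_pretrained call; a minimal sketch against the public transformers API, using a repo id from the registry above:

from transformers import LlavaNextVideoForConditionalGeneration

# Requires a transformers version that ships LlavaNextVideoForConditionalGeneration.
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf", torch_dtype="auto"
)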
@@ -34,7 +34,7 @@ def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool)
        forbidden_modules.add("output_layer")
    elif model_type == "internlm2":
        forbidden_modules.add("output")
-    elif model_type in ["llava", "paligemma"]:
+    elif model_type in ["idefics2", "llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
        forbidden_modules.add("multi_modal_projector")
    elif model_type == "qwen2_vl":
        forbidden_modules.add("merger")

@@ -108,7 +108,7 @@ def configure_visual_model(config: "PretrainedConfig") -> None:
    Patches VLMs before loading them.
    """
    model_type = getattr(config, "model_type", None)
-    if model_type == "llava":  # required for ds zero3 and valuehead models
+    if model_type in ["llava", "llava_next", "video_llava", "idefics2", "llava_next_video"]:  # required for ds zero3 and valuehead models
        setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))

    if getattr(config, "is_yi_vl_derived_model", None):

@@ -150,7 +150,7 @@ def get_image_seqlen(config: "PretrainedConfig") -> int:
        image_seqlen += 1
    elif model_type == "paligemma":
        image_seqlen = config.vision_config.num_image_tokens
-    elif model_type == "qwen2_vl":  # variable length
+    else:
        image_seqlen = -1

    return image_seqlen