Support LLaVA-NeXT (Video)

Former-commit-id: 27e94593ac467e56e3a7f5c64f4ff6cee81f4b47
This commit is contained in:
BUAADreamer
2024-09-10 12:31:53 +08:00
parent dfff411e1a
commit 484128b641
11 changed files with 394 additions and 33 deletions

View File

@@ -107,7 +107,8 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
setattr(processor, "video_factor", 2)
else:
setattr(processor, "video_factor", 1)
except Exception:
except Exception as e:
print(e)
processor = None
# Avoid load tokenizer, see:
@@ -123,6 +124,12 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
Loads model config.
"""
init_kwargs = _get_init_kwargs(model_args)
if "LLaVA-NeXT-Video" in model_args.model_name_or_path:
from transformers import PretrainedConfig, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
official_config = PretrainedConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
config = LlavaNextVideoConfig(CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config))
setattr(config, "visual_inputs", True)
return config
return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
@@ -159,6 +166,9 @@ def load_model(
load_class = AutoModelForVision2Seq
else:
load_class = AutoModelForCausalLM
if "llava_next_video" == getattr(config, "model_type"):
from transformers import LlavaNextVideoForConditionalGeneration
load_class = LlavaNextVideoForConditionalGeneration
if model_args.train_from_scratch:
model = load_class.from_config(config)