Merge pull request #5290 from simonJJJ/qwen2_vl
support qwen2-vl

Former-commit-id: 7156f832af8505b26371559d340c0e69eb962bbc
@@ -212,7 +212,7 @@ def _setup_lora_tuning(
             target_modules = find_expanded_modules(model, target_modules, finetuning_args.freeze_trainable_layers)

         if model_args.visual_inputs and finetuning_args.freeze_vision_tower:
-            target_modules = "^(?!.*vision_tower).*(?:{}).*".format("|".join(target_modules))
+            target_modules = "^(?!.*(?:vision_tower|visual)).*(?:{}).*".format("|".join(target_modules))

         if (
             finetuning_args.use_dora
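The first hunk widens the negative lookahead in the LoRA target pattern. Previously only module names containing vision_tower were excluded when the vision tower is frozen; Qwen2-VL exposes its vision encoder under an attribute named visual, so the pattern now excludes both. Below is a minimal sketch (not LLaMA-Factory code) of how the updated pattern behaves; the module names are hypothetical examples, only the regex construction mirrors the diff above.

import re

# Build the pattern the same way the patched line does.
target_modules = ["q_proj", "v_proj"]
pattern = "^(?!.*(?:vision_tower|visual)).*(?:{}).*".format("|".join(target_modules))

candidates = [
    "model.layers.0.self_attn.q_proj",    # language-model projection -> matched
    "vision_tower.blocks.0.attn.q_proj",  # LLaVA-style vision encoder -> excluded
    "visual.blocks.0.attn.q_proj",        # Qwen2-VL vision encoder -> excluded
]
for name in candidates:
    print(name, bool(re.match(pattern, name)))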
@@ -36,6 +36,8 @@ def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool)
         forbidden_modules.add("output")
     elif model.config.model_type in ["llava", "paligemma"]:
         forbidden_modules.add("multi_modal_projector")
+    elif model.config.model_type in ["qwen2_vl"]:
+        forbidden_modules.add("merger")

     if freeze_vision_tower:
         forbidden_modules.add("vision_tower")
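The second hunk adds the merger module, Qwen2-VL's projector between the vision encoder and the language model, to the set of modules excluded from "target all linear layers", mirroring how multi_modal_projector is excluded for LLaVA and PaliGemma. The sketch below is a simplified illustration of that filtering idea, not the library's exact find_all_linear_modules; the forbidden names follow the diff above, while the traversal is an illustrative approximation.

import torch

def linear_module_names(model: "torch.nn.Module", model_type: str, freeze_vision_tower: bool) -> list:
    # Modules whose weights should not receive LoRA adapters.
    forbidden_modules = {"lm_head"}
    if model_type in ["llava", "paligemma"]:
        forbidden_modules.add("multi_modal_projector")
    elif model_type in ["qwen2_vl"]:
        forbidden_modules.add("merger")  # Qwen2-VL's vision-to-language projector
    if freeze_vision_tower:
        forbidden_modules.add("vision_tower")

    module_names = set()
    for name, module in model.named_modules():
        if any(forbidden in name for forbidden in forbidden_modules):
            continue  # skip projector / frozen vision-tower weights
        if isinstance(module, torch.nn.Linear):
            module_names.add(name.split(".")[-1])  # keep the leaf name, e.g. "q_proj"
    return list(module_names)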