refactor mm training
@@ -27,7 +27,7 @@ logger = get_logger(__name__)


 def configure_liger_kernel(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
-    if not is_trainable or not model_args.use_liger_kernel:
+    if not is_trainable or not model_args.enable_liger_kernel:
         return

     if getattr(config, "model_type", None) == "gemma":
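This hunk renames the flag from use_liger_kernel to enable_liger_kernel, so the guard clause has to match the new ModelArguments field. A minimal sketch of what the renamed field might look like (the default value and help text below are assumptions, not taken from this diff):

    from dataclasses import dataclass, field

    @dataclass
    class ModelArguments:
        # field name taken from the diff; default and help text are assumed
        enable_liger_kernel: bool = field(
            default=False,
            metadata={"help": "Whether or not to enable the Liger kernel for faster training."},
        )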
@@ -353,7 +353,7 @@ def llama_sdpa_attention_forward(


 def _apply_llama_patch() -> None:
-    require_version("transformers>=4.41.2,<=4.43.4", "To fix: pip install transformers>=4.41.2,<=4.43.4")
+    require_version("transformers>=4.41.2,<=4.44.3", "To fix: pip install transformers>=4.41.2,<=4.44.3")
     LlamaAttention.forward = llama_attention_forward
     LlamaFlashAttention2.forward = llama_flash_attention_2_forward
     LlamaSdpaAttention.forward = llama_sdpa_attention_forward
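This raises the supported transformers ceiling from 4.43.4 to 4.44.3. The three assignments below the version check monkey-patch the attention forward methods in place. A generic sketch of that pattern (the Greeter class is hypothetical, for illustration only): assigning a new function to a class attribute swaps the implementation for every existing and future instance.

    class Greeter:
        def greet(self) -> str:
            return "hello"

    def patched_greet(self) -> str:
        return "hello, patched"

    # rebinding the class attribute patches all instances, which is exactly
    # how the LlamaAttention forwards are replaced above
    Greeter.greet = patched_greet
    assert Greeter().greet() == "hello, patched"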
@@ -36,11 +36,14 @@ def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool)
         forbidden_modules.add("output")
     elif model.config.model_type in ["llava", "paligemma"]:
         forbidden_modules.add("multi_modal_projector")
-    elif model.config.model_type in ["qwen2_vl"]:
+    elif model.config.model_type == "qwen2_vl":
         forbidden_modules.add("merger")

     if freeze_vision_tower:
-        forbidden_modules.add("vision_tower")
+        if model.config.model_type == "qwen2_vl":
+            forbidden_modules.add("visual")
+        else:
+            forbidden_modules.add("vision_tower")

     module_names = set()
     for name, module in model.named_modules():
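Qwen2-VL names its vision tower "visual" rather than "vision_tower", so freezing it needs a model-type branch; the single-element membership test in [...] is also tightened to ==. A hedged sketch of how a forbidden-module set typically filters linear layers when collecting LoRA target names (the toy model below is an assumption; the loop mirrors the function body shown above):

    import torch

    model = torch.nn.ModuleDict({
        "visual": torch.nn.Linear(4, 4),   # skipped for qwen2_vl when freeze_vision_tower is set
        "q_proj": torch.nn.Linear(4, 4),
    })
    forbidden_modules = {"visual"}

    module_names = set()
    for name, module in model.named_modules():
        # drop any module whose qualified name touches a forbidden module
        if any(forbidden in name for forbidden in forbidden_modules):
            continue
        if isinstance(module, torch.nn.Linear):
            module_names.add(name.split(".")[-1])

    print(module_names)  # {'q_proj'}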
@@ -114,7 +114,7 @@ def get_unpad_data(attention_mask: "torch.Tensor") -> Tuple["torch.Tensor", "tor


 def _patch_for_block_diag_attn(model_type: str) -> None:
-    require_version("transformers>=4.41.2,<=4.43.4", "To fix: pip install transformers>=4.41.2,<=4.43.4")
+    require_version("transformers>=4.41.2,<=4.44.3", "To fix: pip install transformers>=4.41.2,<=4.44.3")
     if is_transformers_version_greater_than_4_43():
         import transformers.modeling_flash_attention_utils
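The same version-ceiling bump applies to the block-diagonal attention patch; on transformers >= 4.43 the patch targets transformers.modeling_flash_attention_utils rather than per-model modules. A plausible standalone version of the helper gating that branch (the helper name comes from the diff; its exact comparison is an assumption):

    import transformers
    from packaging import version

    def is_transformers_version_greater_than_4_43() -> bool:
        # assumed implementation: inclusive of 4.43.0, matching the import
        # branch in the diff above
        return version.parse(transformers.__version__) >= version.parse("4.43.0")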