refactor mllm param logic
Former-commit-id: b895c190945cf5d991cb4e4dea2ae73cc9c8d246
@@ -364,6 +364,10 @@ class FinetuningArguments(
         default=True,
         metadata={"help": "Whether or not to freeze vision tower in MLLM training."},
     )
+    freeze_multi_modal_projector: bool = field(
+        default=True,
+        metadata={"help": "Whether or not to freeze the multi modal projector in MLLM training."},
+    )
     train_mm_proj_only: bool = field(
         default=False,
         metadata={"help": "Whether or not to train the multimodal projector for MLLM only."},
@@ -398,6 +402,7 @@ class FinetuningArguments(
         self.additional_target: Optional[List[str]] = split_arg(self.additional_target)
         self.galore_target: List[str] = split_arg(self.galore_target)
         self.freeze_vision_tower = self.freeze_vision_tower or self.train_mm_proj_only
+        self.freeze_multi_modal_projector = self.freeze_multi_modal_projector and not self.train_mm_proj_only
         self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]

         assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
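The post-init logic above makes the three flags interdependent: setting train_mm_proj_only forces the vision tower to be frozen and the multi-modal projector to be unfrozen, overriding whatever the user passed for those two flags. A minimal runnable sketch of that resolution, using a hypothetical stand-in dataclass (only the three field names and the two assignment lines come from the diff; everything else is illustrative):

    from dataclasses import dataclass

    @dataclass
    class MllmFreezeArgs:
        # Stand-ins for the FinetuningArguments fields shown in the diff;
        # defaults match the diff (True, True, False).
        freeze_vision_tower: bool = True
        freeze_multi_modal_projector: bool = True
        train_mm_proj_only: bool = False

        def __post_init__(self):
            # Training only the projector forces the vision tower frozen...
            self.freeze_vision_tower = self.freeze_vision_tower or self.train_mm_proj_only
            # ...and forces the projector itself unfrozen, regardless of
            # the user-supplied freeze_multi_modal_projector value.
            self.freeze_multi_modal_projector = (
                self.freeze_multi_modal_projector and not self.train_mm_proj_only
            )

    args = MllmFreezeArgs(train_mm_proj_only=True)
    assert args.freeze_vision_tower is True
    assert args.freeze_multi_modal_projector is False

Note the asymmetry in the two assignments: `or` can only promote freeze_vision_tower to True, while `and not` can only demote freeze_multi_modal_projector to False, so the defaults are preserved whenever train_mm_proj_only is left False.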