fix full/freeze tuning for mllm

Former-commit-id: df5860ddb593d5b82163a585d12160b41dbce0f3
This commit is contained in:
hiyouga
2024-05-27 20:37:57 +08:00
parent 48ff9fb150
commit 3b28c003dd
6 changed files with 62 additions and 47 deletions

View File

@@ -85,10 +85,6 @@ class ModelArguments:
default=False,
        metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
)
tune_mm_proj: bool = field(
default=False,
        metadata={"help": "Whether or not to only finetune mm_projector for MLLM."},
)
moe_aux_loss_coef: Optional[float] = field(
default=None,
metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},