add multimodal LLMs BLIP-2 and InstructBLIP
Former-commit-id: a730f89a972f1a9d37c718c716f199cb8d4903b2
@@ -88,6 +88,10 @@ class DataArguments:
         default=None,
         metadata={"help": "Path to save or load the tokenized datasets."},
     )
+    image_path: Optional[str] = field(
+        default=None,
+        metadata={"help": "Path to images."},
+    )

     def __post_init__(self):
         if self.reserved_label_len >= self.cutoff_len:
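For context, the new `image_path` argument points the data pipeline at a directory of images for multimodal fine-tuning. Below is a minimal sketch of how a preprocessor could resolve it; the `load_example_image` helper and the `example["image"]` key are illustrative assumptions, not code from this commit.

# Sketch only: helper name and the `example["image"]` key are assumed for
# illustration; this commit only adds the `image_path` argument itself.
import os
from typing import Optional

from PIL import Image


def load_example_image(example: dict, image_path: Optional[str]) -> Optional[Image.Image]:
    """Resolve the image file referenced by a dataset example against --image_path."""
    if image_path is None or "image" not in example:
        return None
    return Image.open(os.path.join(image_path, example["image"])).convert("RGB")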
@@ -260,7 +260,7 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments, GaloreA
         default=False,
         metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."},
     )
-    stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo"] = field(
+    stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo", "sft_mm"] = field(
         default="sft",
         metadata={"help": "Which stage will be performed in training."},
     )
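The new `sft_mm` value extends the existing stage choices for the multimodal SFT path. The self-contained sketch below (not this repo's parser code, the dataclass name is made up) shows how a Literal-typed stage field behaves with HfArgumentParser: `--stage sft_mm` now parses, while values outside the Literal are rejected as invalid choices.

# Standalone sketch of Literal-based stage parsing; `TinyFinetuningArguments`
# is an illustrative dataclass, not the project's real FinetuningArguments.
from dataclasses import dataclass, field
from typing import Literal

from transformers import HfArgumentParser


@dataclass
class TinyFinetuningArguments:
    stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo", "sft_mm"] = field(
        default="sft",
        metadata={"help": "Which stage will be performed in training."},
    )


parser = HfArgumentParser(TinyFinetuningArguments)
(args,) = parser.parse_args_into_dataclasses(["--stage", "sft_mm"])
print(args.stage)  # -> "sft_mm"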
@@ -165,6 +165,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "For debugging purposes, print the status of the parameters in the model."},
     )
+    use_qformer: bool = field(
+        default=False,
+        metadata={"help": "Whether use qformer for Multimodal LLM."},
+    )

     def __post_init__(self):
         self.compute_dtype = None
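`use_qformer` exposes the Q-Former that BLIP-2 and InstructBLIP place between the vision encoder and the language model. The sketch below shows one way such a flag could be honored when loading a model with the `transformers` classes; the checkpoint-name branching and the "train only the Q-Former" policy are assumptions for illustration, not this commit's implementation.

# Hedged sketch of a model loader honoring `use_qformer`; the class selection
# heuristic and the freezing policy are assumptions, not this repo's code.
import torch
from transformers import (
    AutoProcessor,
    Blip2ForConditionalGeneration,
    InstructBlipForConditionalGeneration,
)


def load_multimodal_model(model_name_or_path: str, use_qformer: bool):
    """Load a BLIP-2 / InstructBLIP checkpoint and optionally train only its Q-Former."""
    cls = (
        Blip2ForConditionalGeneration
        if "blip2" in model_name_or_path.lower()
        else InstructBlipForConditionalGeneration
    )
    processor = AutoProcessor.from_pretrained(model_name_or_path)
    model = cls.from_pretrained(model_name_or_path, torch_dtype=torch.float16)
    if use_qformer:
        # Freeze the vision encoder and the LLM; leave only Q-Former weights trainable.
        for name, param in model.named_parameters():
            param.requires_grad = "qformer" in name
    return model, processor

For example, `load_multimodal_model("Salesforce/blip2-opt-2.7b", use_qformer=True)` would leave only the Q-Former parameters trainable while the ViT and the OPT decoder stay frozen.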