support loftq
Former-commit-id: e7ac2eb7f7daae17525a278ffbe2f82c0fbd8093
@@ -55,6 +55,10 @@ class LoraArguments:
                   Phi-1.5 choices: [\"Wqkv\", \"out_proj\", \"fc1\", \"fc2\"], \
                   Others choices: the same as LLaMA."}
     )
+    loftq_init: Optional[bool] = field(
+        default=False,
+        metadata={"help": "Use LoftQ initialization for quantized LoRA fine-tuning."}
+    )
     resume_lora_training: Optional[bool] = field(
         default=True,
         metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
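
The diff only declares the `loftq_init` option. Below is a minimal sketch of how such a flag is typically consumed downstream, assuming the trainer builds its adapters with Hugging Face PEFT; the base model name, LoRA rank, and target modules are illustrative placeholders, not taken from this commit.

from peft import LoftQConfig, LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Placeholder base model; any causal LM supported by PEFT would work here.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

use_loftq = True  # would come from LoraArguments.loftq_init
lora_kwargs = dict(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # illustrative; real targets come from lora_target
    task_type="CAUSAL_LM",
)
if use_loftq:
    # LoftQ replaces the default LoRA initialization with quantization-aware
    # initial weights, narrowing the gap between quantized and full-precision tuning.
    lora_kwargs.update(
        init_lora_weights="loftq",
        loftq_config=LoftQConfig(loftq_bits=4),
    )

peft_model = get_peft_model(model, LoraConfig(**lora_kwargs))

Keeping the flag in LoraArguments means LoftQ can be toggled from the command line without touching the adapter-construction code.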