support dpo-ftx

Former-commit-id: 86dfa04f9821556019fa777106787f73eb70b452

commit d81ad2d4bc
parent 9f77e8b025
Author: hiyouga
Date:   2023-12-16 19:21:41 +08:00

6 changed files with 103 additions and 25 deletions


@@ -70,6 +70,14 @@ class RLHFArguments:
         default=0.1,
         metadata={"help": "The beta parameter for the DPO loss."}
     )
+    dpo_loss: Optional[Literal["sigmoid", "hinge"]] = field(
+        default="sigmoid",
+        metadata={"help": "The type of DPO loss to use."}
+    )
+    dpo_ftx: Optional[float] = field(
+        default=0,
+        metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."}
+    )
     ppo_buffer_size: Optional[int] = field(
         default=1,
         metadata={"help": "The number of mini-batches to make experience buffer in a PPO optimization step."}