fix incorrect loss value for vlms

Former-commit-id: 0aa29a71ce958343a2086090d647eb63b8f5f5be
hiyouga
2024-10-30 08:56:46 +00:00
parent 03213984ec
commit 8185eb1890
12 changed files with 48 additions and 22 deletions


@@ -101,7 +101,7 @@ class CustomDPOTrainer(DPOTrainer):
             self.callback_handler.add_callback(PissaConvertCallback)
 
         if finetuning_args.use_badam:
-            from badam import BAdamCallback, clip_grad_norm_old_version
+            from badam import BAdamCallback, clip_grad_norm_old_version  # type: ignore
 
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
             self.add_callback(BAdamCallback)
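
For context, the hunk above rebinds the accelerator's gradient-clipping method at runtime. A minimal sketch of that pattern, using hypothetical Accelerator and clip_grad_norm_old_version stand-ins rather than the real accelerate/badam APIs: types.MethodType binds a plain function to a single instance, so the override receives that instance as self without modifying the class.

from types import MethodType

class Accelerator:
    # Stand-in for accelerate's Accelerator; only the method we patch is modeled.
    def clip_grad_norm_(self, parameters, max_norm):
        return "new-style clipping"

def clip_grad_norm_old_version(self, parameters, max_norm):
    # The replacement receives the instance as `self`, like a normal bound method.
    return f"old-style clipping on {type(self).__name__}"

acc = Accelerator()
# Per-instance override: the instance attribute shadows the class method.
acc.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, acc)
print(acc.clip_grad_norm_([], 1.0))  # -> old-style clipping on Accelerator
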
@@ -274,7 +274,7 @@ class CustomDPOTrainer(DPOTrainer):
         https://github.com/huggingface/transformers/blob/v4.46.0/src/transformers/trainer.py#L3605
         """
         loss = super().compute_loss(model, inputs, return_outputs)
-        if kwargs.pop("num_items_in_batch", False) and is_transformers_version_equal_to_4_46():
+        if is_transformers_version_equal_to_4_46() and kwargs.pop("num_items_in_batch", False):
             loss /= self.args.gradient_accumulation_steps
         return loss
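
The reorder matters because `and` short-circuits and `dict.pop` mutates its receiver. In the old order, the pop ran on every transformers version, consuming num_items_in_batch from kwargs even when the 4.46-specific correction did not apply; with the cheap version check first, other versions leave kwargs untouched. A minimal sketch, using a hypothetical version_is_4_46() in place of is_transformers_version_equal_to_4_46():

def version_is_4_46() -> bool:
    # Hypothetical stand-in for is_transformers_version_equal_to_4_46();
    # pretend we are running on a different transformers version.
    return False

kwargs = {"num_items_in_batch": 8}

# Old order: pop() runs first, consuming the kwarg even though the
# version check then fails and no division is applied.
if kwargs.pop("num_items_in_batch", False) and version_is_4_46():
    pass
print(kwargs)  # {} -- the kwarg is silently consumed on every version

kwargs = {"num_items_in_batch": 8}

# New order: the version check gates the pop(), so the kwarg is only
# consumed when the 4.46-specific gradient-accumulation fix applies.
if version_is_4_46() and kwargs.pop("num_items_in_batch", False):
    pass
print(kwargs)  # {'num_items_in_batch': 8}
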