[breaking] bump transformers to 4.45.0 & improve ci (#7746)
* update ci
* fix
* fix
* fix
* fix
* fix
@@ -43,7 +43,6 @@ import torch
 import torch.nn.functional as F
 
 from ...extras import logging
-from ...extras.misc import check_version
 from ...extras.packages import is_transformers_version_greater_than
 
 
@@ -117,6 +116,5 @@ def configure_packing(model_args: "ModelArguments", is_trainable: bool) -> None:
     if not is_trainable or not model_args.block_diag_attn:
         return
 
-    check_version("transformers>=4.43.0")
     transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
     logger.info_rank0("Using block diagonal attention for sequence packing without cross-attention.")
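
The line patched in this hunk, `transformers.modeling_flash_attention_utils._get_unpad_data`, is the helper that transformers' flash-attention path uses to turn an attention mask into per-sequence lengths, so replacing it is what makes packed sequences attend only within their own block. Below is a minimal sketch of that idea, not the repository's exact implementation: the function name `get_unpad_data_sketch` and the convention of labelling the k-th packed sub-sequence with the value k in the mask are illustrative assumptions.

```python
import torch
import torch.nn.functional as F


def get_unpad_data_sketch(attention_mask: torch.Tensor):
    """Sketch of an unpad helper for block-diagonal sequence packing.

    Assumes `attention_mask` is an int tensor of shape (batch, seq_len) where
    0 marks padding and k marks tokens of the k-th packed sub-sequence.
    Returns (indices, cu_seqlens, max_seqlen) in the format flash-attention
    style kernels expect, so each sub-sequence forms its own attention block.
    """
    max_num = int(attention_mask.max().item())
    # Token counts per (row, sub-sequence) slot, flattened and stripped of empty slots.
    counts = torch.stack(
        [(attention_mask == k).sum(dim=-1) for k in range(1, max_num + 1)], dim=-1
    ).flatten()
    seqlens_in_batch = counts[counts > 0].to(torch.int32)

    # Positions of all non-padding tokens in the flattened batch.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max().item())
    # Cumulative sequence lengths, prefixed with 0.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch


# Example: two examples, each packing one or two sub-sequences.
mask = torch.tensor([[1, 1, 2, 2, 2, 0], [1, 1, 1, 0, 0, 0]])
idx, cu, mx = get_unpad_data_sketch(mask)
# cu -> tensor([0, 2, 5, 8], dtype=torch.int32); mx -> 3
```

With the per-sub-sequence `cu_seqlens` above, a varlen flash-attention kernel treats each packed sub-sequence as an independent sequence, which is the "block diagonal attention without cross-attention" the log message refers to.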