[deps] bump transformers to 4.49.0 (#8564)
@@ -27,6 +27,10 @@ from typing import TYPE_CHECKING, BinaryIO, Literal, Optional, TypedDict, Union
 import numpy as np
 import torch
 from transformers.image_utils import get_image_size, is_valid_image, to_numpy_array
+from transformers.models.mllama.processing_mllama import (
+    convert_sparse_cross_attention_mask_to_dense,
+    get_cross_attention_token_mask,
+)
 from typing_extensions import override

 from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
@@ -51,17 +55,10 @@ if is_pyav_available():
     import av


-if is_transformers_version_greater_than("4.45.0"):
-    from transformers.models.mllama.processing_mllama import (
-        convert_sparse_cross_attention_mask_to_dense,
-        get_cross_attention_token_mask,
-    )
-
-
 if is_transformers_version_greater_than("4.52.0"):
     from transformers.image_utils import make_flat_list_of_images
     from transformers.video_utils import make_batched_videos
-elif is_transformers_version_greater_than("4.49.0"):
+else:
     from transformers.image_utils import make_batched_videos, make_flat_list_of_images

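The hunk above works because transformers >= 4.49.0 can now be assumed at import time, so only the 4.52.0 boundary still needs a runtime gate. Below is a minimal sketch of how such a gate can be implemented; it is an illustration only, and the repository's own is_transformers_version_greater_than helper in ..extras.packages may differ in detail.

# Hypothetical sketch of a version gate, not the repository's implementation.
from functools import lru_cache

from packaging import version


@lru_cache
def _transformers_version():
    # importlib.metadata reports the installed distribution version.
    from importlib.metadata import version as get_version

    return version.parse(get_version("transformers"))


def is_transformers_version_greater_than(content: str) -> bool:
    """Return True if the installed transformers version is >= the given string."""
    return _transformers_version() >= version.parse(content)


if is_transformers_version_greater_than("4.52.0"):
    from transformers.video_utils import make_batched_videos  # newer location
else:
    from transformers.image_utils import make_batched_videos  # pre-4.52 location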
@@ -35,7 +35,6 @@ from transformers.utils import (
 from transformers.utils.versions import require_version

 from . import logging
-from .packages import is_transformers_version_greater_than


 _is_fp16_available = is_torch_npu_available() or is_torch_cuda_available()
@@ -94,15 +93,11 @@ def check_version(requirement: str, mandatory: bool = False) -> None:

 def check_dependencies() -> None:
     r"""Check the version of the required packages."""
-    check_version(
-        "transformers>=4.45.0,<=4.52.4,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0,!=4.52.0"
-    )
+    check_version("transformers>=4.49.0,<=4.52.4,!=4.52.0")
     check_version("datasets>=2.16.0,<=3.6.0")
     check_version("accelerate>=1.3.0,<=1.7.0")
     check_version("peft>=0.14.0,<=0.15.2")
     check_version("trl>=0.8.6,<=0.9.6")
-    if is_transformers_version_greater_than("4.46.0") and not is_transformers_version_greater_than("4.48.1"):
-        logger.warning_rank0_once("There are known bugs in transformers v4.46.0-v4.48.0, please use other versions.")


 def calculate_tps(dataset: list[dict[str, Any]], metrics: dict[str, float], stage: Literal["sft", "rm"]) -> float:
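For context, the requirement strings passed to check_version follow the comma-separated format accepted by transformers' own require_version helper, which is imported in the same file (see the hunk above). A small hedged sketch of enforcing the tightened constraint directly with that helper, assuming check_version is essentially a thin wrapper around it:

# Hedged example: transformers.utils.versions.require_version accepts a
# comma-separated list of constraints and raises an ImportError when the
# installed version violates any of them.
from transformers.utils.versions import require_version

# Same constraint as the new check_version call above: at least 4.49.0,
# at most 4.52.4, with 4.52.0 explicitly excluded.
require_version(
    "transformers>=4.49.0,<=4.52.4,!=4.52.0",
    "To fix: run `pip install \"transformers>=4.49.0,<=4.52.4,!=4.52.0\"`.",
)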
@@ -348,7 +348,7 @@ def get_train_args(args: Optional[Union[dict[str, Any], list[str]]] = None) -> _
     if finetuning_args.finetuning_type == "lora":
         # https://github.com/huggingface/transformers/blob/v4.50.0/src/transformers/trainer.py#L782
         training_args.label_names = training_args.label_names or ["labels"]


     if "swanlab" in training_args.report_to and finetuning_args.use_swanlab:
         training_args.report_to.remove("swanlab")
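The label_names line above matters because the Trainer otherwise infers label names from the model class's forward() signature (the trainer.py link in the comment points at that logic), and that inference no longer sees the base model once it is wrapped by PEFT for LoRA. The following illustrative sketch is an assumption about that behavior, not code from this repository; it uses transformers' find_labels utility and a hypothetical TrainingArguments instance.

# Hedged illustration: how the Trainer's default label inference works, and why
# pinning label_names avoids relying on it for wrapped (e.g. PEFT/LoRA) models.
from transformers import LlamaForCausalLM, TrainingArguments
from transformers.utils import find_labels

# find_labels() inspects the forward() signature for label-like arguments.
print(find_labels(LlamaForCausalLM))  # expected: ["labels"]

# Equivalent to the diff: keep any user-provided names, else default to "labels".
training_args = TrainingArguments(output_dir="outputs")  # hypothetical args
training_args.label_names = training_args.label_names or ["labels"]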
@@ -19,6 +19,7 @@ import torch
 from transformers import (
     AutoConfig,
     AutoModelForCausalLM,
+    AutoModelForImageTextToText,
     AutoModelForSeq2SeqLM,
     AutoModelForTextToWaveform,
     AutoModelForVision2Seq,
@@ -29,7 +30,6 @@ from trl import AutoModelForCausalLMWithValueHead

 from ..extras import logging
 from ..extras.misc import count_parameters, skip_check_imports, try_download_model_from_other_hub
-from ..extras.packages import is_transformers_version_greater_than
 from .adapter import init_adapter
 from .model_utils.liger_kernel import apply_liger_kernel
 from .model_utils.misc import register_autoclass
@@ -39,10 +39,6 @@ from .model_utils.valuehead import load_valuehead_params
 from .patcher import patch_config, patch_model, patch_processor, patch_tokenizer, patch_valuehead_model


-if is_transformers_version_greater_than("4.46.0"):
-    from transformers import AutoModelForImageTextToText
-
-
 if TYPE_CHECKING:
     from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer, ProcessorMixin

@@ -162,10 +158,7 @@ def load_model(
         else:
             if type(config) in AutoModelForVision2Seq._model_mapping.keys():  # image-text
                 load_class = AutoModelForVision2Seq
-            elif (
-                is_transformers_version_greater_than("4.46.0")
-                and type(config) in AutoModelForImageTextToText._model_mapping.keys()
-            ):  # image-text
+            elif type(config) in AutoModelForImageTextToText._model_mapping.keys():  # image-text
                 load_class = AutoModelForImageTextToText
             elif type(config) in AutoModelForSeq2SeqLM._model_mapping.keys():  # audio-text
                 load_class = AutoModelForSeq2SeqLM
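The dispatch above leans on the private _model_mapping tables that each Auto class keeps from config classes to model classes. Below is a minimal sketch of that idea for illustration only; _model_mapping is not a supported public API, and the model id is just an example, not one used by this commit.

# Hedged sketch of config-type based dispatch, mirroring the membership test in
# the diff; treat _model_mapping as an implementation detail of transformers.
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoModelForImageTextToText,
    AutoModelForSeq2SeqLM,
    AutoModelForVision2Seq,
)


def pick_load_class(config):
    """Return the first Auto class whose mapping knows this config type."""
    for candidate in (AutoModelForVision2Seq, AutoModelForImageTextToText, AutoModelForSeq2SeqLM):
        if type(config) in candidate._model_mapping.keys():  # same test as load_model()
            return candidate
    return AutoModelForCausalLM  # plain text-only fallback


config = AutoConfig.from_pretrained("llava-hf/llava-1.5-7b-hf")  # example multimodal checkpoint
load_class = pick_load_class(config)
print(load_class.__name__)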