[deps] update to transformers 4.52 (#8125)

Author: hoshi-hiyouga
Date: 2025-05-21 05:16:18 +08:00
Committed by: GitHub
Parent: 56926d76f9
Commit: 9ae17cd173
28 changed files with 365 additions and 109 deletions

View File

@@ -135,8 +135,7 @@ def _check_plugin(
     expected_mm_inputs: dict[str, Any] = {},
     expected_no_mm_inputs: dict[str, Any] = {},
 ) -> None:
-    # test omni_messages
-    if plugin.__class__.__name__ == "Qwen2OmniPlugin":
+    if plugin.__class__.__name__ == "Qwen2OmniPlugin":  # test omni_messages
         assert plugin.process_messages(OMNI_MESSAGES, IMAGES, NO_VIDEOS, AUDIOS, processor) == expected_mm_messages
         assert plugin.process_token_ids(INPUT_IDS, LABELS, IMAGES, NO_VIDEOS, AUDIOS, tokenizer, processor) == (
             expected_input_ids,
@@ -146,8 +145,7 @@ def _check_plugin(
         plugin.get_mm_inputs(IMAGES, NO_VIDEOS, AUDIOS, IMGLENS, NO_VIDLENS, AUDLENS, BATCH_IDS, processor),
         expected_mm_inputs,
     )
-    # test mm_messages
-    if plugin.__class__.__name__ != "BasePlugin":
+    elif plugin.__class__.__name__ != "BasePlugin":  # test mm_messages
         assert plugin.process_messages(MM_MESSAGES, IMAGES, NO_VIDEOS, NO_AUDIOS, processor) == expected_mm_messages
         assert plugin.process_token_ids(INPUT_IDS, LABELS, IMAGES, NO_VIDEOS, NO_AUDIOS, tokenizer, processor) == (
             expected_input_ids,
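
Note: the switch from two independent `if` statements to `if`/`elif` in the two hunks above is not cosmetic. `Qwen2OmniPlugin` also satisfies `__name__ != "BasePlugin"`, so under the old structure it fell through into the mm_messages branch as well, running those assertions against omni expectations; `elif` makes the branches mutually exclusive. A minimal sketch of the pitfall (class names taken from the test, logic reduced):

class BasePlugin: ...
class Qwen2OmniPlugin(BasePlugin): ...

plugin = Qwen2OmniPlugin()
if plugin.__class__.__name__ == "Qwen2OmniPlugin":
    print("omni branch")  # runs
elif plugin.__class__.__name__ != "BasePlugin":
    print("mm branch")  # skipped; a second plain `if` would have run this too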
@@ -201,7 +199,7 @@ def test_gemma3_plugin():
     _check_plugin(**check_inputs)
 
 
-@pytest.mark.xfail(reason="Unknown error.")
+@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
 def test_internvl_plugin():
     image_seqlen = 256
     tokenizer_module = _load_tokenizer_module(model_name_or_path="OpenGVLab/InternVL3-1B-hf")
@@ -219,7 +217,7 @@ def test_internvl_plugin():
     _check_plugin(**check_inputs)
 
 
-@pytest.mark.xfail(reason="Unknown error.")
+@pytest.mark.skipif(not is_transformers_version_greater_than("4.51.0"), reason="Requires transformers>=4.51.0")
 def test_llama4_plugin():
     tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA4)
     processor = tokenizer_module["processor"]
@@ -321,10 +319,9 @@ def test_pixtral_plugin():
     _check_plugin(**check_inputs)
 
 
-@pytest.mark.xfail(reason="Unknown error.")
+@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
 def test_qwen2_omni_plugin():
-    image_seqlen = 4
-    audio_seqlen = 2
+    image_seqlen, audio_seqlen = 4, 2
     tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2.5-Omni-7B")
     qwen2_omni_plugin = get_mm_plugin(
         name="qwen2_omni", audio_token="<|AUDIO|>", image_token="<|IMAGE|>", video_token="<|VIDEO|>"

View File

@@ -127,20 +127,21 @@ def test_encode_multiturn(use_fast: bool):
 
 @pytest.mark.parametrize("use_fast", [True, False])
 @pytest.mark.parametrize("cot_messages", [True, False])
-@pytest.mark.parametrize("enable_thinking", [True, False])
+@pytest.mark.parametrize("enable_thinking", [True, False, None])
 def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thinking: bool):
-    messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
+    input_messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast)
     data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
     template = get_template_and_fix_tokenizer(tokenizer, data_args)
-    prompt_ids, answer_ids = template.encode_oneturn(tokenizer, messages)
+    prompt_ids, answer_ids = template.encode_oneturn(tokenizer, input_messages)
+    output_messages = MESSAGES if enable_thinking is False else input_messages
     prompt_str = (
-        f"<|im_start|>user\n{messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
+        f"<|im_start|>user\n{output_messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
         f"{MESSAGES[1]['content']}<|im_end|>\n"
-        f"<|im_start|>user\n{messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
+        f"<|im_start|>user\n{output_messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
     )
-    answer_str = f"{messages[3]['content']}<|im_end|>\n"
-    if not cot_messages:
+    answer_str = f"{output_messages[3]['content']}<|im_end|>\n"
+    if not cot_messages or enable_thinking is False:
         if enable_thinking:
             answer_str = "<think>\n\n</think>\n\n" + answer_str
         else:
@@ -151,18 +152,19 @@ def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thinking: bool):
 
 @pytest.mark.parametrize("use_fast", [True, False])
 @pytest.mark.parametrize("cot_messages", [True, False])
-@pytest.mark.parametrize("enable_thinking", [True, False])
+@pytest.mark.parametrize("enable_thinking", [True, False, None])
 def test_reasoning_encode_multiturn(use_fast: bool, cot_messages: bool, enable_thinking: bool):
-    messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
+    input_messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast)
     data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
     template = get_template_and_fix_tokenizer(tokenizer, data_args)
-    encoded_pairs = template.encode_multiturn(tokenizer, messages)
-    prompt_str_1 = f"<|im_start|>user\n{messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
-    answer_str_1 = f"{messages[1]['content']}<|im_end|>\n"
-    prompt_str_2 = f"<|im_start|>user\n{messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
-    answer_str_2 = f"{messages[3]['content']}<|im_end|>\n"
-    if not cot_messages:
+    encoded_pairs = template.encode_multiturn(tokenizer, input_messages)
+    output_messages = MESSAGES if enable_thinking is False else input_messages
+    prompt_str_1 = f"<|im_start|>user\n{output_messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
+    answer_str_1 = f"{output_messages[1]['content']}<|im_end|>\n"
+    prompt_str_2 = f"<|im_start|>user\n{output_messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
+    answer_str_2 = f"{output_messages[3]['content']}<|im_end|>\n"
+    if not cot_messages or enable_thinking is False:
         if enable_thinking:
             answer_str_1 = "<think>\n\n</think>\n\n" + answer_str_1
             answer_str_2 = "<think>\n\n</think>\n\n" + answer_str_2
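
Note: `enable_thinking` is now tri-state. Both template tests encode the expectation through `output_messages`: `False` strips recorded thoughts (expectations fall back to plain `MESSAGES`), while `True` and `None` keep the input as-is; and when no thought is present, only a truthy `True` injects an empty think block. A hedged sketch of the visible cases (`expected_think_prefix` is a hypothetical name; the test's final `else:` branch is cut off by the hunks, so it is not modeled here):

from typing import Optional


def expected_think_prefix(has_thought: bool, enable_thinking: Optional[bool]) -> str:
    # Only enable_thinking=True pads a thought-less answer with an empty block;
    # None is falsy, so in the test it takes the (unshown) else branch instead.
    if enable_thinking is True and not has_thought:
        return "<think>\n\n</think>\n\n"
    return ""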

View File

@@ -16,6 +16,7 @@ import pytest
 import torch
 from transformers import AutoConfig, AutoModelForVision2Seq
 
+from llamafactory.extras.packages import is_transformers_version_greater_than
 from llamafactory.hparams import FinetuningArguments, ModelArguments
 from llamafactory.model.adapter import init_adapter
@@ -45,10 +46,12 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
     assert param.requires_grad != freeze_language_model
 
 
-@pytest.mark.parametrize("freeze_vision_tower", (False, True))
-def test_visual_lora(freeze_vision_tower: bool):
+@pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
+def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
-    finetuning_args = FinetuningArguments(finetuning_type="lora", freeze_vision_tower=freeze_vision_tower)
+    finetuning_args = FinetuningArguments(
+        finetuning_type="lora", freeze_vision_tower=freeze_vision_tower, freeze_language_model=freeze_language_model
+    )
     config = AutoConfig.from_pretrained(model_args.model_name_or_path)
     with torch.device("meta"):
         model = AutoModelForVision2Seq.from_config(config)
@@ -61,10 +64,15 @@ def test_visual_lora(freeze_vision_tower: bool):
         else:
             frozen_params.add(name)
 
-    if freeze_vision_tower:
-        assert "base_model.model.visual.blocks.0.attn.qkv.lora_A.default.weight" not in trainable_params
+    if is_transformers_version_greater_than("4.52.0"):
+        visual_param_name = "base_model.model.model.visual.blocks.0.attn.qkv.lora_A.default.weight"
+        language_param_name = "base_model.model.model.language_model.layers.0.self_attn.q_proj.lora_A.default.weight"
+        merger_param_name = "base_model.model.model.visual.merger.lora_A.default.weight"
     else:
-        assert "base_model.model.visual.blocks.0.attn.qkv.lora_A.default.weight" in trainable_params
+        visual_param_name = "base_model.model.visual.blocks.0.attn.qkv.lora_A.default.weight"
+        language_param_name = "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight"
+        merger_param_name = "base_model.model.visual.merger.lora_A.default.weight"
 
-    assert "merger" not in trainable_params
-    assert "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight" in trainable_params
+    assert (visual_param_name in trainable_params) != freeze_vision_tower
+    assert (language_param_name in trainable_params) != freeze_language_model
+    assert (merger_param_name in trainable_params) is False
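
Note: the extra `model.` segment in the 4.52 names reflects the transformers refactor that nested the vision tower and decoder under the top-level model module (the leading `base_model.model.` comes from PEFT's LoRA wrapper), so the test resolves parameter paths per version. The rewritten assertions read as exclusive-or: a LoRA weight must be trainable exactly when its component is not frozen. A minimal sketch of that pattern (`assert_trainability` is a hypothetical helper):

def assert_trainability(trainable_params: set, param_name: str, frozen: bool) -> None:
    # (param_name in trainable_params) != frozen  <=>  trainable XOR frozen
    assert (param_name in trainable_params) != frozen


trainable = {"language.lora_A.weight"}
assert_trainability(trainable, "language.lora_A.weight", frozen=False)  # passes
assert_trainability(trainable, "visual.lora_A.weight", frozen=True)  # passes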

View File

@@ -1,2 +1,2 @@
 # change if test fails or cache is outdated
-0.9.3.106
+0.9.3.107