[deps] update to transformers 4.52 (#8125)
@@ -57,19 +57,11 @@ if is_transformers_version_greater_than("4.45.0"):
|
||||
)
|
||||
|
||||
|
||||
if is_transformers_version_greater_than("4.49.0"):
|
||||
try:
|
||||
from transformers.image_utils import make_batched_videos, make_flat_list_of_images
|
||||
except ImportError:
|
||||
try:
|
||||
# If that fails, try importing from the new location
|
||||
from transformers.image_utils import make_flat_list_of_images
|
||||
from transformers.video_utils import make_batched_videos
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Could not import make_batched_videos and make_flat_list_of_images. "
|
||||
"In Transformers 4.52.0, make_batched_videos will be moved to transformers.video_utils."
|
||||
)
|
||||
if is_transformers_version_greater_than("4.52.0"):
|
||||
from transformers.image_utils import make_flat_list_of_images
|
||||
from transformers.video_utils import make_batched_videos
|
||||
elif is_transformers_version_greater_than("4.49.0"):
|
||||
from transformers.image_utils import make_batched_videos, make_flat_list_of_images
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
||||
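Note: transformers 4.52 finalizes the move of make_batched_videos from transformers.image_utils to transformers.video_utils, so the speculative try/except probe is replaced by two explicit version gates. A minimal usage sketch (editor's illustration, not part of the commit; input names are hypothetical). Both branches bind the same two names, so downstream code stays version-agnostic:

    # hypothetical inputs: nested lists of PIL images / video frames
    images = make_flat_list_of_images(image_inputs)  # -> flat list of images
    videos = make_batched_videos(video_inputs)       # -> list of frame lists
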
@@ -52,7 +52,7 @@ class Template:
     efficient_eos: bool
     replace_eos: bool
     replace_jinja_template: bool
-    enable_thinking: bool
+    enable_thinking: Optional[bool]
     mm_plugin: "BasePlugin"

     def encode_oneturn(
@@ -411,14 +411,17 @@ class ReasoningTemplate(Template):
         for i in range(1, len(messages) - 2, 2):
             messages[i]["content"] = self.remove_thought(messages[i]["content"])

+        if self.enable_thinking is False:  # remove all cot
+            messages[-1]["content"] = self.remove_thought(messages[-1]["content"])
+
         prompt_ids, response_ids = super().encode_oneturn(tokenizer, messages, system, tools)
         if (
             self.thought_words[0] not in messages[-1]["content"]
             and self.thought_words[1] not in messages[-1]["content"]
-        ):
-            if not self.enable_thinking:
-                prompt_ids = prompt_ids + self.get_thought_word_ids(tokenizer)
-            else:
+        ):  # add empty cot
+            if not self.enable_thinking:  # do not compute loss
+                prompt_ids += self.get_thought_word_ids(tokenizer)
+            else:  # do compute loss
                 response_ids = self.get_thought_word_ids(tokenizer) + response_ids

         return prompt_ids, response_ids
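Note: enable_thinking is now tri-state (Optional[bool]), and the new inline comments pin down the loss semantics: thought tokens appended to prompt_ids are excluded from the loss, while tokens prepended to response_ids are trained on. A summary derived from the branches above (editor's annotation, not code from the commit):

    # enable_thinking=True  -> empty CoT is prepended to response_ids (loss computed)
    # enable_thinking=None  -> empty CoT is appended to prompt_ids (no loss);
    #                          `not None` is truthy, so None takes the same branch as False
    # enable_thinking=False -> existing CoT is stripped from the last message first,
    #                          then the empty CoT is placed as in the None case
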
@@ -431,15 +434,20 @@ class ReasoningTemplate(Template):
         system: Optional[str] = None,
         tools: Optional[str] = None,
     ) -> list[tuple[list[int], list[int]]]:
         messages = deepcopy(messages)
+        if self.enable_thinking is False:  # remove all cot
+            for i in range(1, len(messages), 2):
+                messages[i]["content"] = self.remove_thought(messages[i]["content"])
+
         encoded_messages = self._encode(tokenizer, messages, system, tools)
         for i in range(0, len(messages), 2):
             if (
                 self.thought_words[0] not in messages[i + 1]["content"]
                 and self.thought_words[1] not in messages[i + 1]["content"]
-            ):
-                if not self.enable_thinking:
+            ):  # add empty cot
+                if not self.enable_thinking:  # do not compute loss
                     encoded_messages[i] += self.get_thought_word_ids(tokenizer)
-                else:
+                else:  # do compute loss
                     encoded_messages[i + 1] = self.get_thought_word_ids(tokenizer) + encoded_messages[i + 1]

         return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)]
@@ -463,7 +471,7 @@ def register_template(
    efficient_eos: bool = False,
    replace_eos: bool = False,
    replace_jinja_template: bool = False,
-    enable_thinking: bool = True,
+    enable_thinking: Optional[bool] = True,
    mm_plugin: "BasePlugin" = get_mm_plugin(name="base"),
    template_class: type["Template"] = Template,
 ) -> None:
@@ -2566,6 +2566,14 @@ register_model_group(
             DownloadSource.DEFAULT: "Qwen/Qwen2.5-Omni-7B",
             DownloadSource.MODELSCOPE: "Qwen/Qwen2.5-Omni-7B",
         },
+        "Qwen2.5-Omni-7B-GPTQ-Int4": {
+            DownloadSource.DEFAULT: "Qwen/Qwen2.5-Omni-7B-GPTQ-Int4",
+            DownloadSource.MODELSCOPE: "Qwen/Qwen2.5-Omni-7B-GPTQ-Int4",
+        },
+        "Qwen2.5-Omni-7B-AWQ": {
+            DownloadSource.DEFAULT: "Qwen/Qwen2.5-Omni-7B-AWQ",
+            DownloadSource.MODELSCOPE: "Qwen/Qwen2.5-Omni-7B-AWQ",
+        },
     },
     template="qwen2_omni",
     multimodal=True,
@@ -94,7 +94,9 @@ def check_version(requirement: str, mandatory: bool = False) -> None:
|
||||
|
||||
def check_dependencies() -> None:
|
||||
r"""Check the version of the required packages."""
|
||||
check_version("transformers>=4.45.0,<=4.51.3,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0")
|
||||
check_version(
|
||||
"transformers>=4.45.0,<=4.52.1,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0,!=4.52.0"
|
||||
)
|
||||
check_version("datasets>=2.16.0,<=3.6.0")
|
||||
check_version("accelerate>=0.34.0,<=1.7.0")
|
||||
check_version("peft>=0.14.0,<=0.15.2")
|
||||
|
||||
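Note: the supported ceiling moves from 4.51.3 to 4.52.1, while 4.52.0 itself joins the exclusion list. A quick illustration of how the new specifier behaves under packaging (editor's sketch, abbreviated to the bounds relevant here; the real string also excludes several 4.46.x to 4.48.0 releases):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=4.45.0,<=4.52.1,!=4.52.0")
    assert "4.51.3" in spec
    assert "4.52.1" in spec
    assert "4.52.0" not in spec  # only the .0 release is skipped
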
@@ -119,7 +119,7 @@ class DataArguments:
         default=None,
         metadata={"help": "Override the default system message in the template."},
     )
-    enable_thinking: bool = field(
+    enable_thinking: Optional[bool] = field(
         default=True,
         metadata={"help": "Whether or not to enable thinking mode for reasoning models."},
     )
@@ -235,10 +235,6 @@ class ProcessorArguments:
         default=False,
         metadata={"help": "Whether to crop the image to patches for internvl."},
     )
-    use_audio_in_video: bool = field(
-        default=False,
-        metadata={"help": "Whether or not to use audio in video inputs."},
-    )
     video_max_pixels: int = field(
         default=256 * 256,
         metadata={"help": "The maximum number of pixels of video inputs."},
@@ -255,6 +251,10 @@ class ProcessorArguments:
         default=128,
         metadata={"help": "The maximum number of sampled frames for video inputs."},
     )
+    use_audio_in_video: bool = field(
+        default=False,
+        metadata={"help": "Whether or not to use audio in video inputs."},
+    )
     audio_sampling_rate: int = field(
         default=16000,
         metadata={"help": "The sampling rate of audio inputs."},
@@ -24,6 +24,7 @@ import transformers.models
 from transformers.activations import ACT2FN

 from ...extras import logging
+from ...extras.packages import is_transformers_version_greater_than


 if TYPE_CHECKING:
@@ -281,7 +282,7 @@ _register_composite_model(
     model_type="qwen2_vl",
     projector_key="visual.merger",
     vision_model_keys=["visual.patch_embed", "visual.blocks"],
-    language_model_keys=["model", "lm_head"],
+    language_model_keys=["language_model"] if is_transformers_version_greater_than("4.52.0") else ["model", "lm_head"],
     lora_conflict_keys=["patch_embed"],
 )
@@ -290,6 +291,6 @@ _register_composite_model(
     model_type="qwen2_5_vl",
     projector_key="visual.merger",
     vision_model_keys=["visual.patch_embed", "visual.blocks"],
-    language_model_keys=["model", "lm_head"],
+    language_model_keys=["language_model"] if is_transformers_version_greater_than("4.52.0") else ["model", "lm_head"],
     lora_conflict_keys=["patch_embed"],
 )
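Note: transformers 4.52 restructured vision-language models so that the text decoder is exposed as a single language_model submodule rather than separate model / lm_head attributes; the two hunks above switch the registered key lists accordingly for qwen2_vl and qwen2_5_vl. A hedged sketch of how such dotted key lists typically resolve to modules (the helper is illustrative, not from the commit):

    import torch

    def resolve_submodules(model: torch.nn.Module, keys: list[str]) -> list[torch.nn.Module]:
        # each registered key names a dotted attribute path inside the composite model
        return [model.get_submodule(key) for key in keys]
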
@@ -85,8 +85,8 @@ def patch_processor(
     setattr(processor, "video_min_pixels", model_args.video_min_pixels)
     setattr(processor, "video_fps", model_args.video_fps)
     setattr(processor, "video_maxlen", model_args.video_maxlen)
-    setattr(processor, "audio_sampling_rate", model_args.audio_sampling_rate)
     setattr(processor, "use_audio_in_video", model_args.use_audio_in_video)
+    setattr(processor, "audio_sampling_rate", model_args.audio_sampling_rate)


 def patch_config(
@@ -121,11 +121,11 @@ class CustomDPOTrainer(DPOTrainer):
         return super().create_scheduler(num_training_steps, optimizer)

     @override
-    def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
+    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
         if self.finetuning_args.disable_shuffling:
             return torch.utils.data.SequentialSampler(self.train_dataset)

-        return super()._get_train_sampler()
+        return super()._get_train_sampler(*args, **kwargs)

     @override
     def get_batch_samples(self, *args, **kwargs):
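Note: transformers 4.52 changed the signature of Trainer._get_train_sampler (the base method now receives the dataset as an argument), so every override here forwards *args and **kwargs to stay compatible with both signatures; the same edit repeats in the KTO, pre-training, reward-model, and SFT trainers below. The pattern in isolation (editor's sketch; the class name and flag are hypothetical):

    from typing import Optional

    import torch
    from transformers import Trainer

    class ShuffleControlTrainer(Trainer):
        def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
            # forwarding works whether the base method takes no arguments (<= 4.51)
            # or the train dataset (4.52+)
            if getattr(self.args, "disable_shuffling", False):  # hypothetical flag
                return torch.utils.data.SequentialSampler(self.train_dataset)
            return super()._get_train_sampler(*args, **kwargs)
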
@@ -34,7 +34,6 @@ from ..trainer_utils import create_custom_optimizer, create_custom_scheduler, ge


 if TYPE_CHECKING:
-    import torch.utils.data
     from transformers import PreTrainedModel, ProcessorMixin

     from ...hparams import FinetuningArguments
@@ -119,12 +118,12 @@ class CustomKTOTrainer(KTOTrainer):
         return super().create_scheduler(num_training_steps, optimizer)

     @override
-    def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
+    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
         r"""Replace the sequential sampler of KTO Trainer created by trl with the random sampler."""
         if self.finetuning_args.disable_shuffling:
             return torch.utils.data.SequentialSampler(self.train_dataset)

-        return Trainer._get_train_sampler(self)
+        return Trainer._get_train_sampler(self, *args, **kwargs)

     @override
     def get_batch_samples(self, *args, **kwargs):
@@ -70,11 +70,11 @@ class CustomTrainer(Trainer):
         return super().create_scheduler(num_training_steps, optimizer)

     @override
-    def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
+    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
         if self.finetuning_args.disable_shuffling:
             return torch.utils.data.SequentialSampler(self.train_dataset)

-        return super()._get_train_sampler()
+        return super()._get_train_sampler(*args, **kwargs)

     @override
     def compute_loss(self, model, inputs, *args, **kwargs):
@@ -78,11 +78,11 @@ class PairwiseTrainer(Trainer):
         return super().create_scheduler(num_training_steps, optimizer)

     @override
-    def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
+    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
         if self.finetuning_args.disable_shuffling:
             return torch.utils.data.SequentialSampler(self.train_dataset)

-        return super()._get_train_sampler()
+        return super()._get_train_sampler(*args, **kwargs)

     @override
     def compute_loss(
@@ -92,11 +92,11 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         return super().create_scheduler(num_training_steps, optimizer)

     @override
-    def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
+    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
         if self.finetuning_args.disable_shuffling:
             return torch.utils.data.SequentialSampler(self.train_dataset)

-        return super()._get_train_sampler()
+        return super()._get_train_sampler(*args, **kwargs)

     @override
     def compute_loss(self, model, inputs, *args, **kwargs):
@@ -205,6 +205,14 @@ def load_eval_results(path: os.PathLike) -> str:
|
||||
return f"```json\n{result}\n```\n"
|
||||
|
||||
|
||||
def calculate_pixels(pixels: str) -> int:
|
||||
r"""Calculate the number of pixels from the expression."""
|
||||
if "*" in pixels:
|
||||
return int(pixels.split("*")[0]) * int(pixels.split("*")[1])
|
||||
else:
|
||||
return int(pixels)
|
||||
|
||||
|
||||
def create_ds_config() -> None:
|
||||
r"""Create deepspeed config in the current directory."""
|
||||
os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
|
||||
|
||||
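Note: the new helper lets the WebUI pixel fields accept either a product expression or a plain integer; for example, with the defaults added to the multimodal tab later in this diff:

    calculate_pixels("768*768")  # -> 589824
    calculate_pixels("32*32")    # -> 1024
    calculate_pixels("589824")   # -> 589824
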
@@ -106,11 +106,11 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]:
|
||||
use_llama_pro = gr.Checkbox()
|
||||
|
||||
with gr.Column():
|
||||
enable_thinking = gr.Checkbox(value=True)
|
||||
report_to = gr.Dropdown(
|
||||
choices=["none", "all", "wandb", "mlflow", "neptune", "tensorboard"],
|
||||
value=["none"],
|
||||
choices=["none", "wandb", "mlflow", "neptune", "tensorboard", "all"],
|
||||
value="none",
|
||||
allow_custom_value=True,
|
||||
multiselect=True,
|
||||
)
|
||||
|
||||
input_elems.update(
|
||||
@@ -126,6 +126,7 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]:
|
||||
mask_history,
|
||||
resize_vocab,
|
||||
use_llama_pro,
|
||||
enable_thinking,
|
||||
report_to,
|
||||
}
|
||||
)
|
||||
@@ -143,6 +144,7 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]:
|
||||
mask_history=mask_history,
|
||||
resize_vocab=resize_vocab,
|
||||
use_llama_pro=use_llama_pro,
|
||||
enable_thinking=enable_thinking,
|
||||
report_to=report_to,
|
||||
)
|
||||
)
|
||||
@@ -231,6 +233,42 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]:
|
||||
)
|
||||
)
|
||||
|
||||
with gr.Accordion(open=False) as mm_tab:
|
||||
with gr.Row():
|
||||
freeze_vision_tower = gr.Checkbox(value=True)
|
||||
freeze_multi_modal_projector = gr.Checkbox(value=True)
|
||||
freeze_language_model = gr.Checkbox(value=False)
|
||||
|
||||
with gr.Row():
|
||||
image_max_pixels = gr.Textbox(value="768*768")
|
||||
image_min_pixels = gr.Textbox(value="32*32")
|
||||
video_max_pixels = gr.Textbox(value="256*256")
|
||||
video_min_pixels = gr.Textbox(value="16*16")
|
||||
|
||||
input_elems.update(
|
||||
{
|
||||
freeze_vision_tower,
|
||||
freeze_multi_modal_projector,
|
||||
freeze_language_model,
|
||||
image_max_pixels,
|
||||
image_min_pixels,
|
||||
video_max_pixels,
|
||||
video_min_pixels,
|
||||
}
|
||||
)
|
||||
elem_dict.update(
|
||||
dict(
|
||||
mm_tab=mm_tab,
|
||||
freeze_vision_tower=freeze_vision_tower,
|
||||
freeze_multi_modal_projector=freeze_multi_modal_projector,
|
||||
freeze_language_model=freeze_language_model,
|
||||
image_max_pixels=image_max_pixels,
|
||||
image_min_pixels=image_min_pixels,
|
||||
video_max_pixels=video_max_pixels,
|
||||
video_min_pixels=video_min_pixels,
|
||||
)
|
||||
)
|
||||
|
||||
with gr.Accordion(open=False) as galore_tab:
|
||||
with gr.Row():
|
||||
use_galore = gr.Checkbox()
|
||||
|
||||
@@ -871,6 +871,28 @@ LOCALES = {
             "info": "拡張ブロックのパラメータのみをトレーニングします。",
         },
     },
+    "enable_thinking": {
+        "en": {
+            "label": "Enable thinking",
+            "info": "Whether or not to enable thinking mode for reasoning models.",
+        },
+        "ru": {
+            "label": "Включить мысли",
+            "info": "Включить режим мысли для моделей решающего характера.",
+        },
+        "zh": {
+            "label": "启用思考模式",
+            "info": "是否启用推理模型的思考模式。",
+        },
+        "ko": {
+            "label": "생각 모드 활성화",
+            "info": "추론 모델의 생각 모드를 활성화할지 여부.",
+        },
+        "ja": {
+            "label": "思考モードを有効化",
+            "info": "推論モデルの思考モードを有効にするかどうか。",
+        },
+    },
     "report_to": {
         "en": {
             "label": "Enable external logger",
@@ -1374,6 +1396,177 @@ LOCALES = {
             "info": "PPO トレーニングにおいて報酬スコアをホワイトニング処理します。",
         },
     },
+    "mm_tab": {
+        "en": {
+            "label": "Multimodal configurations",
+        },
+        "ru": {
+            "label": "Конфигурации мультимедиа",
+        },
+        "zh": {
+            "label": "多模态参数设置",
+        },
+        "ko": {
+            "label": "멀티모달 구성",
+        },
+        "ja": {
+            "label": "多モーダル設定",
+        },
+    },
+    "freeze_vision_tower": {
+        "en": {
+            "label": "Freeze vision tower",
+            "info": "Freeze the vision tower in the model.",
+        },
+        "ru": {
+            "label": "Заморозить башню визиона",
+            "info": "Заморозить башню визиона в модели.",
+        },
+        "zh": {
+            "label": "冻结视觉编码器",
+            "info": "冻结模型中的视觉编码器。",
+        },
+        "ko": {
+            "label": "비전 타워 고정",
+            "info": "모델의 비전 타워를 고정합니다.",
+        },
+        "ja": {
+            "label": "ビジョンタワーの固定",
+            "info": "モデルのビジョンタワーを固定します。",
+        },
+    },
+    "freeze_multi_modal_projector": {
+        "en": {
+            "label": "Freeze multi-modal projector",
+            "info": "Freeze the multi-modal projector in the model.",
+        },
+        "ru": {
+            "label": "Заморозить мультимодальный проектор",
+            "info": "Заморозить мультимодальный проектор в модели.",
+        },
+        "zh": {
+            "label": "冻结多模态投影器",
+            "info": "冻结模型中的多模态投影器。",
+        },
+        "ko": {
+            "label": "멀티모달 프로젝터 고정",
+            "info": "모델의 멀티모달 프로젝터를 고정합니다.",
+        },
+        "ja": {
+            "label": "多モーダルプロジェクターの固定",
+            "info": "モデルの多モーダルプロジェクターを固定します。",
+        },
+    },
+    "freeze_language_model": {
+        "en": {
+            "label": "Freeze language model",
+            "info": "Freeze the language model in the model.",
+        },
+        "ru": {
+            "label": "Заморозить язык модели",
+            "info": "Заморозить язык модели в модели.",
+        },
+        "zh": {
+            "label": "冻结语言模型",
+            "info": "冻结模型中的语言模型。",
+        },
+        "ko": {
+            "label": "언어 모델 고정",
+            "info": "모델의 언어 모델을 고정합니다.",
+        },
+        "ja": {
+            "label": "言語モデルの固定",
+            "info": "モデルの言語モデルを固定します。",
+        },
+    },
+    "image_max_pixels": {
+        "en": {
+            "label": "Image max pixels",
+            "info": "The maximum number of pixels of image inputs.",
+        },
+        "ru": {
+            "label": "Максимальное количество пикселей изображения",
+            "info": "Максимальное количество пикселей изображения.",
+        },
+        "zh": {
+            "label": "图像最大像素",
+            "info": "输入图像的最大像素数。",
+        },
+        "ko": {
+            "label": "이미지 최대 픽셀",
+            "info": "이미지 입력의 최대 픽셀 수입니다.",
+        },
+        "ja": {
+            "label": "画像最大ピクセル",
+            "info": "画像入力の最大ピクセル数です。",
+        },
+    },
+    "image_min_pixels": {
+        "en": {
+            "label": "Image min pixels",
+            "info": "The minimum number of pixels of image inputs.",
+        },
+        "ru": {
+            "label": "Минимальное количество пикселей изображения",
+            "info": "Минимальное количество пикселей изображения.",
+        },
+        "zh": {
+            "label": "图像最小像素",
+            "info": "输入图像的最小像素数。",
+        },
+        "ko": {
+            "label": "이미지 최소 픽셀",
+            "info": "이미지 입력의 최소 픽셀 수입니다.",
+        },
+        "ja": {
+            "label": "画像最小ピクセル",
+            "info": "画像入力の最小ピクセル数です。",
+        },
+    },
+    "video_max_pixels": {
+        "en": {
+            "label": "Video max pixels",
+            "info": "The maximum number of pixels of video inputs.",
+        },
+        "ru": {
+            "label": "Максимальное количество пикселей видео",
+            "info": "Максимальное количество пикселей видео.",
+        },
+        "zh": {
+            "label": "视频最大像素",
+            "info": "输入视频的最大像素数。",
+        },
+        "ko": {
+            "label": "비디오 최대 픽셀",
+            "info": "비디오 입력의 최대 픽셀 수입니다.",
+        },
+        "ja": {
+            "label": "ビデオ最大ピクセル",
+            "info": "ビデオ入力の最大ピクセル数です。",
+        },
+    },
+    "video_min_pixels": {
+        "en": {
+            "label": "Video min pixels",
+            "info": "The minimum number of pixels of video inputs.",
+        },
+        "ru": {
+            "label": "Минимальное количество пикселей видео",
+            "info": "Минимальное количество пикселей видео.",
+        },
+        "zh": {
+            "label": "视频最小像素",
+            "info": "输入视频的最小像素数。",
+        },
+        "ko": {
+            "label": "비디오 최소 픽셀",
+            "info": "비디오 입력의 최소 픽셀 수입니다.",
+        },
+        "ja": {
+            "label": "ビデオ最小ピクセル",
+            "info": "ビデオ入力の最小ピクセル数です。",
+        },
+    },
     "galore_tab": {
         "en": {
             "label": "GaLore configurations",
@@ -2468,23 +2661,6 @@ LOCALES = {
             "label": "HTML タグをエスケープ",
         },
     },
-    "enable_thinking": {
-        "en": {
-            "label": "Enable thinking",
-        },
-        "ru": {
-            "label": "Включить мышление",
-        },
-        "zh": {
-            "label": "启用思考",
-        },
-        "ko": {
-            "label": "사고를 활성화하다",
-        },
-        "ja": {
-            "label": "思考を可能にする",
-        },
-    },
     "clear_btn": {
         "en": {
             "value": "Clear history",
@@ -29,6 +29,7 @@ from .common import (
     DEFAULT_CACHE_DIR,
     DEFAULT_CONFIG_DIR,
     abort_process,
+    calculate_pixels,
     gen_cmd,
     get_save_dir,
     load_args,
@@ -162,7 +163,15 @@ class Runner:
             mask_history=get("train.mask_history"),
             resize_vocab=get("train.resize_vocab"),
             use_llama_pro=get("train.use_llama_pro"),
+            enable_thinking=get("train.enable_thinking"),
             report_to=get("train.report_to"),
+            freeze_vision_tower=get("train.freeze_vision_tower"),
+            freeze_multi_modal_projector=get("train.freeze_multi_modal_projector"),
+            freeze_language_model=get("train.freeze_language_model"),
+            image_max_pixels=calculate_pixels(get("train.image_max_pixels")),
+            image_min_pixels=calculate_pixels(get("train.image_min_pixels")),
+            video_max_pixels=calculate_pixels(get("train.video_max_pixels")),
+            video_min_pixels=calculate_pixels(get("train.video_min_pixels")),
             use_galore=get("train.use_galore"),
             use_apollo=get("train.use_apollo"),
             use_badam=get("train.use_badam"),
@@ -256,12 +265,6 @@ class Runner:
             args["badam_switch_interval"] = get("train.badam_switch_interval")
             args["badam_update_ratio"] = get("train.badam_update_ratio")

-        # report_to
-        if "none" in args["report_to"]:
-            args["report_to"] = "none"
-        elif "all" in args["report_to"]:
-            args["report_to"] = "all"
-
         # swanlab config
         if get("train.use_swanlab"):
             args["swanlab_project"] = get("train.swanlab_project")
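Note: with report_to now a single-select dropdown defaulting to "none", the list-to-string normalization above is obsolete and the raw value is forwarded unchanged. transformers accepts either form for this argument, e.g. (editor's illustration):

    from transformers import TrainingArguments

    TrainingArguments(output_dir="out", report_to="none")
    TrainingArguments(output_dir="out", report_to=["wandb", "tensorboard"])
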