[misc] update format (#7277)
@@ -1,4 +1,4 @@
-# Copyright 2024 HuggingFace Inc., Daniel Han-Chen & the Unsloth team and the LlamaFactory team.
+# Copyright 2025 HuggingFace Inc., Daniel Han-Chen & the Unsloth team and the LlamaFactory team.
 #
 # This code is inspired by the HuggingFace's Transformers and PEFT library,
 # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/modeling_utils.py

@@ -1,4 +1,4 @@
-# Copyright 2024 EleutherAI, HuggingFace Inc., Yukang Chen, and the LlamaFactory team.
+# Copyright 2025 EleutherAI, HuggingFace Inc., Yukang Chen, and the LlamaFactory team.
 #
 # This code is based on the EleutherAI's GPT-NeoX and the HuggingFace's Transformers libraries.
 # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from collections.abc import Sequence
 from typing import TYPE_CHECKING

 import torch

@@ -27,7 +26,7 @@ if TYPE_CHECKING:
     from ...hparams import ModelArguments


-def _set_z3_leaf_modules(model: "PreTrainedModel", leaf_modules: Sequence["torch.nn.Module"]) -> None:
+def _set_z3_leaf_modules(model: "PreTrainedModel", leaf_modules: list["torch.nn.Module"]) -> None:
     check_version("deepspeed>=0.13.0")
     from deepspeed.utils import set_z3_leaf_modules  # type: ignore

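Context for the hunk above (commentary, not part of the diff): `_set_z3_leaf_modules` wraps DeepSpeed's `set_z3_leaf_modules`, which registers module classes as ZeRO-3 "leaves" so each matching module's parameters are gathered as one unit instead of being hooked per sub-module. A minimal sketch of the underlying DeepSpeed call; the Mixtral classes and checkpoint name are illustrative assumptions, not taken from this commit:

```python
# Sketch under assumptions: requires deepspeed>=0.13.0 and a transformers
# version with Mixtral support; the checkpoint name is illustrative only.
from deepspeed.utils import set_z3_leaf_modules
from transformers import AutoModelForCausalLM
from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock

model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")

# Treat each sparse-MoE block as a single ZeRO-3 leaf: its parameters are
# gathered together, which avoids stalls when experts are activated
# unevenly across data-parallel ranks.
set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
```
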
@@ -1,4 +1,4 @@
-# Copyright 2024 Musab Gultekin and the LlamaFactory team.
+# Copyright 2025 Musab Gultekin and the LlamaFactory team.
 #
 # This code is based on the Musab Gultekin's functionary library.
 # https://github.com/MeetKai/functionary/blob/main/functionary/train/packing/monkey_patch_packing.py

@@ -1,4 +1,4 @@
-# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
+# Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
 #
 # This code is inspired by the HuggingFace's Transformers and Optimum library.
 # https://github.com/huggingface/transformers/blob/v4.41.0/src/transformers/utils/quantization_config.py

@@ -1,4 +1,4 @@
-# Copyright 2024 LMSYS and the LlamaFactory team.
+# Copyright 2025 LMSYS and the LlamaFactory team.
 # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
 #
 # This code is inspired by the LMSYS's FastChat library.

@@ -15,7 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from collections.abc import Sequence
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Optional

@@ -180,7 +179,7 @@ def get_forbidden_modules(config: "PretrainedConfig", finetuning_args: "Finetuni


 def patch_target_modules(
-    model: "PreTrainedModel", finetuning_args: "FinetuningArguments", target_modules: Sequence[str]
+    model: "PreTrainedModel", finetuning_args: "FinetuningArguments", target_modules: list[str]
 ) -> list[str]:
     r"""Freeze vision tower for VLM LoRA tuning."""
     model_type = getattr(model.config, "model_type", None)
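
The recurring code change across these files is the same: parameter annotations move from `collections.abc.Sequence` to the builtin `list` (available as a generic since Python 3.9), and the now-unused `Sequence` import is dropped. A small sketch of what the narrowing means for callers; the function below is hypothetical, not from the repository:

```python
# Hypothetical example -- not LlamaFactory code. With list[str], a caller
# passing a tuple no longer satisfies the annotation, but the hint now
# matches how these helpers are actually called and what they return.
def dedupe_target_modules(target_modules: list[str]) -> list[str]:
    """Drop duplicate module names while preserving order."""
    return list(dict.fromkeys(target_modules))  # dicts keep insertion order


print(dedupe_target_modules(["q_proj", "v_proj", "q_proj"]))  # ['q_proj', 'v_proj']
```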