commit 43b2ede0f8
parent 2f095e2017
Author: hiyouga
Date: 2024-03-09 02:01:26 +08:00
Former-commit-id: 627d1c91e675f1d9ebf47bad123cbbf29821da4d

7 changed files with 28 additions and 20 deletions

@@ -24,7 +24,7 @@ if TYPE_CHECKING:
     from transformers import PretrainedConfig, PreTrainedTokenizer
     from trl import AutoModelForCausalLMWithValueHead

-    from ..hparams import ModelArguments, FinetuningArguments
+    from ..hparams import ModelArguments


 logger = get_logger(__name__)
@@ -265,7 +265,6 @@ def patch_config(
     config: "PretrainedConfig",
     tokenizer: "PreTrainedTokenizer",
     model_args: "ModelArguments",
-    finetuning_args: "FinetuningArguments",
     init_kwargs: Dict[str, Any],
     is_trainable: bool,
 ) -> None:
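Call sites have to change in step with the narrower signature. A minimal caller-side sketch, assuming the package path llmtuner.model.patcher and the exported ModelArguments (the changed file's name is not shown in this diff, so both paths are assumptions):

```python
from typing import Any, Dict

from transformers import AutoConfig, AutoTokenizer

# Module paths are assumptions; this diff does not show the file names.
from llmtuner.model.patcher import patch_config
from llmtuner.hparams import ModelArguments


def build_init_kwargs(model_args: ModelArguments, is_trainable: bool) -> Dict[str, Any]:
    """Hypothetical caller updated for the narrower signature."""
    config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    init_kwargs: Dict[str, Any] = {}
    # The old call passed finetuning_args as the fourth argument; it is dropped now.
    patch_config(config, tokenizer, model_args, init_kwargs, is_trainable)
    return init_kwargs
```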
@@ -290,8 +289,7 @@ def patch_config(
     if not is_deepspeed_zero3_enabled():
         init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage
         if "device_map" not in init_kwargs:  # quant models cannot use auto device map
-            if finetuning_args.stage not in ["ppo"]:  # ppo stage should not set device map
-                init_kwargs["device_map"] = model_args.device_map or {"": get_current_device()}
+            init_kwargs["device_map"] = model_args.device_map or {"": get_current_device()}


 def patch_model(
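The last hunk removes the PPO carve-out: whenever DeepSpeed ZeRO-3 is off and the user supplied no device map, one is now derived for every stage, PPO included. A self-contained sketch of the resulting branch, with the ZeRO-3 check and current-device lookup replaced by illustrative stand-ins:

```python
from typing import Any, Dict, Optional


def resolve_device_map(
    init_kwargs: Dict[str, Any],
    user_device_map: Optional[Any],
    zero3_enabled: bool,
    low_cpu_mem_usage: bool = True,
) -> Dict[str, Any]:
    """Illustrative stand-in for the device-map branch of patch_config."""
    if not zero3_enabled:
        init_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
        if "device_map" not in init_kwargs:  # quant models cannot use auto device map
            # After this commit the assignment runs unconditionally for all stages.
            init_kwargs["device_map"] = user_device_map or {"": "cuda:0"}  # placeholder device
    return init_kwargs


# Example: no user-supplied map, ZeRO-3 disabled -> the current device is used.
print(resolve_device_map({}, user_device_map=None, zero3_enabled=False))
# {'low_cpu_mem_usage': True, 'device_map': {'': 'cuda:0'}}
```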