support llama pro #2338, add rslora

Former-commit-id: 40d659b7f30dd5a004703c176ec1f22dc864e505
This commit is contained in:
hiyouga
2024-02-15 02:27:36 +08:00
parent b403f8d8a8
commit 596b6828cb
24 changed files with 438 additions and 203 deletions

View File

@@ -10,6 +10,7 @@ from transformers.utils import (
WEIGHTS_NAME,
is_torch_bf16_gpu_available,
is_torch_cuda_available,
is_torch_mps_available,
is_torch_npu_available,
is_torch_xpu_available,
)
@@ -133,6 +134,8 @@ def get_current_device() -> torch.device:
device = "xpu:{}".format(os.environ.get("LOCAL_RANK", "0"))
elif is_torch_npu_available():
device = "npu:{}".format(os.environ.get("LOCAL_RANK", "0"))
elif is_torch_mps_available():
device = "mps:{}".format(os.environ.get("LOCAL_RANK", "0"))
elif is_torch_cuda_available():
device = "cuda:{}".format(os.environ.get("LOCAL_RANK", "0"))
else: