add test cases

Former-commit-id: 731176ff34cdf0cbf6b41c40c69f4ceb54c2daf6
Author: hiyouga
Date: 2024-06-15 04:05:54 +08:00
parent f4f315fd11
commit 3ff9b87012
9 changed files with 184 additions and 34 deletions


@@ -2,7 +2,7 @@ import os
 import torch
-from llamafactory.hparams import get_train_args
+from llamafactory.hparams import get_infer_args, get_train_args
 from llamafactory.model import load_model, load_tokenizer
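The import change supports the new inference test below: `get_infer_args` parses the same kind of argument dict as `get_train_args`, but returns a four-tuple instead of a five-tuple. A minimal sketch of how the tests unpack the two results; the names of the positions the tests skip with `_` are assumptions, not shown in this diff:

    # get_train_args returns five values; finetuning_args is fourth (middle names assumed)
    model_args, data_args, training_args, finetuning_args, generating_args = get_train_args({"freeze_trainable_layers": 1, **TRAIN_ARGS})
    # get_infer_args returns four values; finetuning_args is third
    model_args, data_args, finetuning_args, generating_args = get_infer_args(INFER_ARGS)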
@@ -23,8 +23,15 @@ TRAIN_ARGS = {
     "fp16": True,
 }
 
+INFER_ARGS = {
+    "model_name_or_path": TINY_LLAMA,
+    "finetuning_type": "freeze",
+    "template": "llama3",
+    "infer_dtype": "float16",
+}
+
 
-def test_freeze_all_modules():
+def test_freeze_train_all_modules():
     model_args, _, _, finetuning_args, _ = get_train_args({"freeze_trainable_layers": 1, **TRAIN_ARGS})
     tokenizer_module = load_tokenizer(model_args)
     model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
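With `freeze_trainable_layers: 1`, freeze (partial-parameter) tuning should leave only the last decoder block trainable. A rough sketch of the property the elided assertion loop checks, assuming the tiny test model has two decoder layers so that the last one is named `layers.1`:

    for name, param in model.named_parameters():
        if ".layers.1." in name:  # assumed name of the last decoder block
            assert param.requires_grad is True
        else:
            assert param.requires_grad is False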
@@ -37,7 +44,7 @@ def test_freeze_all_modules():
             assert param.dtype == torch.float16
 
 
-def test_freeze_extra_modules():
+def test_freeze_train_extra_modules():
     model_args, _, _, finetuning_args, _ = get_train_args(
         {"freeze_trainable_layers": 1, "freeze_extra_modules": "embed_tokens,lm_head", **TRAIN_ARGS}
     )
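`freeze_extra_modules` widens the trainable set beyond the selected layers: modules whose names match an entry of the comma-separated list (here `embed_tokens` and `lm_head`) also keep `requires_grad`. A hedged sketch of the extended check; the name patterns are assumptions:

    trainable_patterns = (".layers.1.", "embed_tokens", "lm_head")  # assumed patterns
    for name, param in model.named_parameters():
        expected = any(key in name for key in trainable_patterns)
        assert param.requires_grad is expected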
@@ -50,3 +57,12 @@ def test_freeze_extra_modules():
         else:
             assert param.requires_grad is False
             assert param.dtype == torch.float16
+
+
+def test_freeze_inference():
+    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
+    tokenizer_module = load_tokenizer(model_args)
+    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
+    for param in model.parameters():
+        assert param.requires_grad is False
+        assert param.dtype == torch.float16
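The new inference test covers the remaining path: with `is_trainable=False` and `"infer_dtype": "float16"`, every parameter should be frozen and cast to half precision. To try the tests in isolation, something like the following sketch should work; the test file path is an assumption based on the repository layout, not shown in this diff:

    # run just this test module (path assumed)
    import pytest
    pytest.main(["tests/model/test_freeze.py", "-q"])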