add test cases

commit 3ff9b87012
parent f4f315fd11
Author: hiyouga
Date: 2024-06-15 04:05:54 +08:00
Former-commit-id: 731176ff34cdf0cbf6b41c40c69f4ceb54c2daf6

9 changed files with 184 additions and 34 deletions


@@ -2,7 +2,7 @@ import os
 import torch
 
-from llamafactory.hparams import get_train_args
+from llamafactory.hparams import get_infer_args, get_train_args
 from llamafactory.model import load_model, load_tokenizer
@@ -23,11 +23,27 @@ TRAIN_ARGS = {
     "fp16": True,
 }
 
+INFER_ARGS = {
+    "model_name_or_path": TINY_LLAMA,
+    "finetuning_type": "full",
+    "template": "llama3",
+    "infer_dtype": "float16",
+}
+
 
-def test_full():
+def test_full_train():
     model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
     tokenizer_module = load_tokenizer(model_args)
     model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
     for param in model.parameters():
         assert param.requires_grad is True
         assert param.dtype == torch.float32
+
+
+def test_full_inference():
+    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
+    tokenizer_module = load_tokenizer(model_args)
+    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
+    for param in model.parameters():
+        assert param.requires_grad is False
+        assert param.dtype == torch.float16
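
For reference, a minimal sketch of the whole test module these hunks modify is shown below. Only the pieces visible in the diff come from the commit: the imports, the "fp16": True entry of TRAIN_ARGS, the INFER_ARGS dict, and the two test functions. Everything else (the TINY_LLAMA default, the remaining TRAIN_ARGS keys such as "stage", "do_train", "dataset", and "output_dir") is an assumption added only to make the module self-contained, and should be adjusted to whatever tiny checkpoint and training config the test suite actually uses.

# Minimal sketch of the test module; assumed values are marked below.
import os

import torch

from llamafactory.hparams import get_infer_args, get_train_args
from llamafactory.model import load_model, load_tokenizer

# Assumed: a small Llama-style checkpoint reachable from the test environment.
TINY_LLAMA = os.getenv("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

# Assumed minimal full fine-tuning config; only "fp16": True is shown in the diff.
TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "dataset": "identity",  # assumed: any small registered dataset
    "template": "llama3",
    "output_dir": "dummy_dir",
    "fp16": True,
}

INFER_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "finetuning_type": "full",
    "template": "llama3",
    "infer_dtype": "float16",
}


def test_full_train():
    # Training path: every parameter should be trainable and kept in fp32
    # (mixed-precision training keeps master weights in full precision).
    model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
    for param in model.parameters():
        assert param.requires_grad is True
        assert param.dtype == torch.float32


def test_full_inference():
    # Inference path: every parameter should be frozen and cast to the requested infer_dtype.
    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
    for param in model.parameters():
        assert param.requires_grad is False
        assert param.dtype == torch.float16

With the module filled in like this, either case can be selected with pytest's keyword filter, e.g. pytest -k test_full_inference.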