init unittest
Former-commit-id: 1c6f21cb8878ced043fe0b27c72cad2ef6ee990e
tests/model/test_attn.py | 35 lines (new file)
@@ -0,0 +1,35 @@
import os

from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available

from llamafactory.hparams import get_infer_args
from llamafactory.model import load_model, load_tokenizer


TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-LlamaForCausalLM")


def test_attention():
    attention_available = ["off"]
    if is_torch_sdpa_available():
        attention_available.append("sdpa")

    if is_flash_attn_2_available():
        attention_available.append("fa2")

    llama_attention_classes = {
        "off": "LlamaAttention",
        "sdpa": "LlamaSdpaAttention",
        "fa2": "LlamaFlashAttention2",
    }
    for requested_attention in attention_available:
        model_args, _, finetuning_args, _ = get_infer_args({
            "model_name_or_path": TINY_LLAMA,
            "template": "llama2",
            "flash_attn": requested_attention,
        })
        tokenizer = load_tokenizer(model_args)
        model = load_model(tokenizer["tokenizer"], model_args, finetuning_args)
        for module in model.modules():
            if "Attention" in module.__class__.__name__:
                assert module.__class__.__name__ == llama_attention_classes[requested_attention]
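Note: the loop above checks all available backends inside a single test, so a failure on one backend hides the results for the rest. A minimal alternative sketch, assuming pytest is the runner (test_attention_parametrized and its skip logic are illustrative, not part of this commit):

import os

import pytest
from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available

from llamafactory.hparams import get_infer_args
from llamafactory.model import load_model, load_tokenizer

TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-LlamaForCausalLM")


@pytest.mark.parametrize(
    "backend,expected_class",
    [("off", "LlamaAttention"), ("sdpa", "LlamaSdpaAttention"), ("fa2", "LlamaFlashAttention2")],
)
def test_attention_parametrized(backend: str, expected_class: str):
    # Skip backends that the current environment cannot provide.
    if backend == "sdpa" and not is_torch_sdpa_available():
        pytest.skip("torch SDPA is unavailable")
    if backend == "fa2" and not is_flash_attn_2_available():
        pytest.skip("FlashAttention-2 is unavailable")

    model_args, _, finetuning_args, _ = get_infer_args(
        {"model_name_or_path": TINY_LLAMA, "template": "llama2", "flash_attn": backend}
    )
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args)
    # Same assertion as above: every attention module must use the requested implementation.
    for module in model.modules():
        if "Attention" in module.__class__.__name__:
            assert module.__class__.__name__ == expected_class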
@@ -1,30 +0,0 @@
import os
import time

from openai import OpenAI
from transformers.utils.versions import require_version


require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")


def main():
    client = OpenAI(
        api_key="0",
        base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
    )
    messages = [{"role": "user", "content": "Write a long essay about environment protection as long as possible."}]
    num_tokens = 0
    start_time = time.time()
    for _ in range(8):
        result = client.chat.completions.create(messages=messages, model="test")
        num_tokens += result.usage.completion_tokens

    elapsed_time = time.time() - start_time
    print("Throughput: {:.2f} tokens/s".format(num_tokens / elapsed_time))
    # --infer_backend hf: 27.22 tokens/s (1.0x)
    # --infer_backend vllm: 73.03 tokens/s (2.7x)


if __name__ == "__main__":
    main()
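The trailing comments record single aggregate runs; timing each request separately makes the variance between requests visible. A short sketch under the same assumptions as the deleted script (a LLaMA-Factory API server listening on localhost, "test" as a placeholder model name):

import os
import statistics
import time

from openai import OpenAI

client = OpenAI(
    api_key="0",
    base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
)
messages = [{"role": "user", "content": "Write a long essay about environment protection as long as possible."}]

rates = []
for _ in range(8):
    start_time = time.time()
    result = client.chat.completions.create(messages=messages, model="test")
    # Completion tokens per second for this single request.
    rates.append(result.usage.completion_tokens / (time.time() - start_time))

print("Throughput: {:.2f} +/- {:.2f} tokens/s".format(statistics.mean(rates), statistics.stdev(rates)))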
@@ -1,64 +0,0 @@
import json
import os
from typing import Sequence

from openai import OpenAI
from transformers.utils.versions import require_version


require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")


def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
    grade_to_score = {"A": 4, "B": 3, "C": 2}
    total_score, total_hour = 0, 0
    for grade, hour in zip(grades, hours):
        total_score += grade_to_score[grade] * hour
        total_hour += hour
    return round(total_score / total_hour, 2)


def main():
    client = OpenAI(
        api_key="0",
        base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
    )
    tools = [
        {
            "type": "function",
            "function": {
                "name": "calculate_gpa",
                "description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
                        "hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
                    },
                    "required": ["grades", "hours"],
                },
            },
        }
    ]
    tool_map = {"calculate_gpa": calculate_gpa}

    messages = []
    messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."})
    result = client.chat.completions.create(messages=messages, model="test", tools=tools)
    if result.choices[0].message.tool_calls is None:
        raise ValueError("Cannot retrieve function call from the response.")

    messages.append(result.choices[0].message)
    tool_call = result.choices[0].message.tool_calls[0].function
    print(tool_call)
    # Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
    name, arguments = tool_call.name, json.loads(tool_call.arguments)
    tool_result = tool_map[name](**arguments)
    messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
    result = client.chat.completions.create(messages=messages, model="test", tools=tools)
    print(result.choices[0].message.content)
    # Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42.


if __name__ == "__main__":
    main()
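This script handles exactly one round with exactly one tool call. A hedged sketch of a generic dispatch loop that keeps the same message conventions as above (a plain "tool" role message without tool_call_id, which the local server accepts); run_tool_loop, max_rounds, and the "result" wrapper key are illustrative names, not part of the commit:

import json


def run_tool_loop(client, messages, tools, tool_map, model="test", max_rounds=5):
    # Call the model until it stops requesting tools, dispatching each call via tool_map.
    for _ in range(max_rounds):
        result = client.chat.completions.create(messages=messages, model=model, tools=tools)
        message = result.choices[0].message
        if not message.tool_calls:
            return message.content  # final natural-language answer
        messages.append(message)
        for tool_call in message.tool_calls:
            function = tool_call.function
            tool_result = tool_map[function.name](**json.loads(function.arguments))
            messages.append({"role": "tool", "content": json.dumps({"result": tool_result}, ensure_ascii=False)})
    raise RuntimeError("Tool loop did not finish within {} rounds".format(max_rounds))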