[train] KTransformers SFT as backend engine for LLaMA-Factory (#9400)
Co-authored-by: jimmy128 <jimmy128@noreply.gitcode.com>
Co-authored-by: Yaowei Zheng <hiyouga@buaa.edu.cn>
examples/inference/deepseek2_lora_sft_kt.yaml (new file, +10 lines)
@@ -0,0 +1,10 @@
+model_name_or_path: deepseek-ai/DeepSeek-V2-Lite
+adapter_name_or_path: saves/Kllama_deepseekV2
+template: deepseek
+infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
+trust_remote_code: true
+
+use_kt: true # use KTransformers as the LoRA SFT backend for inference
+kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
+cpu_infer: 32
+chunk_size: 8192
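For context, a minimal sketch (not part of this commit) of driving the config above through LLaMA-Factory's Python chat API (llamafactory.chat.ChatModel), assuming the ktransformers package and its AMX kernels are installed; the kwargs simply mirror the YAML, and use_kt, kt_optimize_rule, cpu_infer, and chunk_size are the options this PR introduces.

# Sketch: run one inference turn over the KTransformers backend,
# using the same keys as examples/inference/deepseek2_lora_sft_kt.yaml.
from llamafactory.chat import ChatModel

chat_model = ChatModel(dict(
    model_name_or_path="deepseek-ai/DeepSeek-V2-Lite",
    adapter_name_or_path="saves/Kllama_deepseekV2",
    template="deepseek",
    infer_backend="ktransformers",
    trust_remote_code=True,
    use_kt=True,  # new in this PR: route inference through KTransformers
    kt_optimize_rule="examples/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml",
    cpu_infer=32,     # new in this PR: CPU threads for KTransformers kernels
    chunk_size=8192,  # new in this PR: prefill chunk size
))

messages = [{"role": "user", "content": "Hello, who are you?"}]
for response in chat_model.chat(messages):
    print(response.response_text)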
examples/inference/deepseek3_kt.yaml (new file, +9 lines)
@@ -0,0 +1,9 @@
+model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
+template: deepseek
+infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
+trust_remote_code: true
+
+use_kt: true # use KTransformers as the LoRA SFT backend for inference
+kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
+cpu_infer: 32
+chunk_size: 8192
examples/inference/deepseek3_lora_sft_kt.yaml (new file, +10 lines)
@@ -0,0 +1,10 @@
+model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
+adapter_name_or_path: saves/Kllama_deepseekV3
+template: deepseek
+infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
+trust_remote_code: true
+
+use_kt: true # use KTransformers as the LoRA SFT backend for inference
+kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
+cpu_infer: 32
+chunk_size: 8192
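The three new configs follow one pattern and differ only in the base checkpoint, the optional LoRA adapter, and the matching kt_optimize_rule file; each can also be launched directly with llamafactory-cli chat <config>.yaml, or served as an OpenAI-style endpoint with llamafactory-cli api.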
examples/inference/llama3.yaml (modified)
@@ -1,4 +1,4 @@
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 template: llama3
-infer_backend: huggingface # choices: [huggingface, vllm, sglang]
+infer_backend: huggingface # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
examples/inference/llama3_full_sft.yaml (modified)
@@ -1,4 +1,4 @@
 model_name_or_path: saves/llama3-8b/full/sft
 template: llama3
-infer_backend: huggingface # choices: [huggingface, vllm, sglang]
+infer_backend: huggingface # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
examples/inference/llama3_lora_sft.yaml (modified)
@@ -1,5 +1,5 @@
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 adapter_name_or_path: saves/llama3-8b/lora/sft
 template: llama3
-infer_backend: huggingface # choices: [huggingface, vllm, sglang]
+infer_backend: huggingface # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
examples/inference/qwen2_5vl.yaml (modified)
@@ -1,4 +1,4 @@
 model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
 template: qwen2_vl
-infer_backend: huggingface # choices: [huggingface, vllm, sglang]
+infer_backend: huggingface # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
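The four changes to existing example configs are comment-only: each keeps infer_backend: huggingface and merely extends the documented list of backend choices to include ktransformers.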