Mirror of https://github.com/hiyouga/LlamaFactory.git, synced 2026-02-01 20:23:37 +00:00
[model] update kt code (#9406)
@@ -439,6 +439,7 @@ class SwanLabArguments:
        metadata={"help": "The Lark(飞书) secret for SwanLab."},
    )


@dataclass
class FinetuningArguments(
    SwanLabArguments,
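For context, each of these argument groups (SwanLabArguments, KTransformersArguments, and so on) is a plain Python dataclass whose fields carry their CLI help text in the field metadata, as the Lark secret line above shows. A minimal sketch of that pattern, with an illustrative field name rather than one from the repository:

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ExampleArguments:
    # Illustrative field following the same field(default=..., metadata={"help": ...})
    # pattern as the SwanLab secret above; the name and help text are placeholders.
    example_secret: Optional[str] = field(
        default=None,
        metadata={"help": "An example secret, declared like the SwanLab Lark secret."},
    )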
@@ -485,7 +485,9 @@ class KTransformersArguments:
    )
    kt_optimize_rule: Optional[str] = field(
        default=None,
-        metadata={"help": "Path To The KTransformers Optimize Rule; See https://github.com/kvcache-ai/ktransformers/."},
+        metadata={
+            "help": "Path To The KTransformers Optimize Rule; See https://github.com/kvcache-ai/ktransformers/."
+        },
    )
    cpu_infer: Optional[int] = field(
        default=32,
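The change above only reflows the metadata dict across several lines; the help string is unchanged and still becomes the CLI help text once the dataclass is handed to an argument parser. LLaMA-Factory's argument handling builds on transformers' HfArgumentParser, so a minimal, self-contained sketch of how a field like kt_optimize_rule gets parsed could look as follows (the standalone KTArguments dataclass, the cpu_infer help text, and the example rule path are illustrative, not taken from the repository):

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class KTArguments:
    # Same shape as the kt_optimize_rule / cpu_infer fields in the diff above.
    kt_optimize_rule: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path To The KTransformers Optimize Rule; See https://github.com/kvcache-ai/ktransformers/."
        },
    )
    cpu_infer: Optional[int] = field(
        default=32,
        metadata={"help": "Placeholder help text: number of CPU threads for KTransformers."},
    )


# Parse an illustrative command line; the rule path is hypothetical.
parser = HfArgumentParser(KTArguments)
(kt_args,) = parser.parse_args_into_dataclasses(
    args=["--kt_optimize_rule", "rules/example.yaml", "--cpu_infer", "16"]
)
print(kt_args.kt_optimize_rule, kt_args.cpu_infer)  # rules/example.yaml 16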
@@ -517,9 +519,16 @@ class KTransformersArguments:
        metadata={"help": "Force-Think Toggle For The KT Engine."},
    )


@dataclass
class ModelArguments(
-    SGLangArguments, VllmArguments, KTransformersArguments, ExportArguments, ProcessorArguments, QuantizationArguments, BaseModelArguments
+    SGLangArguments,
+    VllmArguments,
+    KTransformersArguments,
+    ExportArguments,
+    ProcessorArguments,
+    QuantizationArguments,
+    BaseModelArguments,
):
    r"""Arguments pertaining to which model/config/tokenizer we are going to fine-tune or infer.
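Listing the ModelArguments bases one per line (with a trailing comma) keeps future additions to one-line diffs; functionally nothing changes, because dataclass inheritance already merges every field of every base into the combined class. A minimal sketch of that composition, with made-up argument groups standing in for the repository's mixins:

from dataclasses import dataclass, field, fields
from typing import Optional


@dataclass
class EngineArguments:
    # Illustrative stand-in for a backend group such as KTransformersArguments.
    cpu_infer: Optional[int] = field(default=32, metadata={"help": "CPU threads for the engine."})


@dataclass
class BaseArguments:
    # Illustrative stand-in for BaseModelArguments.
    model_name_or_path: Optional[str] = field(default=None, metadata={"help": "Model path or hub id."})


@dataclass
class CombinedArguments(
    EngineArguments,
    BaseArguments,
):
    r"""Combined namespace that inherits every field of every base, like ModelArguments above."""


# Fields are collected in reverse MRO order: bases listed last contribute their fields first.
print([f.name for f in fields(CombinedArguments)])  # ['model_name_or_path', 'cpu_infer']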
@@ -90,6 +90,7 @@ class RayArguments:
        elif self.ray_storage_filesystem == "gs" or self.ray_storage_filesystem == "gcs":
            self.ray_storage_filesystem = fs.GcsFileSystem()


@dataclass
class TrainingArguments(RayArguments, BaseTrainingArguments):
    r"""Arguments pertaining to the trainer."""
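The RayArguments context above shows the string value of ray_storage_filesystem being swapped for a concrete pyarrow filesystem object during post-initialization. A minimal sketch of that resolution step, assuming pyarrow is installed; the upstream class accepts more values and does more validation than shown here:

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class RayStorageArguments:
    # "s3", "gs" or "gcs" (or None); replaced by a pyarrow filesystem object after init.
    ray_storage_filesystem: Optional[str] = field(
        default=None,
        metadata={"help": "Placeholder help text: filesystem backing the Ray storage path."},
    )

    def __post_init__(self):
        if self.ray_storage_filesystem is None:
            return

        from pyarrow import fs  # imported lazily so pyarrow stays an optional dependency

        if self.ray_storage_filesystem == "s3":
            self.ray_storage_filesystem = fs.S3FileSystem()
        elif self.ray_storage_filesystem == "gs" or self.ray_storage_filesystem == "gcs":
            self.ray_storage_filesystem = fs.GcsFileSystem()
        else:
            raise ValueError(f"Unsupported filesystem: {self.ray_storage_filesystem}")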