Files
LlamaFactory/src/export_model.py
hiyouga 5eef8d5d98 use low_cpu_mem_usage to speed up loading
Former-commit-id: 7891e4c200566a4a47088e93efd1fbebcb46528e
2023-06-03 18:19:01 +08:00

24 lines
790 B
Python

# coding=utf-8
# Exports the fine-tuned model.
# Usage: python export_model.py --checkpoint_dir path_to_checkpoint --output_dir path_to_save_model
from transformers import HfArgumentParser, TrainingArguments
from utils import ModelArguments, FinetuningArguments, load_pretrained
def main():
    """Export a fine-tuned model by saving its weights and tokenizer to disk.

    Command-line arguments are parsed into the three dataclasses
    (ModelArguments, TrainingArguments, FinetuningArguments); the loaded
    model and tokenizer are then written to ``training_args.output_dir``.
    """
    arg_parser = HfArgumentParser((ModelArguments, TrainingArguments, FinetuningArguments))
    model_args, training_args, finetuning_args = arg_parser.parse_args_into_dataclasses()
    # load_pretrained builds the model/tokenizer pair from the checkpoint
    # referenced by the parsed arguments.
    model, tokenizer = load_pretrained(model_args, finetuning_args)
    save_dir = training_args.output_dir
    # Shard the exported weights so no single file exceeds 10GB.
    model.save_pretrained(save_dir, max_shard_size="10GB")
    tokenizer.save_pretrained(save_dir)
    print("model and tokenizer have been saved at:", save_dir)
# Run the export only when invoked as a script, not on import.
if __name__ == "__main__":
    main()