update example docs

Former-commit-id: 102cd42768d9eb2cf1219309a25b41e26149067e
Author: hiyouga
Date:   2024-05-06 22:51:02 +08:00
Parent: 5c9da798b5
Commit: 50c71dd29f

33 changed files with 962 additions and 508 deletions


@@ -0,0 +1,39 @@
# model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

# method
stage: sft
do_train: true
finetuning_type: full
mixture_of_depths: convert

# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
val_size: 0.1
overwrite_cache: true
preprocessing_num_workers: 16

# output
output_dir: saves/llama3-8b-mod/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
optim: paged_adamw_8bit
learning_rate: 0.0001
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true

# eval
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 500
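
The YAML example above replaces the flag-style launcher script deleted below: instead of passing each option on the command line, the run is launched by pointing the CLI at the config file, e.g. llamafactory-cli train <path-to-this-yaml>. The path placeholder is illustrative, since this diff does not show where the file lives in the repository.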


@@ -1,33 +0,0 @@
#!/bin/bash

CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
--stage sft \
--do_train \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--dataset alpaca_gpt4_en,glaive_toolcall \
--dataset_dir ../../../data \
--template default \
--finetuning_type full \
--mixture_of_depths convert \
--output_dir ../../../saves/LLaMA2-7B/mod/sft \
--overwrite_cache \
--overwrite_output_dir \
--cutoff_len 1024 \
--preprocessing_num_workers 16 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 8 \
--optim paged_adamw_8bit \
--lr_scheduler_type cosine \
--logging_steps 10 \
--warmup_steps 20 \
--save_steps 100 \
--eval_steps 100 \
--evaluation_strategy steps \
--load_best_model_at_end \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--max_samples 3000 \
--val_size 0.1 \
--plot_loss \
--pure_bf16
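
For comparison, a sketch of how the deleted script's run would look in the new YAML format. Key names follow the file added above; dataset_dir and load_best_model_at_end are assumed to keep their CLI spellings as YAML keys, and all values are carried over from the script unchanged.

# model
model_name_or_path: meta-llama/Llama-2-7b-hf

# method
stage: sft
do_train: true
finetuning_type: full
mixture_of_depths: convert

# dataset
dataset: alpaca_gpt4_en,glaive_toolcall
dataset_dir: ../../../data
template: default
cutoff_len: 1024
max_samples: 3000
val_size: 0.1
overwrite_cache: true
preprocessing_num_workers: 16

# output
output_dir: ../../../saves/LLaMA2-7B/mod/sft
logging_steps: 10
save_steps: 100
plot_loss: true
overwrite_output_dir: true

# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
optim: paged_adamw_8bit
learning_rate: 0.00005
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 20
pure_bf16: true

# eval
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 100
load_best_model_at_end: true

Note that warmup_steps: 20 is kept as an integer step count here, matching the script; the added Llama-3 config above uses a fractional value, which is why it is expressed as warmup_ratio there.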