update example docs

Former-commit-id: 102cd42768d9eb2cf1219309a25b41e26149067e
hiyouga
2024-05-06 22:51:02 +08:00
parent 5c9da798b5
commit 50c71dd29f
33 changed files with 962 additions and 508 deletions

View File

@@ -1,27 +1,38 @@
-stage: sft
-do_train: true
-model_name_or_path: BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf
-dataset: alpaca_gpt4_en,glaive_toolcall
-dataset_dir: data
-template: default
-finetuning_type: lora
-lora_target: q_proj,v_proj
-output_dir: ../../saves/LLaMA2-7B/lora/sft
-overwrite_cache: true
-overwrite_output_dir: true
-cutoff_len: 1024
-per_device_train_batch_size: 1
-per_device_eval_batch_size: 1
-gradient_accumulation_steps: 8
-lr_scheduler_type: cosine
-logging_steps: 10
-save_steps: 100
-eval_steps: 100
-evaluation_strategy: steps
-load_best_model_at_end: true
-learning_rate: 5e-5
-num_train_epochs: 3.0
-max_samples: 3000
-val_size: 0.1
-plot_loss: true
-fp16: true
+# model
+model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16
+
+# method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: q_proj,v_proj
+
+# dataset
+dataset: identity,alpaca_gpt4_en
+template: llama3
+cutoff_len: 1024
+max_samples: 1000
+val_size: 0.1
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+# output
+output_dir: saves/llama3-8b/lora/sft
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+
+# train
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 8
+learning_rate: 0.0001
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+fp16: true
+
+# eval
+per_device_eval_batch_size: 1
+evaluation_strategy: steps
+eval_steps: 500

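This example fine-tunes a LoRA adapter on top of a checkpoint that is already quantized to 2-bit with AQLM, so the aqlm package must be installed alongside transformers and accelerate. A minimal Python sketch (not part of this commit) that simply verifies the checkpoint named in the config loads before training starts:

# Minimal sketch, assuming `pip install aqlm` plus recent transformers/accelerate.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
print(model.num_parameters())  # the quantized base model stays frozen during LoRA SFT
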
View File

@@ -0,0 +1,38 @@
# model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
# method
stage: sft
do_train: true
finetuning_type: lora
lora_target: q_proj,v_proj
# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
val_size: 0.1
overwrite_cache: true
preprocessing_num_workers: 16
# output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 0.0001
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 0.1
fp16: true
# eval
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 500

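The AWQ variant follows the same recipe with a pre-quantized AWQ checkpoint (loading it requires the autoawq backend). Roughly, finetuning_type: lora with lora_target: q_proj,v_proj corresponds to the peft sketch below; the rank and alpha values are assumptions, since the YAML leaves them at framework defaults:

# Minimal sketch, not the project's own training code.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "TechxGenus/Meta-Llama-3-8B-Instruct-AWQ", device_map="auto"
)
lora_config = LoraConfig(
    task_type="CAUSAL_LM", r=8, lora_alpha=16,  # assumed defaults, not set in the YAML
    target_modules=["q_proj", "v_proj"],
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable
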
View File

@@ -0,0 +1,42 @@
# model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
# method
stage: sft
do_train: true
finetuning_type: lora
lora_target: q_proj,v_proj
# ddp
ddp_timeout: 180000000
# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
val_size: 0.1
overwrite_cache: true
preprocessing_num_workers: 16
# output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 0.0001
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 0.1
fp16: true
# eval
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 500

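Unlike the AQLM, AWQ, and GPTQ examples, this one starts from the full-precision Instruct checkpoint and quantizes it on the fly: quantization_bit: 4 enables 4-bit bitsandbytes loading (QLoRA-style). A minimal sketch of the roughly equivalent Hugging Face call, with nf4 assumed as the quantization type since the YAML does not set it explicitly:

# Minimal sketch of what quantization_bit: 4 roughly corresponds to here.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",               # assumed default
    bnb_4bit_compute_dtype=torch.float16,    # matches fp16: true in the config
)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    quantization_config=bnb_config,
    device_map="auto",
)
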
View File

@@ -0,0 +1,38 @@
# model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ
# method
stage: sft
do_train: true
finetuning_type: lora
lora_target: q_proj,v_proj
# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
val_size: 0.1
overwrite_cache: true
preprocessing_num_workers: 16
# output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 0.0001
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 0.1
fp16: true
# eval
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 500
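
All four examples share the same LoRA SFT hyperparameters and only swap the base checkpoint (AQLM, AWQ, on-the-fly bitsandbytes, GPTQ; the GPTQ one needs the auto-gptq backend). Each is launched by passing its YAML file to the trainer; the CLI entry point and the file name below follow the repository's README and are assumptions, not paths shown in this diff:

# Hypothetical launch helper; llamafactory-cli and the file name are assumptions.
import subprocess

def launch(config_path: str) -> None:
    """Run one of the single-GPU LoRA SFT examples from its YAML config."""
    subprocess.run(["llamafactory-cli", "train", config_path], check=True)

launch("llama3_lora_sft_gptq.yaml")  # illustrative file name for the GPTQ example above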