update examples

Former-commit-id: 3b5f138155d96b346bda18e465cf60ec7d99e19c
hiyouga
2024-05-17 01:02:00 +08:00
parent 45329d9e3c
commit a3320f26cf
27 changed files with 155 additions and 155 deletions

View File

@@ -1,14 +1,14 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: dpo
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 dpo_ftx: 1.0
 
-# dataset
+### dataset
 dataset: orca_rlhf
 template: llama3
 cutoff_len: 1024
@@ -16,14 +16,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/dpo
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.00001
@@ -32,7 +32,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps

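The file above is the LoRA DPO recipe: dpo_ftx: 1.0 additionally mixes the supervised fine-tuning loss into the DPO objective, and the effective batch size is per_device_train_batch_size × gradient_accumulation_steps = 1 × 8 = 8 per device. As a rough usage sketch (the file path is an assumption, since this view omits filenames):

    # launch LoRA DPO training; path below is assumed, not shown in this diff
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
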
View File

@@ -1,19 +1,19 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 adapter_name_or_path: saves/llama3-8b/lora/sft
 
-# method
+### method
 finetuning_type: lora
 
-# dataset
+### dataset
 task: mmlu
 split: test
 template: fewshot
 lang: en
 n_shot: 5
 
-# output
+### output
 save_dir: saves/llama3-8b/lora/eval
 
-# eval
+### eval
 batch_size: 4

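This recipe scores the SFT adapter on MMLU (test split, 5-shot, English) and goes through the eval subcommand rather than train. Sketch, with the path assumed:

    # 5-shot MMLU evaluation of the LoRA adapter; path assumed
    llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
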
View File

@@ -1,13 +1,13 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: orpo
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: orca_rlhf
 template: llama3
 cutoff_len: 1024
@@ -15,14 +15,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/orpo
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.00001
@@ -31,7 +31,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps

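The ORPO recipe trains on the same orca_rlhf preference pairs as DPO but, by design of the ORPO objective, needs neither a reference model nor a separately trained reward model. Sketch, path assumed:

    # launch LoRA ORPO training; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_orpo.yaml
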
View File

@@ -1,14 +1,14 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 reward_model: saves/llama3-8b/lora/reward
 
-# method
+### method
 stage: ppo
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: identity,alpaca_gpt4_en
 template: llama3
 cutoff_len: 1024
@@ -16,14 +16,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/ppo
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.00001
@@ -32,7 +32,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# generate
+### generate
 max_new_tokens: 512
 top_k: 0
 top_p: 0.9

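The PPO recipe loads the reward adapter from saves/llama3-8b/lora/reward, so the reward-model recipe further below has to be trained first; the ### generate section controls sampling during the rollouts. Sketch, path assumed:

    # launch LoRA PPO training (requires the reward adapter); path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
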
View File

@@ -1,13 +1,13 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 adapter_name_or_path: saves/llama3-8b/lora/sft
 
-# method
+### method
 stage: sft
 do_predict: true
 finetuning_type: lora
 
-# dataset
+### dataset
 dataset: identity,alpaca_gpt4_en
 template: llama3
 cutoff_len: 1024
@@ -15,10 +15,10 @@ max_samples: 50
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/predict
 overwrite_output_dir: true
 
-# eval
+### eval
 per_device_eval_batch_size: 1
 predict_with_generate: true

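This recipe performs batch prediction: do_predict plus predict_with_generate decodes up to 50 samples with the SFT adapter and writes the generations under saves/llama3-8b/lora/predict. Prediction runs through the train entrypoint; sketch, path assumed:

    # batch prediction with the SFT adapter; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
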
View File

@@ -1,27 +1,27 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: pt
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: c4_demo
 cutoff_len: 1024
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.0001
@@ -30,7 +30,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps

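Continued pretraining (stage: pt) on the raw-text c4_demo corpus; note there is no template key, since plain text needs no chat template. Sketch, path assumed:

    # launch LoRA continued pretraining; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
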
View File

@@ -1,13 +1,13 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: rm
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: orca_rlhf
 template: llama3
 cutoff_len: 1024
@@ -15,14 +15,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/reward
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.00001
@@ -31,7 +31,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps

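The reward-model recipe (stage: rm) fits a preference model on orca_rlhf and saves the adapter to saves/llama3-8b/lora/reward, which is exactly what the PPO recipe above references via reward_model. Sketch, path assumed:

    # train the reward adapter consumed by the PPO recipe; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
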
View File

@@ -1,13 +1,13 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: sft
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: identity,alpaca_gpt4_en
 template: llama3
 cutoff_len: 1024
@@ -15,14 +15,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.0001
@@ -31,7 +31,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps

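The plain SFT recipe; its output_dir saves/llama3-8b/lora/sft is the adapter that the evaluation and prediction recipes above load through adapter_name_or_path. Sketch, path assumed:

    # launch LoRA supervised fine-tuning; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
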
View File

@@ -1,13 +1,13 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 
-# method
+### method
 stage: sft
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: identity,alpaca_gpt4_en
 template: llama3
 cutoff_len: 1024
@@ -16,6 +16,6 @@ overwrite_cache: true
 preprocessing_num_workers: 16
 tokenized_path: saves/llama3-8b/dataset/sft
 
-# output
+### output
 output_dir: saves/llama3-8b/lora/sft
 overwrite_output_dir: true

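This SFT variant only prepares data: with tokenized_path set, the run tokenizes the dataset once and caches it at saves/llama3-8b/dataset/sft, and later runs pointing at the same tokenized_path reuse the cache instead of re-tokenizing. Sketch, path assumed:

    # pre-tokenize the dataset into a reusable cache; path assumed
    llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
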
View File

@@ -1,14 +1,14 @@
-# model
+### model
 model_name_or_path: llava-hf/llava-1.5-7b-hf
 visual_inputs: true
 
-# method
+### method
 stage: sft
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
 
-# dataset
+### dataset
 dataset: mllm_demo
 template: vicuna
 cutoff_len: 1024
@@ -16,14 +16,14 @@ max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
-# output
+### output
 output_dir: saves/llava1_5-7b/lora/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 0.0001
@@ -32,7 +32,7 @@ lr_scheduler_type: cosine
 warmup_steps: 0.1
 fp16: true
 
-# eval
+### eval
 val_size: 0.1
 per_device_eval_batch_size: 1
 evaluation_strategy: steps
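
The multimodal recipe: visual_inputs: true switches on image inputs for llava-hf/llava-1.5-7b-hf, trained on the mllm_demo image-text dataset with the vicuna template. Sketch, path assumed:

    # launch LoRA fine-tuning of LLaVA-1.5; path assumed
    llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml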