disable valset by default (#6690)

Former-commit-id: a1a94f364e33d1d73852f74eda4fa581e6b16533
hoshi-hiyouga
2025-01-17 21:09:30 +08:00
committed by GitHub
parent 31daa6570b
commit 332f637592
30 changed files with 142 additions and 114 deletions
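
Every example recipe below follows the same pattern: `lora_rank: 8` is now written out explicitly (matching what was previously the built-in default), and the `### eval` block is commented out so that no validation split is held out unless the user opts in. To re-enable evaluation, uncomment the four keys; a minimal sketch, assuming `val_size` is a fractional split of the dataset as elsewhere in LLaMA-Factory's data arguments:

    ### eval
    val_size: 0.1                  # hold out 10% of the dataset for validation
    per_device_eval_batch_size: 1
    eval_strategy: steps           # evaluate every `eval_steps` optimizer steps
    eval_steps: 500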

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: dpo
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 pref_beta: 0.1
 pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
@@ -36,7 +37,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: kto
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 pref_beta: 0.1
@@ -35,7 +36,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -7,6 +7,7 @@ trust_remote_code: true
 stage: ppo
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: pt
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -33,7 +34,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: rm
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -34,7 +35,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -34,7 +35,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
@@ -35,7 +36,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -24,6 +25,13 @@ save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
+### ray
+ray_run_name: llama3_8b_sft_lora
+ray_num_workers: 4 # number of GPUs to use
+resources_per_worker:
+  GPU: 1
+placement_strategy: PACK
+
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
@@ -35,14 +43,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
-
-### ray
-ray_run_name: llama3_8b_sft_lora
-ray_num_workers: 4 # number of GPUs to use
-resources_per_worker:
-  GPU: 1
-placement_strategy: PACK
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
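
In the Ray recipe the `### ray` block also moves ahead of `### train`. Note that `resources_per_worker` is a nested mapping, so with `ray_num_workers: 4` each of the four workers claims one GPU; a sketch of the relocated block with the YAML nesting restored by hand, since the diff view flattened the indentation:

    ### ray
    ray_run_name: llama3_8b_sft_lora
    ray_num_workers: 4        # number of GPUs to use, one per worker
    resources_per_worker:
      GPU: 1                  # nested under resources_per_worker
    placement_strategy: PACK  # pack workers onto as few nodes as possible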

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset

View File

@@ -1,11 +1,14 @@
 ### model
 model_name_or_path: llava-hf/llava-1.5-7b-hf
+image_resolution: 262144
+video_resolution: 16384
 trust_remote_code: true
 
 ### method
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -34,7 +37,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
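
The multimodal recipes also pin explicit resolution budgets. The numbers are pixel counts: 262144 = 512 * 512 for images and 16384 = 128 * 128 per video frame; the arithmetic is exact, but that these keys act as maximum pixel budgets for the vision inputs is an assumption here:

    ### model
    image_resolution: 262144  # 512 * 512 pixels per image (assumed max-pixel budget)
    video_resolution: 16384   # 128 * 128 pixels per video frame (assumed max-pixel budget)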

View File

@@ -1,11 +1,14 @@
 ### model
 model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
+image_resolution: 262144
+video_resolution: 16384
 trust_remote_code: true
 
 ### method
 stage: dpo
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 pref_beta: 0.1
 pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
@@ -36,7 +39,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

View File

@@ -1,11 +1,14 @@
 ### model
 model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
+image_resolution: 262144
+video_resolution: 16384
 trust_remote_code: true
 
 ### method
 stage: sft
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 
 ### dataset
@@ -34,7 +37,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500