refactor mm training
Former-commit-id: 179c0558699e287cbf38a2d73bff47e86d589c5a
@@ -33,6 +33,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 
 ```bash
 llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
+llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
 ```
 
 #### Reward Modeling
@@ -33,6 +33,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 
 ```bash
 llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
+llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
 ```
 
 #### 奖励模型训练
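Both README hunks point at the same new entry point. For anyone running the refactored recipe on more than one GPU, a minimal launch sketch is below; `CUDA_VISIBLE_DEVICES` and `FORCE_TORCHRUN` reflect the usual LLaMA-Factory launch environment and are assumptions, not something this diff adds.

```bash
# Minimal sketch (assumed environment, not part of this commit):
# FORCE_TORCHRUN=1 asks llamafactory-cli to launch via torchrun so that
# both visible GPUs participate in training.
CUDA_VISIBLE_DEVICES=0,1 FORCE_TORCHRUN=1 \
    llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
```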
@@ -1,40 +0,0 @@
-### model
-model_name_or_path: qwen2-vl-hf/qwen2-vl-7b-hf
-visual_inputs: true
-
-### method
-stage: sft
-do_train: true
-finetuning_type: full
-deepspeed: examples/deepspeed/ds_z3_config.json
-
-### dataset
-dataset: qwen2vl_demo
-template: qwen2vl
-cutoff_len: 1024
-max_samples: 1000
-overwrite_cache: true
-preprocessing_num_workers: 16
-
-### output
-output_dir: saves/qwen2-vl-7b/full/sft
-logging_steps: 10
-save_steps: 500
-plot_loss: true
-overwrite_output_dir: true
-
-### train
-per_device_train_batch_size: 1
-gradient_accumulation_steps: 1
-learning_rate: 1.0e-5
-num_train_epochs: 3.0
-lr_scheduler_type: cosine
-warmup_ratio: 0.1
-bf16: true
-ddp_timeout: 180000000
-
-### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
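The dedicated full-parameter YAML above is removed by this commit. If a full-parameter run is still wanted after the refactor, a minimal sketch is to write an equivalent config by hand and pass it to the documented CLI. The temporary file path, and the switch to the `mllm_demo` dataset and `qwen2_vl` template (matching the renamed values in the LoRA file below), are assumptions rather than part of this diff.

```bash
# Sketch only: recreate a full-parameter SFT config equivalent to the deleted
# file, using the model/dataset/template names this commit standardizes on
# (assumed choice), then launch it with the documented CLI.
cat > /tmp/qwen2vl_full_sft.yaml <<'EOF'
### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
visual_inputs: true

### method
stage: sft
do_train: true
finetuning_type: full
deepspeed: examples/deepspeed/ds_z3_config.json

### dataset
dataset: mllm_demo
template: qwen2_vl
cutoff_len: 1024

### output
output_dir: saves/qwen2_vl-7b/full/sft

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
bf16: true
EOF
llamafactory-cli train /tmp/qwen2vl_full_sft.yaml
```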
@@ -1,5 +1,5 @@
 ### model
-model_name_or_path: qwen2-vl-hf/qwen2-vl-7b-hf
+model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
 visual_inputs: true
 
 ### method
@@ -9,23 +9,23 @@ finetuning_type: lora
 lora_target: all
 
 ### dataset
-dataset: qwen2vl_demo
-template: qwen2vl
+dataset: mllm_demo
+template: qwen2_vl
 cutoff_len: 1024
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
 ### output
-output_dir: saves/qwen2-vl-7b/lora/sft
+output_dir: saves/qwen2_vl-7b/lora/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 
 ### train
-per_device_train_batch_size: 2
-gradient_accumulation_steps: 1
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 8
 learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
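The batch settings in the LoRA recipe move in opposite directions on purpose: the per-device batch shrinks while gradient accumulation grows. The arithmetic below is a reading of the diff, not new behavior.

```bash
# Effective optimizer batch per device, before vs. after this change:
#   before: per_device_train_batch_size (2) * gradient_accumulation_steps (1) = 2
#   after:  per_device_train_batch_size (1) * gradient_accumulation_steps (8) = 8
# One sample per forward pass keeps peak activation memory low for the 7B VLM,
# while accumulation still yields a reasonably large optimizer batch per step.
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
```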