add LLaVA and InstructBLIP

Former-commit-id: 142fb6f4541a1acfefe66ff2574dabde53b00c06
Author: BUAADreamer
Date: 2024-04-25 00:22:43 +08:00
parent 1451297c78
commit 12c51655ce
16 changed files with 273 additions and 214 deletions

View File

@@ -3,20 +3,20 @@
 CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 --stage sft_mm \
 --do_train \
---model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/instructblip-vicuna-7b \
---dataset llava_instruct_100 \
+--model_name_or_path Salesforce/instructblip-vicuna-7b \
+--dataset mllm_instruct_example \
 --dataset_dir data \
 --template default \
 --finetuning_type lora \
---lora_target q_proj,k_proj \
+--lora_target all \
 --output_dir saves/instructblip-vicuna-7b/lora/sft \
 --overwrite_cache \
 --overwrite_output_dir \
 --cutoff_len 1024 \
 --preprocessing_num_workers 16 \
---per_device_train_batch_size 4 \
+--per_device_train_batch_size 3 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 8 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 1 \
 --warmup_steps 20 \
@@ -25,10 +25,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 --evaluation_strategy steps \
 --load_best_model_at_end \
 --learning_rate 1e-5 \
---num_train_epochs 3.0 \
+--num_train_epochs 50 \
 --max_samples 3000 \
 --val_size 0.1 \
 --plot_loss \
---quantization_bit 8 \
---image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \
---use_qformer
+--bf16
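
For reference, the updated InstructBLIP script assembles into the command below. This is reconstructed from the context and + lines of the two hunks above; file lines 23-24 fall between the hunks, are not shown in the diff, and are therefore omitted here:

CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft_mm \
--do_train \
--model_name_or_path Salesforce/instructblip-vicuna-7b \
--dataset mllm_instruct_example \
--dataset_dir data \
--template default \
--finetuning_type lora \
--lora_target all \
--output_dir saves/instructblip-vicuna-7b/lora/sft \
--overwrite_cache \
--overwrite_output_dir \
--cutoff_len 1024 \
--preprocessing_num_workers 16 \
--per_device_train_batch_size 3 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 1 \
--lr_scheduler_type cosine \
--logging_steps 1 \
--warmup_steps 20 \
--evaluation_strategy steps \
--load_best_model_at_end \
--learning_rate 1e-5 \
--num_train_epochs 50 \
--max_samples 3000 \
--val_size 0.1 \
--plot_loss \
--bf16

Note that the effective train batch size drops from 4 × 8 = 32 to 3 × 1 = 3 while the epoch count rises from 3 to 50, consistent with switching from llava_instruct_100 to the much smaller mllm_instruct_example demo set.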

View File

@@ -3,20 +3,20 @@
 CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 --stage sft_mm \
 --do_train \
---model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/blip2-opt-2.7b \
---dataset llava_instruct_100 \
+--model_name_or_path llava-hf/llava-1.5-7b-hf \
+--dataset mllm_instruct_example \
 --dataset_dir data \
 --template default \
 --finetuning_type lora \
---lora_target q_proj,k_proj \
---output_dir saves/blip2-opt-2.7b/lora/sft \
+--lora_target all \
+--output_dir saves/llava-1.5-7b/lora/sft \
 --overwrite_cache \
 --overwrite_output_dir \
 --cutoff_len 1024 \
 --preprocessing_num_workers 16 \
---per_device_train_batch_size 4 \
+--per_device_train_batch_size 3 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 8 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 1 \
 --warmup_steps 20 \
@@ -25,9 +25,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 --evaluation_strategy steps \
 --load_best_model_at_end \
 --learning_rate 5e-5 \
---num_train_epochs 3.0 \
+--num_train_epochs 100 \
 --max_samples 3000 \
 --val_size 0.1 \
 --plot_loss \
---quantization_bit 8 \
---image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017
+--bf16
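
This second script previously fine-tuned blip2-opt-2.7b and is repurposed here for LLaVA-1.5. The resulting command mirrors the InstructBLIP one above, substituting llava-hf/llava-1.5-7b-hf, saves/llava-1.5-7b/lora/sft, a 5e-5 learning rate, and 100 epochs. Before launching it, a quick sanity check (a sketch, assuming Hub access and a transformers release with LLaVA support, v4.36 or later) confirms the new model id resolves; it fetches only the config, not the weights:

python -c "from transformers import AutoConfig; print(AutoConfig.from_pretrained('llava-hf/llava-1.5-7b-hf').model_type)"

This should print "llava"; an error about an unrecognized model type instead means the installed transformers predates LLaVA support.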