support layerwise galore

Former-commit-id: d43a4da0947897d0be3f62fad3107754d4c89f2b
This commit is contained in:
hiyouga
2024-03-10 00:24:11 +08:00
parent c635bbe465
commit 7ff8a064f3
14 changed files with 109 additions and 51 deletions

View File

@@ -15,7 +15,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --preprocessing_num_workers 16 \
 --per_device_train_batch_size 1 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 2 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 10 \
 --warmup_steps 20 \

View File

@@ -16,7 +16,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --preprocessing_num_workers 16 \
 --per_device_train_batch_size 1 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 2 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 10 \
 --warmup_steps 20 \

View File

@@ -9,8 +9,9 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --template default \
 --finetuning_type full \
 --use_galore \
+--galore_layerwise \
 --galore_target mlp,self_attn \
---galore_rank 32 \
+--galore_rank 128 \
 --output_dir ../../../saves/LLaMA2-7B/galore/sft \
 --overwrite_cache \
 --overwrite_output_dir \
@@ -18,7 +19,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --preprocessing_num_workers 16 \
 --per_device_train_batch_size 1 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 2 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 10 \
 --warmup_steps 20 \

View File

@@ -10,8 +10,9 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --finetuning_type full \
 --optim adamw_8bit \
 --use_galore \
+--galore_layerwise \
 --galore_target mlp,self_attn \
---galore_rank 16 \
+--galore_rank 128 \
 --output_dir ../../../saves/LLaMA2-7B/galore/sft \
 --overwrite_cache \
 --overwrite_output_dir \
@@ -19,7 +20,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
 --preprocessing_num_workers 16 \
 --per_device_train_batch_size 1 \
 --per_device_eval_batch_size 1 \
---gradient_accumulation_steps 2 \
+--gradient_accumulation_steps 1 \
 --lr_scheduler_type cosine \
 --logging_steps 10 \
 --warmup_steps 20 \