Support FSDP + QLoRA

Former-commit-id: b894bf8e84be689db258021f0638e9ac939abcbc
Author: hiyouga
Date:   2024-03-21 00:36:06 +08:00
parent 5ed234ca63
commit 935ee0a023
15 changed files with 87 additions and 19 deletions


@@ -0,0 +1,25 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch: BACKWARD_PRE
fsdp_cpu_ram_efficient_loading: true
fsdp_forward_prefetch: false
fsdp_offload_params: true
fsdp_sharding_strategy: FULL_SHARD
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sync_module_states: true
fsdp_use_orig_params: false
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
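
The config above shards parameters, gradients, and optimizer states across both processes (FULL_SHARD) and offloads parameters to CPU; `num_processes: 2` must match the number of GPUs exposed to the launcher. As a rough sketch, most of these settings can also be passed directly on the command line instead of through a YAML file (flag names per `accelerate launch --help` for accelerate>=0.28.0; FULL_SHARD is already the default sharding strategy):

```bash
# A minimal equivalent-launch sketch; flag names follow
# `accelerate launch --help` for accelerate>=0.28.0. The script path
# mirrors the training entry point used in the scripts below.
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
    --num_processes 2 \
    --use_fsdp \
    --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP \
    --fsdp_offload_params true \
    --mixed_precision fp16 \
    ../../src/train_bash.py  # ...plus the usual training arguments
```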


@@ -0,0 +1,5 @@
```bash
pip install git+https://github.com/huggingface/transformers.git
pip install "accelerate>=0.28.0"
pip install "bitsandbytes>=0.43.0"
```
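
These pins matter: bitsandbytes 0.43.0 is the first release whose 4-bit quantization works under FSDP, and the accelerate>=0.28.0 pin covers the `fsdp_cpu_ram_efficient_loading` key used in the config above. A quick sanity check of the installed versions, as a sketch:

```bash
# Print installed versions to confirm they meet the pins above.
python -c "import transformers, accelerate, bitsandbytes; print(transformers.__version__, accelerate.__version__, bitsandbytes.__version__)"
```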


@@ -0,0 +1,33 @@
#!/bin/bash
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
    --config_file ../accelerate/fsdp_config.yaml \
    ../../src/train_bash.py \
    --stage sft \
    --do_train \
    --model_name_or_path meta-llama/Llama-2-70b-hf \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-70B/lora/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --quantization_bit 4 \
    --plot_loss \
    --fp16
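
With `--quantization_bit 4` plus `--finetuning_type lora`, this is QLoRA: FSDP shards the frozen 4-bit base model across both GPUs while only the LoRA adapters train. The relative paths assume the script runs from its own directory; a usage sketch (the directory name is illustrative):

```bash
# Run from the script's own directory so ../accelerate/fsdp_config.yaml
# and ../../src/train_bash.py resolve correctly.
cd examples/extras/fsdp_qlora   # hypothetical location of this script
bash sft.sh
```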


@@ -7,11 +7,11 @@ python -m torch.distributed.run \
     --master_addr $MASTER_ADDR \
     --master_port $MASTER_PORT \
     ../../src/train_bash.py \
-    --deepspeed ds_z3_config.json \
+    --deepspeed ../deepspeed/ds_z3_config.json \
     --stage sft \
     --do_train \
     --model_name_or_path meta-llama/Llama-2-7b-hf \
-    --dataset alpaca_gpt4_en \
+    --dataset alpaca_gpt4_en,glaive_toolcall \
     --dataset_dir ../../data \
     --template default \
     --finetuning_type full \

@@ -1,11 +1,11 @@
 #!/bin/bash
 deepspeed --num_gpus 4 ../../src/train_bash.py \
-    --deepspeed ds_z3_config.json \
+    --deepspeed ../deepspeed/ds_z3_config.json \
     --stage sft \
     --do_train \
     --model_name_or_path meta-llama/Llama-2-7b-hf \
-    --dataset alpaca_gpt4_en \
+    --dataset alpaca_gpt4_en,glaive_toolcall \
     --dataset_dir ../../data \
     --template default \
     --finetuning_type full \
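
Both DeepSpeed scripts now share one `../deepspeed/ds_z3_config.json`. That file is not shown in this diff; as a heavily hedged sketch, a minimal ZeRO stage-3 config of the shape the HF Trainer expects might look like the following (the repo's actual file may set different or additional fields):

```bash
# A sketch of a minimal ZeRO stage-3 config; "auto" values are filled
# in by the HF Trainer at runtime. The repo's real file may differ.
cat <<'EOF' > ds_z3_config.json
{
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "zero_optimization": {
    "stage": 3,
    "overlap_comm": true,
    "stage3_gather_16bit_weights_on_model_save": true
  },
  "fp16": {
    "enabled": "auto"
  }
}
EOF
```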


@@ -1,7 +1,7 @@
 #!/bin/bash
 CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
-    --config_file master_config.yaml \
+    --config_file ../accelerate/master_config.yaml \
     ../../src/train_bash.py \
     --stage sft \
     --do_train \


@@ -1,7 +1,7 @@
 #!/bin/bash
 CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 accelerate launch \
-    --config_file single_config.yaml \
+    --config_file ../accelerate/single_config.yaml \
     ../../src/train_bash.py \
     --stage sft \
     --do_train \