update docs
Former-commit-id: a4a2e94241bea6f96590f6cb8ca8b5cddee1917e
@@ -1,30 +0,0 @@
#!/bin/bash

CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
    --stage sft \
    --do_train \
    --model_name_or_path BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-7B/lora/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --plot_loss \
    --fp16
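This deleted variant fine-tuned a pre-quantized AQLM checkpoint. A minimal prerequisite sketch, assuming AQLM kernels come from the separate aqlm package (the extra and version pin are assumptions, not taken from this commit):

# assumed prerequisite for loading the AQLM-quantized model above
pip install "aqlm[gpu]>=1.1.0"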
@@ -1,30 +0,0 @@
#!/bin/bash

CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
    --stage sft \
    --do_train \
    --model_name_or_path TheBloke/Llama-2-7B-AWQ \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-7B/lora/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --plot_loss \
    --fp16
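This deleted variant loaded an AWQ-quantized checkpoint. A minimal prerequisite sketch, assuming AWQ support is provided by the autoawq package:

# assumed prerequisite for loading the AWQ-quantized model above
pip install autoawq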
@@ -1,31 +0,0 @@
#!/bin/bash

CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
    --stage sft \
    --do_train \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-7B/lora/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --quantization_bit 4 \
    --plot_loss \
    --fp16
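Unlike the other deleted variants, this script started from the full-precision meta-llama/Llama-2-7b-hf checkpoint and quantized it on the fly via --quantization_bit 4, which is backed by bitsandbytes. A minimal prerequisite sketch (the version floor is an assumption):

# assumed prerequisite for on-the-fly 4-bit (QLoRA-style) quantization
pip install "bitsandbytes>=0.39.0"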
@@ -1,30 +0,0 @@
#!/bin/bash

CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
    --stage sft \
    --do_train \
    --model_name_or_path TheBloke/Llama-2-7B-GPTQ \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-7B/lora/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --plot_loss \
    --fp16
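This deleted variant loaded a GPTQ-quantized checkpoint. A minimal prerequisite sketch, assuming GPTQ loading goes through the optimum and auto-gptq packages:

# assumed prerequisites for loading the GPTQ-quantized model above
pip install auto-gptq optimum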
27 examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml Normal file
@@ -0,0 +1,27 @@
stage: sft
do_train: true
model_name_or_path: BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf
dataset: alpaca_gpt4_en,glaive_toolcall
dataset_dir: data
template: default
finetuning_type: lora
lora_target: q_proj,v_proj
output_dir: ../../saves/LLaMA2-7B/lora/sft
overwrite_cache: true
overwrite_output_dir: true
cutoff_len: 1024
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
gradient_accumulation_steps: 8
lr_scheduler_type: cosine
logging_steps: 10
save_steps: 100
eval_steps: 100
evaluation_strategy: steps
load_best_model_at_end: true
learning_rate: 5e-5
num_train_epochs: 3.0
max_samples: 3000
val_size: 0.1
plot_loss: true
fp16: true
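The new YAML replaces the per-flag shell invocations above: with the config-file interface, the whole run is launched by passing the file to llamafactory-cli. A usage sketch, using the path added in this commit:

# launch training from the repository root with the new config file
llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml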
0 examples/qlora_single_gpu/llama3_lora_sft_awq.yaml Normal file
0 examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml Normal file