[example] add bash usage (#7794)

hoshi-hiyouga
2025-04-22 00:25:51 +08:00
committed by GitHub
parent 12ada72ed4
commit b07628dea5
13 changed files with 184 additions and 98 deletions

examples/README.md

@@ -24,7 +24,13 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 Advanced usage:
 
 ```bash
-CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml learning_rate=1e-5 logging_steps=1
+CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
+    learning_rate=1e-5 \
+    logging_steps=1
 ```
+
+```bash
+bash examples/train_lora/llama3_lora_sft.sh
+```
 
 ## Examples
@@ -215,12 +221,6 @@ llamafactory-cli api examples/inference/llama3_lora_sft.yaml
 ### Extras
 
-#### Full-Parameter Fine-Tuning using Muon
-
-```bash
-llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
-```
-
 #### Full-Parameter Fine-Tuning using GaLore
 
 ```bash
@@ -245,6 +245,12 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
 ```
 
+#### Full-Parameter Fine-Tuning using Muon
+
+```bash
+llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
+```
+
 #### LoRA+ Fine-Tuning
 
 ```bash

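For context on the hunk above: `llamafactory-cli train` reads the YAML config first, then applies any trailing `key=value` pairs as overrides on top of it. A minimal sketch of overriding other keys the same way (the values below are illustrative, not from this commit):

```bash
# any argument key from the YAML can be overridden on the command line;
# the epoch count and output_dir values here are hypothetical
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
    num_train_epochs=1.0 \
    output_dir=saves/llama3-8b/lora/sft-test
```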
examples/README_zh.md

@@ -24,7 +24,13 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 Advanced usage:
 
 ```bash
-CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml learning_rate=1e-5 logging_steps=1
+CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
+    learning_rate=1e-5 \
+    logging_steps=1
 ```
+
+```bash
+bash examples/train_lora/llama3_lora_sft.sh
+```
 
 ## Examples
@@ -215,12 +221,6 @@ llamafactory-cli api examples/inference/llama3_lora_sft.yaml
 ### Extras
 
-#### Full-Parameter Fine-Tuning using Muon
-
-```bash
-llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
-```
-
 #### Full-Parameter Fine-Tuning using GaLore
 
 ```bash
@@ -245,6 +245,12 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
 ```
 
+#### Full-Parameter Fine-Tuning using Muon
+
+```bash
+llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
+```
+
 #### LoRA+ Fine-Tuning
 
 ```bash

examples/train_lora/llama3_lora_sft.sh (new file)

@@ -0,0 +1,36 @@
#!/bin/bash

set -x

MODEL_PATH=meta-llama/Meta-Llama-3-8B-Instruct
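
# NOTE (assumption, not part of the committed script): launch this from the
# LLaMA-Factory repo root so that relative paths such as the default
# dataset_dir ("data") and output_dir ("saves/...") resolve as intended.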
llamafactory-cli train \
    --model_name_or_path ${MODEL_PATH} \
    --trust_remote_code \
    --stage sft \
    --do_train \
    --finetuning_type lora \
    --lora_rank 8 \
    --lora_target all \
    --dataset identity,alpaca_en_demo \
    --template llama3 \
    --cutoff_len 2048 \
    --max_samples 1000 \
    --overwrite_cache \
    --preprocessing_num_workers 16 \
    --dataloader_num_workers 4 \
    --output_dir saves/llama3-8b/lora/sft \
    --logging_steps 10 \
    --save_steps 500 \
    --plot_loss \
    --overwrite_output_dir \
    --save_only_model false \
    --report_to none \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --learning_rate 1e-4 \
    --num_train_epochs 3.0 \
    --lr_scheduler_type cosine \
    --warmup_ratio 0.1 \
    --bf16 \
    --ddp_timeout 180000000
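
For reference, a minimal way to launch the new script (a usage sketch; pinning a GPU via CUDA_VISIBLE_DEVICES is optional and is an assumption, not part of this commit):

```bash
# from the LLaMA-Factory repo root; restrict to a single GPU if desired
CUDA_VISIBLE_DEVICES=0 bash examples/train_lora/llama3_lora_sft.sh
```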