78 Commits

Author SHA1 Message Date
hiyouga
ec334f5891 release v0.2.2, fix #1478 #1466
Former-commit-id: c9534c411716e1dceb54c5eb35fe845c93ee2973
2023-11-13 23:09:05 +08:00
hiyouga
885efe772e fix #424
Former-commit-id: ca24d445f825e120e659f5cd080a954c2243b8f2
2023-11-13 22:42:23 +08:00
hiyouga
64fc9ba678 refactor evaluation, upgrade trl to 0.7.4
Former-commit-id: ed09ebe2c1926ffdb0520b3866f7fd03a9aed046
2023-11-13 22:20:35 +08:00
hiyouga
989eccd286 fix flashattn warning
Former-commit-id: 6eb095d39bd82fdbdb729a0ea57fc7246e3a60d6
2023-11-10 18:34:54 +08:00
hiyouga
f0766a2ab0 add todo
Former-commit-id: 0bd884feb11736d0ab24ca19885151cb47d9dcd3
2023-11-10 14:38:18 +08:00
hiyouga
178b85ff9a refactor constants
Former-commit-id: a4d4c3fd35276f20e3b354e9d13ea971029c8775
2023-11-10 14:16:10 +08:00
hiyouga
68dd1ef121 tiny fix
Former-commit-id: 97ba2027bb1ddc01a3c824c40d5a180828810c2c
2023-11-09 17:20:49 +08:00
hoshi-hiyouga
b222cffe98 Merge pull request #1454 from yyq/main
Update finetuning_args.py

Former-commit-id: e67d8b93705383a8590f99e26e9fe8f663712aef
2023-11-09 17:12:18 +08:00
Yanqing
b4f1ab93d1 Update finetuning_args.py
Update the lora_target names for chatglm/falcon/bloom

Former-commit-id: 06606739af035a80ae9ddba9d12c965ed289305d
2023-11-09 17:04:40 +08:00
hiyouga
f2e139f5cd fix #1452
Former-commit-id: 4d16214467715df458e24d03bb7d303d62b8bdcd
2023-11-09 16:41:32 +08:00
hiyouga
a9cbca1604 update readme
Former-commit-id: f7ead54042868550a3e8a6928ea3c0e2673f15b3
2023-11-09 16:00:24 +08:00
hiyouga
3a30ce6c16 release v0.2.1
Former-commit-id: 1c30f2be0140f5ab47c2bc811170d0271a0cdad6
2023-11-09 15:54:16 +08:00
hiyouga
48ec5355f9 add template, modify datasets
Former-commit-id: 81e54beb4d0f792f4fd7f450643caaf10f2f0b7d
2023-11-09 15:53:23 +08:00
hoshi-hiyouga
11859bc322 Merge pull request #1436 from lvzii/main
fix tokenizer config changed after pretrain

Former-commit-id: f485c3983e413fd3a3a57b451800705b072869a7
2023-11-09 14:30:50 +08:00
hiyouga
28c67a5be8 support parquet format #1446
Former-commit-id: 44a3b9ac9f10d2012b8ad3d8c48123db9a0da2f1
2023-11-09 14:17:40 +08:00
hiyouga
44fe93e9b0 fix #1438 #1439
Former-commit-id: 84260d58dda22adc32c26bc943ed2a36fd01341d
2023-11-09 13:45:10 +08:00
lvzi
09a1681b63 fix tokenizer config changed after pretrain
Changing the tokenizer's attributes at the preprocessing stage will result in saving a wrong tokenizer (for example, Baichuan2).

Former-commit-id: 19942b5314b84267691f0a5657d0679f2ddbe58b
2023-11-08 15:50:46 +08:00
hiyouga
f5ba2190fb fix ppo train and dpo eval
Former-commit-id: ced863031836632cb5920e22ae6991f251372118
2023-11-07 22:48:51 +08:00
hiyouga
14a38b5069 fix #1422
Former-commit-id: 25d7bbd0a5142f001bd2ff498df07b24137050a9
2023-11-07 19:42:01 +08:00
hiyouga
f23e5b602a fix reward model loading
Former-commit-id: 9709ca501180a1afce32e9043aedb359762b437d
2023-11-07 17:20:51 +08:00
hiyouga
857696ed9c fix args
Former-commit-id: 44d0fa2ac6a6423c7ddaf91eb8998c1b9248c04e
2023-11-07 16:36:06 +08:00
hiyouga
2084133058 update info
Former-commit-id: 89643b8ac1e3fa8d2f29f1c88e4d4503410c0d05
2023-11-07 16:28:21 +08:00
hiyouga
f7f0c3070e delete file
Former-commit-id: 7d6355db0fd5809b99f3fa42753cf4dffd251fd1
2023-11-07 16:20:12 +08:00
hiyouga
46235aa514 fix #1418
Former-commit-id: 9bfecc72c53cf95fea4a9ff02ec40a65da6d4f54
2023-11-07 16:17:22 +08:00
hiyouga
2eb65d21ac upgrade peft, fix #1088 #1411
Former-commit-id: aa7d104f8e050d12cb8f585bc8a52c850995500f
2023-11-07 16:13:36 +08:00
hiyouga
37a0d62a82 update requirements
Former-commit-id: 82ebbbbb80b3f3f616274210970738d0f44b5a0a
2023-11-06 19:01:21 +08:00
hiyouga
21ac46e439 use seed in evaluate.py
Former-commit-id: ab5cac1dfa681933f3266827f80068ce798b4c56
2023-11-06 18:17:51 +08:00
hiyouga
ba3e8ba20c update readme (list in alphabetical order)
Former-commit-id: e6a67b5477ee095bd92764581cfe6af57e799a69
2023-11-06 17:18:12 +08:00
hiyouga
2c48e798ca update templates
Former-commit-id: 85be2e242b062283f192c4c4d0715dc1e8a68589
2023-11-06 12:25:47 +08:00
hiyouga
4e40f5b62b fix #1383
Former-commit-id: 9b8a782aa80f27c3e2a2e2621f9be17cae1a27e8
2023-11-06 11:42:23 +08:00
hiyouga
2a8892b785 fix deepseek template
Former-commit-id: 1fdbcdad9a1cdb20299350efd87a8e5cb8c625a3
2023-11-05 13:08:46 +08:00
hiyouga
ee3b33ff03 support deepseek coder #1378
Former-commit-id: ae0c829917b9de10e71199c85c77a52cdcd2b7b3
2023-11-05 12:51:03 +08:00
hiyouga
b2c3001f8e fix #1365
Former-commit-id: 0277d120e62164bb7fa1d6043b8fcc52c881fe96
2023-11-05 12:21:07 +08:00
hiyouga
6cfe1e1ac2 tiny fix
Former-commit-id: 594c510a20d6c2782d7b7ffff18931e3003e6c22
2023-11-03 01:26:06 +08:00
hiyouga
52326870e4 fix #1290
Former-commit-id: ad911d258c4cea16f54d09bc192e076c21d26394
2023-11-03 00:44:53 +08:00
hiyouga
217fde0918 fix bug in data loader, support dpo eval
Former-commit-id: f4f3dcff990468a2fa864b7176adcebbcf16dac9
2023-11-03 00:34:26 +08:00
hiyouga
065021d82a update data readme
Former-commit-id: 6a65ef44ed58714c611da60b5af96b85352e8735
2023-11-03 00:15:23 +08:00
hiyouga
4bb643e685 update data readme (zh)
Former-commit-id: b32fb3a984c681732b82f6544d6c05a98c34cf4c
2023-11-02 23:42:49 +08:00
hiyouga
b77c745b1a support sharegpt format, add datasets
Former-commit-id: 202daf8987ccb7523be03ca535b572b5c9e65994
2023-11-02 23:10:04 +08:00
hiyouga
7d13501b94 support pagination in webui preview
Former-commit-id: f2307e26b9c2ce5d60917cce5a9638466ea676c8
2023-11-02 21:21:45 +08:00
hiyouga
ac74639b32 fix webui
Former-commit-id: 9192948fa221c0275ddfa579ef6b3442d45b8962
2023-11-02 18:03:14 +08:00
hiyouga
12fa56ae68 support warning in webui
Former-commit-id: 9903b523fad2f0ec0e66c3d313823bd4674bfa2b
2023-11-02 17:57:04 +08:00
hiyouga
f11b863f4b fix #1349
Former-commit-id: 556c023eab2a68560b26a7d5318a79410fb0c700
2023-11-02 17:02:44 +08:00
hiyouga
f3e4b72957 fix #1356
Former-commit-id: d2ed436108a339d405dad1be1ca15baca3d6d3e4
2023-11-02 16:51:52 +08:00
hiyouga
8d52fb46ca fix #1325
Former-commit-id: 59f2cbbd52d4646fbd1ba83032bf522ecc49a50f
2023-11-01 23:38:49 +08:00
hiyouga
dab8f45033 fix chat
Former-commit-id: 68f2b3df09c4c8638b9e225fd5b8aed3541e97a0
2023-11-01 23:07:58 +08:00
hiyouga
bff8b02543 update gradio, support multiple resp in api
Former-commit-id: a34263e7c0e07a080276d164cdab9f12f1d767d2
2023-11-01 23:02:16 +08:00
hiyouga
2406200914 fix SFT trainer
Former-commit-id: bf09b6a6cd75cc2738d9af6b8c30bcbba77fa9b5
2023-10-31 21:52:52 +08:00
hiyouga
db06fcfc84 fix #1316
Former-commit-id: 88a753fe80e277007bac2264aee24024e18f2314
2023-10-31 11:32:08 +08:00
hiyouga
93b9f74e9f update projects
Former-commit-id: 33d58e9171ad2693b9d54715eb61a6f4326c59f4
2023-10-29 22:53:47 +08:00
hiyouga
33ec844f76 add projects
Former-commit-id: 495a68cd5962dd3b3af7e4a920d91ac25531a862
2023-10-29 22:07:13 +08:00
hiyouga
0f727b393e update constants
Former-commit-id: ebacbb1072045924a7e335cc9dda488d6f0be8b3
2023-10-29 13:30:20 +08:00
hiyouga
7da2aad6ee fix vicuna template
Former-commit-id: a98eda0803e4b73a24f12d848e14161451921e98
2023-10-27 22:15:25 +08:00
hiyouga
6f09f50d02 fix chatglm3 template
Former-commit-id: 69bcbc9f6c98e4f4ad97ec0306b33ab21923d311
2023-10-27 21:12:06 +08:00
hiyouga
5919832059 update readme
Former-commit-id: 6fb92c7088316c56ce8656e540fc47b0a5a1bf18
2023-10-27 19:19:03 +08:00
hiyouga
f7635c1afc support chatglm3
Former-commit-id: ba82e13bbeed3b262d301196b1860d73f319401d
2023-10-27 19:16:28 +08:00
hiyouga
c762168ed0 support dataset cache
Former-commit-id: f79ee62eb4a2a4a01cb4e2a6aa2d07158cf8eb59
2023-10-26 21:48:45 +08:00
hiyouga
67a46e553f fix #1287
Former-commit-id: d885aca472c6448bbf9a9e8d16bead92038825e3
2023-10-26 17:49:41 +08:00
hiyouga
e406f37b54 fix #1285
Former-commit-id: 2f8fe4439506e844b147fe38b5eb878c5748c31c
2023-10-26 16:34:52 +08:00
hiyouga
62fe877124 remove filter in preprocess
Former-commit-id: 9eac08b35fec47129a29c401ca265343f8388ab0
2023-10-23 23:46:02 +08:00
hiyouga
a0e682ba79 update neftune logic
Former-commit-id: bb4f0589ed23bf0236d3e918272ad64f0a05ef39
2023-10-22 17:42:13 +08:00
hiyouga
49e8a87383 fix webui
Former-commit-id: a5a5a7bc1f53d36e1b26e418999465903cb7d9ed
2023-10-22 17:24:56 +08:00
hiyouga
b2764b49ca add new options in webui
Former-commit-id: 6698b832dd9cc2d7d60be4fa5ab90e34a7e9d8e0
2023-10-22 17:17:58 +08:00
hiyouga
06b810de8f fix recursion error
Former-commit-id: c7938188c36a71a878bca982b7dd151195164986
2023-10-22 16:28:37 +08:00
hiyouga
6da51565f5 reimplement neftune
Former-commit-id: efe9e5a194d3a9f052701d904715238816e4c09e
2023-10-22 16:15:08 +08:00
hoshi-hiyouga
1f69965239 Merge pull request #1252 from anvie/neftune
add NEFTune optimization

Former-commit-id: 85d5c5fbe731f486c3e83812227fa05edc131487
2023-10-22 15:59:20 +08:00
anvie
af2d61178d add NEFTune optimization
Former-commit-id: 603e0298af64116ac07130fe6661a9ba823c186c
2023-10-21 13:24:10 +07:00
hiyouga
6a955ccf4f fix openchat template
Former-commit-id: 88b9b657bc50495ac4c42f64195fc652fe4ca3df
2023-10-21 01:25:42 +08:00
hiyouga
c0658711ca fix tokenizer padding side in evaluate.py
Former-commit-id: bcb43ff8ba1946c1f7e7865c9d0fb47ba276935d
2023-10-21 00:30:04 +08:00
hiyouga
d602f06882 fix #1232
Former-commit-id: 49975755d47344e362145c52548fdda8783f2c0c
2023-10-20 23:28:52 +08:00
hiyouga
1cb9a38ac2 fix #1215
Former-commit-id: d91b43a8afbea4859357f2224e3d9b9d71160e6d
2023-10-19 16:19:21 +08:00
hiyouga
47a1f73d0f fix #1218
Former-commit-id: b301f35bd4a3bf368159c8f5fb4e2736f922115b
2023-10-19 16:17:41 +08:00
hiyouga
142dd63b47 fix #1228
Former-commit-id: e4e0cae3f55da2f1b566c97dbfdd7fc5b7b728a4
2023-10-19 15:54:10 +08:00
hiyouga
b1bd8370c2 fix #1217
Former-commit-id: 065fc0a6f3f005bb87e1c5c126c8b6bb470ce700
2023-10-19 15:52:24 +08:00
hiyouga
215660c8da rename webui
Former-commit-id: 26feaf80fff6177d9eb4e28ad18feb6d34d3ea27
2023-10-16 15:16:24 +08:00
hiyouga
0cafe67efe fix #1197
Former-commit-id: 00100e23fcfef9587fda4cf01c62599d996e1176
2023-10-16 15:13:46 +08:00
hoshi-hiyouga
ea83b3222b Update README_zh.md
Former-commit-id: 3450404bb9a33c3bd4b45ac4afcf51062f8c7d1d
2023-10-16 00:28:27 +08:00
hoshi-hiyouga
725087a04f Update README.md
Former-commit-id: d84896597eded79f78224faed81cc9f2df222978
2023-10-16 00:23:37 +08:00
66 changed files with 2199 additions and 1241 deletions

README.md (146 changes)

@@ -12,14 +12,18 @@
 \[ English | [中文](README_zh.md) \]
-## Example: Fine-tuning large language model within 10 minutes
+## LLaMA Board: A One-stop Web UI for Getting Started with LLaMA Factory
-Launch an **all-in-one Web UI** via `python src/train_web.py`.
+Launch **LLaMA Board** via `CUDA_VISIBLE_DEVICES=0 python src/train_web.py`. (multiple GPUs are not supported yet)
-Here is an example of altering the self-cognition of an instruction-tuned language model within 10 minutes on a single GPU.
 https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1
 ## Changelog
+[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `--neft_alpha` argument to activate NEFTune, e.g., `--neft_alpha 5`.
 [23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `--shift_attn` argument to enable shift short attention.
 [23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [this example](#evaluation) to evaluate your models.
@@ -48,24 +52,27 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
 | Model | Model size | Default module | Template |
 | -------------------------------------------------------- | --------------------------- | ----------------- | --------- |
-| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
-| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
-| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
-| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
-| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B | query_key_value | - |
 | [Baichuan](https://github.com/baichuan-inc/Baichuan-13B) | 7B/13B | W_pack | baichuan |
 | [Baichuan2](https://github.com/baichuan-inc/Baichuan2) | 7B/13B | W_pack | baichuan2 |
+| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
+| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
+| [ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6B | query_key_value | chatglm3 |
+| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B/180B | query_key_value | falcon |
 | [InternLM](https://github.com/InternLM/InternLM) | 7B/20B | q_proj,v_proj | intern |
-| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | chatml |
-| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | xverse |
-| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) | 6B | query_key_value | chatglm2 |
+| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
+| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
+| [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
 | [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) | 1.3B | Wqkv | - |
+| [Qwen](https://github.com/QwenLM/Qwen) | 7B/14B | c_attn | qwen |
+| [XVERSE](https://github.com/xverse-ai) | 7B/13B/65B | q_proj,v_proj | xverse |
 > [!NOTE]
 > **Default module** is used for the `--lora_target` argument, you can use `--lora_target all` to specify all the available modules.
 >
 > For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "chat" models.
+Please refer to [template.py](src/llmtuner/extras/template.py) for a full list of models we supported.
 ## Supported Training Approaches
 | Approach | Full-parameter | Partial-parameter | LoRA | QLoRA |
@@ -81,39 +88,61 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
 ## Provided Datasets
-- For pre-training:
-  - [Wiki Demo (en)](data/wiki_demo.txt)
-  - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
-  - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
-  - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
-  - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
-- For supervised fine-tuning:
-  - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
-  - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
-  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
-  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
-  - [Self-cognition (zh)](data/self_cognition.json)
-  - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
-  - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
-  - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
-  - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
-  - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
-  - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
-  - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
-  - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
-  - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
-  - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
-  - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
-  - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
-  - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
-  - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
-  - [UltraChat (en)](https://github.com/thunlp/UltraChat)
-  - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
-  - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
-- For reward modeling or DPO training:
-  - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
-  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
-  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+<details><summary>Pre-training datasets</summary>
+- [Wiki Demo (en)](data/wiki_demo.txt)
+- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
+- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
+- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
+- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
+- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
+- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
+- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
+- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
+</details>
+<details><summary>Supervised fine-tuning datasets</summary>
+- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
+- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
+- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+- [Self-cognition (zh)](data/self_cognition.json)
+- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
+- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
+- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
+- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
+- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
+- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
+- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
+- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
+- [UltraChat (en)](https://github.com/thunlp/UltraChat)
+- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
+- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
+- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
+- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
+- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
+- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
+- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
+- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
+- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
+- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
+- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
+- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
+- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
+- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
+- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
+</details>
+<details><summary>Preference datasets</summary>
+- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
+- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+</details>
 Please refer to [data/README.md](data/README.md) for details.
@@ -129,9 +158,9 @@ huggingface-cli login
 - Python 3.8+ and PyTorch 1.13.1+
 - 🤗Transformers, Datasets, Accelerate, PEFT and TRL
 - sentencepiece, protobuf and tiktoken
-- fire, jieba, rouge-chinese and nltk (used at evaluation and predict)
+- jieba, rouge-chinese and nltk (used at evaluation and predict)
-- gradio and matplotlib (used in web_demo.py)
+- gradio and matplotlib (used in web UI)
-- uvicorn, fastapi and sse-starlette (used in api_demo.py)
+- uvicorn, fastapi and sse-starlette (used in API)
 And **powerful GPUs**!
@@ -139,7 +168,7 @@ And **powerful GPUs**!
 ### Data Preparation (optional)
-Please refer to `data/example_dataset` for checking the details about the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset.
+Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset.
 > [!NOTE]
 > Please update `data/dataset_info.json` to use your custom dataset. About the format of this file, please refer to `data/README.md`.
@@ -160,17 +189,6 @@ If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you wi
 pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
 ```
-### All-in-one Web UI
-```bash
-CUDA_VISIBLE_DEVICES=0 python src/train_web.py
-```
-We **strongly recommend** using the all-in-one Web UI for newcomers since it can also generate training scripts automatically, even without a GPU environment.
-> [!WARNING]
-> Currently the web UI only supports training on **a single GPU**.
 ### Train on a single GPU
 > [!IMPORTANT]
@@ -377,8 +395,7 @@ python src/export_model.py \
     --template default \
     --finetuning_type lora \
     --checkpoint_dir path_to_checkpoint \
-    --export_dir path_to_export \
-    --fp16
+    --export_dir path_to_export
 ```
 ### API Demo
@@ -449,11 +466,18 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 > [!NOTE]
 > We recommend using `--per_device_eval_batch_size=1` and `--max_target_length 128` at 4/8-bit predict.
+## Projects using LLaMA Factory
+- **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
+- **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
+- **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
+- **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
 ## License
 This repository is licensed under the [Apache-2.0 License](LICENSE).
-Please follow the model licenses to use the corresponding model weights: [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [Falcon](LICENSE) / [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [InternLM](https://github.com/InternLM/InternLM#open-source-license) / [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE) / [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx)
+Please follow the model licenses to use the corresponding model weights: [Baichuan](https://huggingface.co/baichuan-inc/Baichuan-13B-Base/resolve/main/Community%20License%20for%20Baichuan-13B%20Model.pdf) / [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/resolve/main/Community%20License%20for%20Baichuan2%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [InternLM](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/LICENSE) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf)
 ## Citation
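The `--neft_alpha` changelog entry added above only names the flag, so here is a rough sketch of what NEFTune does under the hood. This is not the repository's implementation: the hook-based wiring and the default `alpha=5.0` are assumptions, and only the noise scale `alpha / sqrt(seq_len * hidden_dim)` is taken from the NEFTune paper.

```python
from functools import partial

import torch
from torch import nn


def neftune_forward_hook(module: nn.Embedding, inputs, output: torch.Tensor, alpha: float = 5.0):
    """Perturb embedding outputs with uniform noise during training (NEFTune-style)."""
    if module.training:
        seq_len, hidden_dim = output.size(1), output.size(2)
        scale = alpha / (seq_len * hidden_dim) ** 0.5  # alpha / sqrt(L * d)
        output = output + torch.empty_like(output).uniform_(-scale, scale)
    return output  # returning a tensor from a forward hook replaces the module's output


# Hypothetical wiring: attach the hook to a Transformers model's input embedding layer.
# model.get_input_embeddings().register_forward_hook(partial(neftune_forward_hook, alpha=5.0))
```

At inference time `module.training` is False, so the noise is disabled automatically.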

README_zh.md

@@ -12,14 +12,18 @@
 \[ [English](README.md) | 中文 \]
-## 示例:在十分钟内微调一个大模型
+## LLaMA Board: 通过一站式网页界面快速上手 LLaMA Factory
-通过 `python src/train_web.py` 开启**训练推理一体化界面**。
+使用 `CUDA_VISIBLE_DEVICES=0 python src/train_web.py` 启动 **LLaMA Board**。(该界面目前仅支持单卡训练)
-下面是使用单张 GPU 在 10 分钟内更改对话式大型语言模型自我认知的示例。
 https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1
 ## 更新日志
+[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `--neft_alpha` 参数启用 NEFTune,例如 `--neft_alpha 5`。
 [23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `--shift_attn` 参数以启用该功能。
 [23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。使用方法请参阅[此示例](#模型评估)。
@@ -34,7 +38,7 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
 [23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
-[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请尝试使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
+[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
 [23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
@@ -42,30 +46,33 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
 [23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
-[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。请尝试使用 `--quantization_bit 4` 参数进行 4 比特量化微调。
+[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。请使用 `--quantization_bit 4` 参数进行 4 比特量化微调。
 ## 模型
 | 模型名 | 模型大小 | 默认模块 | Template |
 | -------------------------------------------------------- | --------------------------- | ----------------- | --------- |
-| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
-| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
-| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
-| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
-| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B | query_key_value | - |
 | [Baichuan](https://github.com/baichuan-inc/Baichuan-13B) | 7B/13B | W_pack | baichuan |
 | [Baichuan2](https://github.com/baichuan-inc/Baichuan2) | 7B/13B | W_pack | baichuan2 |
+| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
+| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
+| [ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6B | query_key_value | chatglm3 |
+| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B/180B | query_key_value | falcon |
 | [InternLM](https://github.com/InternLM/InternLM) | 7B/20B | q_proj,v_proj | intern |
-| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | chatml |
-| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | xverse |
-| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) | 6B | query_key_value | chatglm2 |
+| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
+| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
+| [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
 | [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) | 1.3B | Wqkv | - |
+| [Qwen](https://github.com/QwenLM/Qwen) | 7B/14B | c_attn | qwen |
+| [XVERSE](https://github.com/xverse-ai) | 7B/13B/65B | q_proj,v_proj | xverse |
 > [!NOTE]
 > **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块。
 >
 > 对于所有“基座”(Base)模型,`--template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Chat)模型请务必使用**对应的模板**。
+项目所支持模型的完整列表请参阅 [template.py](src/llmtuner/extras/template.py)。
 ## 训练方法
 | 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
@@ -81,41 +88,63 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
 ## 数据集
-- 用于预训练:
-  - [Wiki Demo (en)](data/wiki_demo.txt)
-  - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
-  - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
-  - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
-  - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
-- 用于指令监督微调:
-  - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
-  - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
-  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
-  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
-  - [Self-cognition (zh)](data/self_cognition.json)
-  - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
-  - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
-  - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
-  - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
-  - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
-  - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
-  - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
-  - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
-  - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
-  - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
-  - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
-  - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
-  - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
-  - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
-  - [UltraChat (en)](https://github.com/thunlp/UltraChat)
-  - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
-  - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
-- 用于训练奖励模型或 DPO 训练:
-  - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
-  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
-  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
-使用方法请参考 [data/README.md](data/README_zh.md) 文件。
+<details><summary>预训练数据集</summary>
+- [Wiki Demo (en)](data/wiki_demo.txt)
+- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
+- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
+- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
+- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
+- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
+- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
+- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
+- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
+</details>
+<details><summary>指令微调数据集</summary>
+- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
+- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
+- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+- [Self-cognition (zh)](data/self_cognition.json)
+- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
+- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
+- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
+- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
+- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
+- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
+- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
+- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
+- [UltraChat (en)](https://github.com/thunlp/UltraChat)
+- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
+- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
+- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
+- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
+- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
+- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
+- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
+- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
+- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
+- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
+- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
+- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
+- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
+- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
+- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
+</details>
+<details><summary>偏好数据集</summary>
+- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
+- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+</details>
+使用方法请参考 [data/README_zh.md](data/README_zh.md) 文件。
 部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
@@ -129,7 +158,7 @@ huggingface-cli login
 - Python 3.8+ 和 PyTorch 1.13.1+
 - 🤗Transformers, Datasets, Accelerate, PEFT 和 TRL
 - sentencepiece, protobuf 和 tiktoken
-- fire, jieba, rouge-chinese 和 nltk (用于评估及预测)
+- jieba, rouge-chinese 和 nltk (用于评估及预测)
 - gradio 和 matplotlib (用于网页端交互)
 - uvicorn, fastapi 和 sse-starlette (用于 API)
@@ -139,10 +168,10 @@ huggingface-cli login
 ### 数据准备(可跳过)
-关于数据集文件的格式,请参考 `data/example_dataset` 文件夹的内容。构建自定义数据集时,既可以使用单个 `.json` 文件,也可以使用一个[数据加载脚本](https://huggingface.co/docs/datasets/dataset_script)和多个文件。
+关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。构建自定义数据集时,既可以使用单个 `.json` 文件,也可以使用一个[数据加载脚本](https://huggingface.co/docs/datasets/dataset_script)和多个文件。
 > [!NOTE]
-> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件,该文件的格式请参考 `data/README.md`。
+> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件,该文件的格式请参考 `data/README_zh.md`。
 ### 环境搭建(可跳过)
@@ -160,17 +189,6 @@ pip install -r requirements.txt
 pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
 ```
-### 浏览器一体化界面
-```bash
-CUDA_VISIBLE_DEVICES=0 python src/train_web.py
-```
-我们**极力推荐**新手使用浏览器一体化界面,因为它还可以不依赖 GPU 环境自动生成在 GPU 上运行的命令行脚本。
-> [!WARNING]
-> 目前网页 UI 仅支持**单卡训练**。
 ### 单 GPU 训练
 > [!IMPORTANT]
@@ -376,8 +394,7 @@ python src/export_model.py \
     --template default \
     --finetuning_type lora \
     --checkpoint_dir path_to_checkpoint \
-    --output_dir path_to_export \
-    --fp16
+    --export_dir path_to_export
 ```
 ### API 服务
@@ -448,11 +465,18 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 > [!NOTE]
 > 我们建议在量化模型的预测中使用 `--per_device_eval_batch_size=1` 和 `--max_target_length 128`。
+## 使用了 LLaMA Factory 的项目
+- **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
+- **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
+- **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sumsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
+- **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
 ## 协议
 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
-使用模型权重时,请遵循对应的模型协议:[LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [Falcon](LICENSE) / [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [InternLM](https://github.com/InternLM/InternLM#open-source-license) / [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE) / [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx)
+使用模型权重时,请遵循对应的模型协议:[Baichuan](https://huggingface.co/baichuan-inc/Baichuan-13B-Base/resolve/main/Community%20License%20for%20Baichuan-13B%20Model.pdf) / [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/resolve/main/Community%20License%20for%20Baichuan2%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [InternLM](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/LICENSE) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf)
 ## 引用

data/README.md

@@ -2,31 +2,106 @@ If you are using a custom dataset, please provide your dataset definition in the
 ```json
 "dataset_name": {
-  "hf_hub_url": "the name of the dataset repository on the HuggingFace hub. (if specified, ignore below 3 arguments)",
+  "hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore below 3 arguments)",
   "script_url": "the name of the directory containing a dataset loading script. (if specified, ignore below 2 arguments)",
   "file_name": "the name of the dataset file in the this directory. (required if above are not specified)",
-  "file_sha1": "the SHA-1 hash value of the dataset file. (optional)",
+  "file_sha1": "the SHA-1 hash value of the dataset file. (optional, does not affect training)",
-  "ranking": "whether the examples contains ranked responses or not. (default: false)",
+  "subset": "the name of the subset. (optional, default: None)",
+  "ranking": "whether the dataset is a preference dataset or not. (default: false)",
+  "formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
   "columns": {
-    "prompt": "the name of the column in the datasets containing the prompts. (default: instruction)",
+    "prompt": "the column name in the dataset containing the prompts. (default: instruction, for alpaca)",
-    "query": "the name of the column in the datasets containing the queries. (default: input)",
+    "query": "the column name in the dataset containing the queries. (default: input, for alpaca)",
-    "response": "the name of the column in the datasets containing the responses. (default: output)",
+    "response": "the column name in the dataset containing the responses. (default: output, for alpaca)",
-    "history": "the name of the column in the datasets containing the history of chat. (default: None)"
+    "history": "the column name in the dataset containing the histories. (default: None, for alpaca)",
+    "messages": "the column name in the dataset containing the messages. (default: conversations, for sharegpt)",
+    "role": "the key in the message represents the identity. (default: from, for sharegpt)",
+    "content": "the key in the message represents the content. (default: value, for sharegpt)"
   }
 }
 ```
-where the `prompt` and `response` columns should contain non-empty values. The `query` column will be concatenated with the `prompt` column and used as input for the model. The `history` column should contain a list where each element is a string tuple representing a query-response pair.
+Given above, you can use the custom dataset via specifying `--dataset dataset_name`.
-For datasets used in reward modeling or DPO training, the `response` column should be a string list, with the preferred answers appearing first, for example:
+Currently we support dataset in **alpaca** or **sharegpt** format, the dataset in alpaca format should follow the below format:
+```json
+[
+  {
+    "instruction": "user instruction (required)",
+    "input": "user input (optional)",
+    "output": "model response (required)",
+    "history": [
+      ["user instruction in the first round (optional)", "model response in the first round (optional)"],
+      ["user instruction in the second round (optional)", "model response in the second round (optional)"]
+    ]
+  }
+]
+```
+Regarding the above dataset, the `columns` in `dataset_info.json` should be:
+```json
+"dataset_name": {
+  "columns": {
+    "prompt": "instruction",
+    "query": "input",
+    "response": "output",
+    "history": "history"
+  }
+}
+```
+where the `prompt` and `response` columns should contain non-empty values, represent instruction and response respectively. The `query` column will be concatenated with the `prompt` column and used as input for the model.
+The `history` column is a list consisting string tuples representing query-response pairs in history. Note that the responses **in each round will be used for training**.
+For the pre-training datasets, only the `prompt` column will be used for training.
+For the preference datasets, the `response` column should be a string list whose length is 2, with the preferred answers appearing first, for example:
 ```json
 {
-  "instruction": "Question",
+  "instruction": "user instruction",
-  "input": "",
+  "input": "user input",
   "output": [
-    "Chosen answer",
+    "chosen answer",
-    "Rejected answer"
+    "rejected answer"
   ]
 }
 ```
+The dataset in sharegpt format should follow the below format:
+```json
+[
+  {
+    "conversations": [
+      {
+        "from": "human",
+        "value": "user instruction"
+      },
+      {
+        "from": "gpt",
+        "value": "model response"
+      }
+    ]
+  }
+]
+```
+Regarding the above dataset, the `columns` in `dataset_info.json` should be:
+```json
+"dataset_name": {
+  "columns": {
+    "messages": "conversations",
+    "role": "from",
+    "content": "value"
+  }
+}
+```
+where the `messages` column should be a list whose length is even, and follow the `u/a/u/a/u/a` order.
+Pre-training datasets and preference datasets are incompatible with the sharegpt format yet.
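The sharegpt rules added above (an even number of messages in strict human/gpt alternation, addressed via the `messages`, `role` and `content` columns) are easy to violate in a hand-built dataset. Below is a minimal sanity check written against the default column and key names documented above; the function itself is illustrative and not part of the repository.

```python
def check_sharegpt_example(example: dict, messages_key: str = "conversations",
                           role_key: str = "from", content_key: str = "value") -> None:
    """Assert that a single record follows the sharegpt format described above."""
    messages = example[messages_key]
    if len(messages) == 0 or len(messages) % 2 != 0:
        raise ValueError("the messages list must be non-empty and of even length")
    for turn, message in enumerate(messages):
        expected_role = "human" if turn % 2 == 0 else "gpt"  # u/a/u/a/u/a order
        if message[role_key] != expected_role:
            raise ValueError(f"turn {turn} should come from '{expected_role}'")
        if not isinstance(message[content_key], str) or not message[content_key]:
            raise ValueError(f"turn {turn} has empty or non-string content")


# Usage on a record shaped like the sample above:
check_sharegpt_example({"conversations": [
    {"from": "human", "value": "user instruction"},
    {"from": "gpt", "value": "model response"},
]})
```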

View File

@@ -1,32 +1,107 @@
-如果您使用自定义数据集,请务必在 `dataset_info.json` 文件中以下格式提供您的数据集定义。
+如果您使用自定义数据集,请务必在 `dataset_info.json` 文件中按照以下格式提供数据集定义。
 ```json
 "数据集名称": {
-  "hf_hub_url": "HuggingFace 上的项目地址(若指定,则忽略下列三个参数)",
+  "hf_hub_url": "Hugging Face 上的项目地址(若指定,则忽略下列三个参数)",
   "script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略下列两个参数)",
   "file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
-  "file_sha1": "数据集文件的 SHA-1 哈希值(可选)",
+  "file_sha1": "数据集文件的 SHA-1 哈希值(可选,留空不影响训练)",
-  "ranking": "数据集是否包含排序后的回答(默认:false)",
+  "subset": "数据集子集的名称(可选,默认:None)",
+  "ranking": "是否为偏好数据集(可选,默认:False)",
+  "formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
   "columns": {
-    "prompt": "数据集代表提示词的表头名称(默认:instruction)",
+    "prompt": "数据集代表提示词的表头名称(默认:instruction,用于 alpaca 格式)",
-    "query": "数据集代表请求的表头名称(默认:input)",
+    "query": "数据集代表请求的表头名称(默认:input,用于 alpaca 格式)",
-    "response": "数据集代表回答的表头名称(默认:output)",
+    "response": "数据集代表回答的表头名称(默认:output,用于 alpaca 格式)",
-    "history": "数据集代表历史对话的表头名称(默认:None)"
+    "history": "数据集代表历史对话的表头名称(默认:None,用于 alpaca 格式)",
+    "messages": "数据集代表消息列表的表头名称(默认:conversations,用于 sharegpt 格式)",
+    "role": "消息中代表发送者身份的键名(默认:from,用于 sharegpt 格式)",
+    "content": "消息中代表文本内容的键名(默认:value,用于 sharegpt 格式)"
   }
 }
 ```
-其中 `prompt` 和 `response` 列应当是非空的字符串。`query` 列的内容将会和 `prompt` 列拼接作为模型输入。`history` 列应当是一个列表,其中每个元素是一个字符串二元组,分别代表用户请求和模型答复。
+添加后可通过指定 `--dataset 数据集名称` 参数使用自定义数据集。
-对于训练奖励模型或 DPO 训练的数据集,`response` 列应当是一个字符串列表,排在前面的代表更优的答案,例如:
+该项目目前支持两种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织:
+```json
+[
+  {
+    "instruction": "用户指令(必填)",
+    "input": "用户输入(选填)",
+    "output": "模型回答(必填)",
+    "history": [
+      ["第一轮指令(选填)", "第一轮回答(选填)"],
+      ["第二轮指令(选填)", "第二轮回答(选填)"]
+    ]
+  }
+]
+```
+对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为:
+```json
+"数据集名称": {
+  "columns": {
+    "prompt": "instruction",
+    "query": "input",
+    "response": "output",
+    "history": "history"
+  }
+}
+```
+其中 `prompt` 和 `response` 列应当是非空的字符串,分别代表用户指令和模型回答。`query` 列的内容将会和 `prompt` 列拼接作为模型输入。
+`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮的指令和回答。注意每轮的模型回答**均会被用于训练**。
+对于预训练数据集,仅 `prompt` 列中的内容会用于模型训练。
+对于偏好数据集,`response` 列应当是一个长度为 2 的字符串列表,排在前面的代表更优的回答,例如:
 ```json
 {
-  "instruction": "Question",
+  "instruction": "用户指令",
-  "input": "",
+  "input": "用户输入",
   "output": [
-    "Chosen answer",
+    "优质回答",
-    "Rejected answer"
+    "劣质回答"
   ]
 }
 ```
+而 sharegpt 格式的数据集按照以下方式组织:
+```json
+[
+  {
+    "conversations": [
+      {
+        "from": "human",
+        "value": "用户指令"
+      },
+      {
+        "from": "gpt",
+        "value": "模型回答"
+      }
+    ]
+  }
+]
+```
+对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为:
+```json
+"数据集名称": {
+  "columns": {
+    "messages": "conversations",
+    "role": "from",
+    "content": "value"
+  }
+}
+```
+其中 `messages` 列必须为偶数长度的列表,且符合 `用户/模型/用户/模型/用户/模型` 的顺序。
+预训练数据集和偏好数据集尚不支持 sharegpt 格式。

View File

@@ -1,6 +1,5 @@
 import json
 import datasets
-from typing import Any, Dict, List
 _DESCRIPTION = "BELLE multiturn chat dataset."
@@ -23,7 +22,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("0.0.0")
-    def _info(self) -> datasets.DatasetInfo:
+    def _info(self):
         features = datasets.Features({
             "instruction": datasets.Value("string"),
             "output": datasets.Value("string"),
@@ -37,7 +36,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_path = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
@@ -48,7 +47,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             )
         ]
-    def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat with history
+    def _generate_examples(self, filepath: str):
         with open(filepath, "r", encoding="utf-8") as f:
             for key, row in enumerate(f):
                 data = json.loads(row)

View File

@@ -3,7 +3,7 @@ import datasets
 from typing import Any, Dict, List
-_DESCRIPTION = "An example of dataset for LLaMA."
+_DESCRIPTION = "An example of dataset."
 _CITATION = ""
 _HOMEPAGE = ""
 _LICENSE = ""

View File

@@ -1,9 +1,9 @@
 import json
 import datasets
-from typing import Any, Dict, List
+from typing import List
-_DESCRIPTION = "Human preference data about helpfulness and harmlessness for ChatGLM."
+_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
 _CITATION = ""
 _HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf"
 _LICENSE = "mit"
@@ -42,7 +42,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_path = dl_manager.download_and_extract(_URLS)
         return [
             datasets.SplitGenerator(
@@ -59,7 +59,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
             )
         ]
-    def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
+    def _generate_examples(self, filepaths: List[str]):
         key = 0
         for filepath in filepaths:
             with open(filepath, "r", encoding="utf-8") as f:

View File

@@ -1,6 +1,6 @@
 import json
 import datasets
-from typing import Any, Dict, List
+from typing import List
 _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
@@ -21,15 +21,13 @@ _LICENSE = "cc-by-nc-4.0"
 _BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"
-class BelleMultiturn(datasets.GeneratorBasedBuilder):
+class UltraChat(datasets.GeneratorBasedBuilder):
 VERSION = datasets.Version("0.0.0")
-def _info(self) -> datasets.DatasetInfo:
+def _info(self):
 features = datasets.Features({
-"instruction": datasets.Value("string"),
-"output": datasets.Value("string"),
-"history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
+"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]
 })
 return datasets.DatasetInfo(
 description=_DESCRIPTION,
@@ -39,8 +37,8 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
 citation=_CITATION
 )
-def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+def _split_generators(self, dl_manager: datasets.DownloadManager):
-file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(9)] # multiple shards
+file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
 return [
 datasets.SplitGenerator(
 name=datasets.Split.TRAIN,
@@ -50,7 +48,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
 )
 ]
-def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
+def _generate_examples(self, filepaths: List[str]):
 for filepath in filepaths:
 with open(filepath, "r", encoding="utf-8") as f:
 for row in f:
@@ -58,19 +56,16 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
 data = json.loads(row)
 except:
 continue
-key = data["id"]
-content = data["data"]
+key: int = data["id"]
+content: List[str] = data["data"]
 if len(content) % 2 == 1:
 content.pop(-1)
 if len(content) < 2:
 continue
-query = content[-2]
-response = content[-1]
-history = [[content[2*i], content[2*i+1]] for i in range(len(content) // 2 - 1)]
+conversations = [{
+"from": "human" if i % 2 == 0 else "gpt",
+"value": content[i]
+} for i in range(len(content))]
 yield key, {
-"instruction": query,
-"output": response,
-"history": history
+"conversations": conversations
 }
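For reference, a minimal standalone sketch of the record shape the rewritten UltraChat script now yields; the sample turns below are invented, only the conversations schema follows the diff above.

raw = {"id": 0, "data": ["How do plants make food?", "Through photosynthesis: light, water and CO2 are turned into sugars."]}
content = raw["data"][:len(raw["data"]) // 2 * 2]  # keep complete human/gpt pairs only
conversations = [
    {"from": "human" if i % 2 == 0 else "gpt", "value": content[i]}
    for i in range(len(content))
]
print({"conversations": conversations})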


@@ -1,20 +1,19 @@
 torch>=1.13.1
-transformers>=4.31.0
+transformers>=4.31.0,<4.35.0
-datasets>=2.12.0
+datasets>=2.14.0
 accelerate>=0.21.0
-peft>=0.4.0
+peft>=0.6.0
-trl>=0.7.1
+trl>=0.7.4
+gradio>=3.38.0,<4.0.0
 scipy
 sentencepiece
 protobuf
 tiktoken
-fire
 jieba
 rouge-chinese
 nltk
-gradio==3.38.0
 uvicorn
-pydantic==1.10.11
+pydantic
-fastapi==0.95.1
+fastapi
 sse-starlette
 matplotlib


@@ -6,8 +6,8 @@ from llmtuner import ChatModel, create_app
 def main():
 chat_model = ChatModel()
 app = create_app(chat_model)
-uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
 print("Visit http://localhost:8000/docs for API document.")
+uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
 if __name__ == "__main__":


@@ -1,3 +1,4 @@
+import readline
 from llmtuner import ChatModel


@@ -1,185 +1,10 @@
-# coding=utf-8
+from llmtuner import Evaluator
# Evaluates the performance of pre-trained models.
# Usage: python evaluate.py --model_name_or_path path_to_model --checkpoint_dir path_to_ckpt --template vanilla
# --task ceval --split validation --lang zh --n_shot 5 --batch_size 4 --save_name result
# Inspired by: https://github.com/hendrycks/test/blob/master/evaluate_flan.py
import os
import fire
import json
import torch
import numpy as np
from collections import Counter
from datasets import load_dataset
from dataclasses import dataclass
from tqdm import tqdm, trange
from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple
from llmtuner import ChatModel
if TYPE_CHECKING:
from datasets import Dataset
choices = ["A", "B", "C", "D"] def main():
evaluator = Evaluator()
evaluator.eval()
@dataclass
class EvalTemplate:
system: str
choice: str
answer: str
prefix: str
def parse_example(
self,
example: Dict[str, str]
) -> Tuple[str, str]:
candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in choices if ch in example]
return "".join([example["question"]] + candidates + [self.answer]), example["answer"]
def format_example(
self,
target_data: Dict[str, str],
support_set: "Dataset",
subject_name: str,
use_history: bool
) -> Tuple[str, str, List[Tuple[str, str]]]:
query, resp = self.parse_example(target_data)
history = [self.parse_example(support_set[k]) for k in range(len(support_set))]
if len(history):
temp = history.pop(0)
history.insert(0, (self.system.format(subject=subject_name) + temp[0], temp[1]))
else:
query = self.system.format(subject=subject_name) + query
if not use_history:
query = "\n\n".join(["".join(item) for item in history] + [query])
history = []
return query.strip(), resp, history
eval_templates = {
"en": EvalTemplate(
system="The following are multiple choice questions (with answers) about {subject}.\n\n",
choice="\n{choice}. {content}",
answer="\nAnswer: ",
prefix=" "
),
"zh": EvalTemplate(
system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n",
choice="\n{choice}. {content}",
answer="\n答案:",
prefix="\n"
)
}
@torch.inference_mode()
def batch_inference(
chat_model: ChatModel,
batch_input: Dict[str, torch.Tensor],
prefix_char: str
) -> List[str]:
logits = chat_model.model(**batch_input).logits
probs = torch.nn.functional.softmax(
torch.stack(
[
logits[:, -1, chat_model.tokenizer.encode(prefix_char + choice, add_special_tokens=False)[-1]]
for choice in choices
],
dim=-1
),
dim=-1
).detach()
return [chr(ord("A") + offset.item()) for offset in torch.argmax(probs, dim=-1)]
def evaluate(
model_name_or_path: str,
finetuning_type: Optional[str] = "lora",
checkpoint_dir: Optional[str] = None,
template: Optional[str] = "vanilla",
task: Optional[str] = "ceval",
dataset_dir: Optional[str] = "evaluation",
split: Optional[Literal["validation", "test"]] = "validation",
lang: Optional[Literal["zh", "en"]] = "zh",
n_shot: Optional[int] = 5,
n_avg: Optional[int] = 1,
batch_size: Optional[int] = 4,
save_name: Optional[str] = None
):
with open(os.path.join(dataset_dir, task, "mapping.json"), "r", encoding="utf-8") as f:
categorys: Dict[str, Dict[str, str]] = json.load(f)
chat_model = ChatModel(dict(
model_name_or_path=model_name_or_path,
finetuning_type=finetuning_type,
checkpoint_dir=checkpoint_dir,
template=template
))
eval_template = eval_templates[lang]
assert chat_model.tokenizer.padding_side == "left", "only left-padded tensor can be accepted."
category_corrects: Dict[str, np.ndarray] = {
subj: np.array([], dtype="bool") for subj in ["Average", "STEM", "Social Sciences", "Humanities", "Other"]
}
pbar = tqdm(categorys.keys(), desc="Processing subjects", position=0)
results = {}
for subject in pbar:
dataset = load_dataset(os.path.join(dataset_dir, task), subject)
labels, answers, all_outputs = [], [], []
for epoch in range(n_avg):
pbar.set_postfix_str("{} Trial: {}".format(categorys[subject]["name"], epoch))
inputs, outputs = [], []
for i in trange(len(dataset[split]), desc="Formatting batches", position=1, leave=False):
support_set = dataset["train"].shuffle().select(range(min(n_shot, len(dataset["train"]))))
query, resp, history = eval_template.format_example(
target_data=dataset[split][i],
support_set=support_set,
subject_name=categorys[subject]["name"],
use_history=chat_model.template.use_history
)
input_ids, _ = chat_model.template.encode_oneturn(
tokenizer=chat_model.tokenizer, query=query, resp=resp, history=history
)
inputs.append({"input_ids": input_ids, "attention_mask": [1] * len(input_ids)})
if epoch == 0:
labels.append(resp)
for i in trange(0, len(inputs), batch_size, desc="Predicting batches", position=1, leave=False):
batch_input = chat_model.tokenizer.pad(
inputs[i : i + batch_size], return_attention_mask=True, return_tensors="pt"
).to(chat_model.model.device)
preds = batch_inference(chat_model, batch_input, eval_template.prefix)
outputs += preds
all_outputs.append(outputs)
for i in range(len(all_outputs[0])):
count = Counter([all_outputs[epoch][i] for epoch in range(n_avg)])
answers.append(count.most_common(1)[0][0])
corrects = (np.array(answers) == np.array(labels))
category_name = categorys[subject]["category"]
category_corrects[category_name] = np.concatenate([category_corrects[category_name], corrects], axis=0)
category_corrects["Average"] = np.concatenate([category_corrects["Average"], corrects], axis=0)
results[subject] = {str(i): answers[i] for i in range(len(answers))}
score_info = "\n".join([
"{:>15}: {:.2f}".format(category_name, 100 * np.mean(category_correct))
for category_name, category_correct in category_corrects.items() if len(category_correct)
])
print(score_info)
if save_name is not None:
with open(save_name + ".json", "w", encoding="utf-8", newline="\n") as f:
json.dump(results, f, indent=2)
with open(save_name + ".log", "w", encoding="utf-8", newline="\n") as f:
f.write(score_info)
if __name__ == "__main__": if __name__ == "__main__":
fire.Fire(evaluate) main()
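The 185-line script above collapses into a thin wrapper around the new Evaluator class. A hedged usage sketch follows; the argument keys mirror the old CLI flags and the hparams dataclasses, but the values are placeholders, not a verified configuration.

from llmtuner import Evaluator

evaluator = Evaluator(dict(
    model_name_or_path="path_to_model",  # hypothetical path
    template="vanilla",
    task="ceval",
    split="validation",
    lang="zh",
    n_shot=5,
    batch_size=4
))
evaluator.eval()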


@@ -1,9 +1,10 @@
-# Level: api, webui > chat > tuner > dsets > extras, hparams
+# Level: api, webui > chat, eval > tuner > dsets > extras, hparams
 from llmtuner.api import create_app
 from llmtuner.chat import ChatModel
+from llmtuner.eval import Evaluator
 from llmtuner.tuner import export_model, run_exp
 from llmtuner.webui import create_ui, create_web_demo
-__version__ = "0.2.0"
+__version__ = "0.2.2"


@@ -1,9 +1,11 @@
+import json
 import uvicorn
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException, status
 from fastapi.middleware.cors import CORSMiddleware
 from contextlib import asynccontextmanager
 from sse_starlette import EventSourceResponse
 from typing import List, Tuple
+from pydantic import BaseModel
 from llmtuner.extras.misc import torch_gc
 from llmtuner.chat import ChatModel
@@ -29,6 +31,13 @@ async def lifespan(app: FastAPI): # collects GPU memory
 torch_gc()
+def to_json(data: BaseModel) -> str:
+try: # pydantic v2
+return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
+except: # pydantic v1
+return data.json(exclude_unset=True, ensure_ascii=False)
 def create_app(chat_model: ChatModel) -> FastAPI:
 app = FastAPI(lifespan=lifespan)
@@ -45,10 +54,10 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 model_card = ModelCard(id="gpt-3.5-turbo")
 return ModelList(data=[model_card])
-@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+@app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK)
 async def create_chat_completion(request: ChatCompletionRequest):
 if len(request.messages) < 1 or request.messages[-1].role != Role.USER:
-raise HTTPException(status_code=400, detail="Invalid request")
+raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")
 query = request.messages[-1].content
 prev_messages = request.messages[:-1]
@@ -62,6 +71,8 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 for i in range(0, len(prev_messages), 2):
 if prev_messages[i].role == Role.USER and prev_messages[i+1].role == Role.ASSISTANT:
 history.append([prev_messages[i].content, prev_messages[i+1].content])
+else:
+raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...")
 if request.stream:
 generate = predict(query, history, system, request)
@@ -72,7 +83,8 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 do_sample=request.do_sample,
 temperature=request.temperature,
 top_p=request.top_p,
-max_new_tokens=request.max_tokens
+max_new_tokens=request.max_tokens,
+num_return_sequences=request.n
 )
 usage = ChatCompletionResponseUsage(
@@ -81,13 +93,13 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 total_tokens=prompt_length+response_length
 )
-choice_data = ChatCompletionResponseChoice(
+choices = [ChatCompletionResponseChoice(
-index=0,
+index=i,
-message=ChatMessage(role=Role.ASSISTANT, content=response),
+message=ChatMessage(role=Role.ASSISTANT, content=choice),
 finish_reason=Finish.STOP
-)
+) for i, choice in enumerate(response)]
-return ChatCompletionResponse(model=request.model, choices=[choice_data], usage=usage)
+return ChatCompletionResponse(model=request.model, choices=choices, usage=usage)
 async def predict(query: str, history: List[Tuple[str, str]], system: str, request: ChatCompletionRequest):
 choice_data = ChatCompletionResponseStreamChoice(
@@ -96,7 +108,7 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 finish_reason=None
 )
 chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
-yield chunk.json(exclude_unset=True, ensure_ascii=False)
+yield to_json(chunk)
 for new_text in chat_model.stream_chat(
 query, history, system,
@@ -114,7 +126,7 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 finish_reason=None
 )
 chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
-yield chunk.json(exclude_unset=True, ensure_ascii=False)
+yield to_json(chunk)
 choice_data = ChatCompletionResponseStreamChoice(
 index=0,
@@ -122,7 +134,7 @@ def create_app(chat_model: ChatModel) -> FastAPI:
 finish_reason=Finish.STOP
 )
 chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
-yield chunk.json(exclude_unset=True, ensure_ascii=False)
+yield to_json(chunk)
 yield "[DONE]"
 return app
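A hypothetical client-side check of the new n parameter; the endpoint, field names and default model id come from the handlers above, while the host and prompt are made up.

import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
        "n": 2,
        "stream": False
    }
)
for choice in resp.json()["choices"]:
    print(choice["index"], choice["message"]["content"])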


@@ -20,9 +20,6 @@ class ModelCard(BaseModel):
 object: Optional[str] = "model"
 created: Optional[int] = Field(default_factory=lambda: int(time.time()))
 owned_by: Optional[str] = "owner"
-root: Optional[str] = None
-parent: Optional[str] = None
-permission: Optional[list] = []
 class ModelList(BaseModel):


@@ -26,17 +26,17 @@ class ChatModel:
 **input_kwargs
 ) -> Tuple[Dict[str, Any], int]:
 system = system or self.system_prompt
 prompt, _ = self.template.encode_oneturn(
 tokenizer=self.tokenizer, query=query, resp="", history=history, system=system
 )
-prompt_length = len(prompt)
 input_ids = torch.tensor([prompt], device=self.model.device)
+prompt_length = len(input_ids[0])
 do_sample = input_kwargs.pop("do_sample", None)
 temperature = input_kwargs.pop("temperature", None)
 top_p = input_kwargs.pop("top_p", None)
 top_k = input_kwargs.pop("top_k", None)
+num_return_sequences = input_kwargs.pop("num_return_sequences", None)
 repetition_penalty = input_kwargs.pop("repetition_penalty", None)
 max_length = input_kwargs.pop("max_length", None)
 max_new_tokens = input_kwargs.pop("max_new_tokens", None)
@@ -47,11 +47,15 @@ class ChatModel:
 temperature=temperature or generating_args["temperature"],
 top_p=top_p or generating_args["top_p"],
 top_k=top_k or generating_args["top_k"],
+num_return_sequences=num_return_sequences or 1,
 repetition_penalty=repetition_penalty or generating_args["repetition_penalty"],
 eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
 pad_token_id=self.tokenizer.pad_token_id
 ))
+if isinstance(num_return_sequences, int) and num_return_sequences > 1:
+generating_args["do_sample"] = True
 if max_length:
 generating_args.pop("max_new_tokens", None)
 generating_args["max_length"] = max_length
@@ -75,12 +79,16 @@ class ChatModel:
 history: Optional[List[Tuple[str, str]]] = None,
 system: Optional[str] = None,
 **input_kwargs
-) -> Tuple[str, Tuple[int, int]]:
+) -> Tuple[List[str], Tuple[int, int]]:
 gen_kwargs, prompt_length = self.process_args(query, history, system, **input_kwargs)
-generation_output = self.model.generate(**gen_kwargs)
+generate_output = self.model.generate(**gen_kwargs)
-outputs = generation_output.tolist()[0][prompt_length:]
+response_ids = generate_output[:, prompt_length:]
-response = self.tokenizer.decode(outputs, skip_special_tokens=True)
+response = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
-response_length = len(outputs)
+response_length = 0
+for i in range(len(response_ids)):
+eos_index = (response_ids[i] == self.tokenizer.eos_token_id).nonzero()
+response_length += eos_index[0].item() if len(eos_index) else len(response_ids[i])
 return response, (prompt_length, response_length)
 @torch.inference_mode()
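Rough usage sketch of the batched decoding path: chat now returns a list of responses, so callers can request several candidates in one call. Model arguments are assumed to be supplied elsewhere (e.g. on the command line); the prompt is illustrative.

from llmtuner import ChatModel

chat_model = ChatModel()
responses, (prompt_length, response_length) = chat_model.chat(
    "Write a haiku about autumn.",
    num_return_sequences=2  # values > 1 force do_sample=True per the change above
)
for text in responses:
    print(text)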


@@ -1,5 +1,5 @@
 import os
-from typing import TYPE_CHECKING, List, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Union
 from datasets import concatenate_datasets, interleave_datasets, load_dataset
@@ -26,22 +26,23 @@ def get_dataset(
 if dataset_attr.load_from == "hf_hub":
 data_path = dataset_attr.dataset_name
+data_name = dataset_attr.subset
 data_files = None
 elif dataset_attr.load_from == "script":
 data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
+data_name = dataset_attr.subset
 data_files = None
 elif dataset_attr.load_from == "file":
-data_path = None
+data_path, data_name = None, None
 data_files: List[str] = []
-if os.path.isdir(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # directory
+if os.path.isdir(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # is directory
 for file_name in os.listdir(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)):
 data_files.append(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name, file_name))
 if data_path is None:
 data_path = EXT2TYPE.get(file_name.split(".")[-1], None)
 else:
-assert data_path == EXT2TYPE.get(file_name.split(".")[-1], None), "file type does not match."
+assert data_path == EXT2TYPE.get(file_name.split(".")[-1], None), "file types are not identical."
-elif os.path.isfile(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # single file
+elif os.path.isfile(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # is file
 data_files.append(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name))
 data_path = EXT2TYPE.get(dataset_attr.dataset_name.split(".")[-1], None)
 else:
@@ -53,29 +54,75 @@ def get_dataset(
 raise NotImplementedError
 dataset = load_dataset(
-data_path,
+path=data_path,
+name=data_name,
 data_files=data_files,
 split=data_args.split,
 cache_dir=model_args.cache_dir,
-streaming=data_args.streaming,
-use_auth_token=True if model_args.use_auth_token else None
+token=model_args.hf_hub_token,
+streaming=data_args.streaming
 )
-if max_samples is not None:
-max_samples_temp = min(len(dataset), max_samples)
-dataset = dataset.select(range(max_samples_temp))
+if max_samples is not None: # truncate dataset
+dataset = dataset.select(range(min(len(dataset), max_samples)))
-# TODO: adapt to the sharegpt format
-for column_name in ["prompt", "query", "response", "history"]: # align datasets
-if getattr(dataset_attr, column_name) and getattr(dataset_attr, column_name) != column_name:
-dataset = dataset.rename_column(getattr(dataset_attr, column_name), column_name)
+def convert_format(examples: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+# convert dataset from sharegpt format to alpaca format
+outputs = {"prompt": [], "query": [], "response": [], "history": []}
+for msg_list in examples[dataset_attr.messages]:
+msg_list = msg_list[:len(msg_list) // 2 * 2] # should be multiples of 2
+if len(msg_list) == 0:
+continue
+msg_pairs = []
+user_role, assistant_role = None, None
+for idx in range(0, len(msg_list), 2):
+if user_role is None and assistant_role is None:
+user_role = msg_list[idx][dataset_attr.role]
+assistant_role = msg_list[idx + 1][dataset_attr.role]
+else:
+if (
+msg_list[idx][dataset_attr.role] != user_role
+or msg_list[idx+1][dataset_attr.role] != assistant_role
+):
+raise ValueError("Only accepts conversation in u/a/u/a/u/a order.")
+msg_pairs.append((msg_list[idx][dataset_attr.content], msg_list[idx + 1][dataset_attr.content]))
+if len(msg_pairs) != 0:
+outputs["prompt"].append(msg_pairs[-1][0])
+outputs["query"].append("")
+outputs["response"].append(msg_pairs[-1][1])
+outputs["history"].append(msg_pairs[:-1])
+return outputs
+if dataset_attr.formatting == "sharegpt": # convert format
+column_names = list(next(iter(dataset)).keys())
+kwargs = {}
+if not data_args.streaming:
+kwargs = dict(
+num_proc=data_args.preprocessing_num_workers,
+load_from_cache_file=(not data_args.overwrite_cache),
+desc="Converting format of dataset"
+)
+dataset = dataset.map(
+convert_format,
+batched=True,
+remove_columns=column_names,
+**kwargs
+)
+else:
+for column_name in ["prompt", "query", "response", "history"]: # align dataset
+if getattr(dataset_attr, column_name) and getattr(dataset_attr, column_name) != column_name:
+dataset = dataset.rename_column(getattr(dataset_attr, column_name), column_name)
 if dataset_attr.system_prompt: # add system prompt
+system_prompt = dataset_attr.system_prompt
 if data_args.streaming:
-dataset = dataset.map(lambda _: {"system": dataset_attr.system_prompt})
+dataset = dataset.map(lambda _: {"system": system_prompt})
 else:
-dataset = dataset.add_column("system", [dataset_attr.system_prompt] * len(dataset))
+dataset = dataset.add_column("system", [system_prompt] * len(dataset))
 all_datasets.append(dataset)
@@ -88,7 +135,11 @@ def get_dataset(
 elif data_args.mix_strategy.startswith("interleave"):
 if not data_args.streaming:
 logger.warning("We recommend using `mix_strategy=concat` in non-streaming mode.")
-stopping_strategy = "first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted"
-return interleave_datasets(all_datasets, data_args.interleave_probs, stopping_strategy=stopping_strategy)
+return interleave_datasets(
+datasets=all_datasets,
+probabilities=data_args.interleave_probs,
+seed=data_args.seed,
+stopping_strategy="first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted"
+)
 else:
 raise ValueError("Unknown mixing strategy.")


@@ -1,8 +1,12 @@
+import os
 import tiktoken
-from typing import TYPE_CHECKING, Any, Dict, Generator, List, Literal, Union
 from itertools import chain
+from typing import TYPE_CHECKING, Any, Dict, Generator, List, Literal, Union
+from datasets import load_from_disk
 from llmtuner.extras.constants import IGNORE_INDEX
+from llmtuner.extras.logging import get_logger
 from llmtuner.extras.template import get_template_and_fix_tokenizer
 if TYPE_CHECKING:
@@ -12,6 +16,9 @@ if TYPE_CHECKING:
 from llmtuner.hparams import DataArguments
+logger = get_logger(__name__)
 def preprocess_dataset(
 dataset: Union["Dataset", "IterableDataset"],
 tokenizer: "PreTrainedTokenizer",
@@ -19,7 +26,6 @@ def preprocess_dataset(
 training_args: "Seq2SeqTrainingArguments",
 stage: Literal["pt", "sft", "rm", "ppo"]
 ) -> Union["Dataset", "IterableDataset"]:
-column_names = list(next(iter(dataset)).keys())
 template = get_template_and_fix_tokenizer(data_args.template, tokenizer)
 if data_args.train_on_prompt and template.efficient_eos:
@@ -33,7 +39,7 @@ def preprocess_dataset(
 system = examples["system"][i] if "system" in examples else None
 yield query, response, history, system
-def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
+def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
 # build grouped texts with format `X1 X2 X3 ...`
 if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
 kwargs = dict(allowed_special="all")
@@ -41,6 +47,7 @@ def preprocess_dataset(
 kwargs = dict(add_special_tokens=True)
 if hasattr(tokenizer, "add_eos_token"): # for LLaMA tokenizer
+add_eos_token_flag = getattr(tokenizer, "add_eos_token")
 setattr(tokenizer, "add_eos_token", True)
 tokenized_examples = tokenizer(examples["prompt"], **kwargs)
@@ -54,16 +61,21 @@ def preprocess_dataset(
 k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
 for k, t in concatenated_examples.items()
 }
+# make sure the saved tokenizer is the same as the original one
+if hasattr(tokenizer, "add_eos_token"):
+setattr(tokenizer, "add_eos_token", add_eos_token_flag)
 return result
-def preprocess_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
+def preprocess_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
 # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
 # for multiturn examples, we only mask the prompt part in each prompt-response pair.
 model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
 for query, response, history, system in construct_example(examples):
-input_ids, labels = [], []
+if not (isinstance(query, str) and isinstance(response, str) and query != "" and response != ""):
+continue
+input_ids, labels = [], []
 for turn_idx, (source_ids, target_ids) in enumerate(template.encode_multiturn(
 tokenizer, query, response, history, system
 )):
@@ -100,12 +112,15 @@ def preprocess_dataset(
 return model_inputs
-def preprocess_packed_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
+def preprocess_packed_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
 # build inputs with format `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>`
 # and labels with format `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>`
 model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
 input_ids, labels = [], []
 for query, response, history, system in construct_example(examples):
+if not (isinstance(query, str) and isinstance(response, str) and query != "" and response != ""):
+continue
 for turn_idx, (source_ids, target_ids) in enumerate(template.encode_multiturn(
 tokenizer, query, response, history, system
 )):
@@ -134,11 +149,14 @@ def preprocess_dataset(
 return model_inputs
-def preprocess_unsupervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
+def preprocess_unsupervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
 # build inputs with format `<bos> X` and labels with format `Y <eos>`
 model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
 for query, response, history, system in construct_example(examples):
+if not (isinstance(query, str) and query != ""):
+continue
 input_ids, labels = template.encode_oneturn(tokenizer, query, response, history, system)
 if template.efficient_eos:
@@ -155,10 +173,13 @@ def preprocess_dataset(
 return model_inputs
-def preprocess_pairwise_dataset(examples):
+def preprocess_pairwise_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
 # build input pairs with format `<bos> X`, `Y1 <eos>` and `Y2 <eos>`
 model_inputs = {"prompt_ids": [], "chosen_ids": [], "rejected_ids": []}
 for query, response, history, system in construct_example(examples):
+if not (isinstance(query, str) and isinstance(response, list) and query != "" and len(response) > 1):
+continue
 prompt_ids, chosen_ids = template.encode_oneturn(tokenizer, query, response[0], history, system)
 _, rejected_ids = template.encode_oneturn(tokenizer, query, response[1], history, system)
@@ -180,9 +201,10 @@ def preprocess_dataset(
 model_inputs["prompt_ids"].append(prompt_ids)
 model_inputs["chosen_ids"].append(chosen_ids)
 model_inputs["rejected_ids"].append(rejected_ids)
 return model_inputs
-def print_supervised_dataset_example(example):
+def print_supervised_dataset_example(example: Dict[str, List[int]]) -> None:
 print("input_ids:\n{}".format(example["input_ids"]))
 print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
 print("label_ids:\n{}".format(example["labels"]))
@@ -190,7 +212,7 @@ def preprocess_dataset(
 tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False)
 ))
-def print_pairwise_dataset_example(example):
+def print_pairwise_dataset_example(example: Dict[str, List[int]]) -> None:
 print("prompt_ids:\n{}".format(example["prompt_ids"]))
 print("prompt:\n{}".format(tokenizer.decode(example["prompt_ids"], skip_special_tokens=False)))
 print("chosen_ids:\n{}".format(example["chosen_ids"]))
@@ -198,46 +220,53 @@ def preprocess_dataset(
 print("rejected_ids:\n{}".format(example["rejected_ids"]))
 print("rejected:\n{}".format(tokenizer.decode(example["rejected_ids"], skip_special_tokens=False)))
-def print_unsupervised_dataset_example(example):
+def print_unsupervised_dataset_example(example: Dict[str, List[int]]) -> None:
 print("input_ids:\n{}".format(example["input_ids"]))
 print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
 if stage == "pt":
+dataset = dataset.filter(lambda example: example["prompt"])
 preprocess_func = preprocess_pretrain_dataset
 print_function = print_unsupervised_dataset_example
 elif stage == "sft" and not training_args.predict_with_generate:
+dataset = dataset.filter(lambda example: example["prompt"] and example["response"])
 preprocess_func = preprocess_packed_supervised_dataset if data_args.sft_packing else preprocess_supervised_dataset
 print_function = print_supervised_dataset_example
 elif stage == "rm":
+dataset = dataset.filter(lambda example: example["prompt"] and len(example["response"]) > 1)
 preprocess_func = preprocess_pairwise_dataset
 print_function = print_pairwise_dataset_example
 else:
+dataset = dataset.filter(lambda example: example["prompt"])
 preprocess_func = preprocess_unsupervised_dataset
 print_function = print_unsupervised_dataset_example
+if data_args.cache_path is not None and os.path.exists(data_args.cache_path):
+logger.warning("Loading dataset from disk will ignore other data arguments.")
+return load_from_disk(data_args.cache_path)
 with training_args.main_process_first(desc="dataset map pre-processing"):
+column_names = list(next(iter(dataset)).keys())
 kwargs = {}
 if not data_args.streaming:
 kwargs = dict(
 num_proc=data_args.preprocessing_num_workers,
-load_from_cache_file=not data_args.overwrite_cache,
+load_from_cache_file=(not data_args.overwrite_cache),
 desc="Running tokenizer on dataset"
 )
 dataset = dataset.map(
 preprocess_func,
 batched=True,
 remove_columns=column_names,
 **kwargs
 )
-try:
-print_function(next(iter(dataset)))
-except StopIteration:
-raise ValueError("Empty dataset!")
+if data_args.cache_path is not None and not os.path.exists(data_args.cache_path):
+if training_args.should_save:
+dataset.save_to_disk(data_args.cache_path)
+raise SystemExit("Dataset saved, rerun this script with the same `--cache_path`.")
+if training_args.should_log:
+try:
+print_function(next(iter(dataset)))
+except StopIteration:
+raise RuntimeError("Empty dataset!")
 return dataset
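A standalone illustration of the save_to_disk / load_from_disk round trip that the new cache_path logic relies on (toy data and a throwaway path, not the project's datasets):

from datasets import Dataset, load_from_disk

ds = Dataset.from_dict({"input_ids": [[1, 2, 3]], "labels": [[1, 2, 3]]})
ds.save_to_disk("/tmp/llmtuner_cache_demo")            # first run: persist the tokenized data
reloaded = load_from_disk("/tmp/llmtuner_cache_demo")  # later runs: skip preprocessing
print(reloaded[0])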


@@ -13,9 +13,11 @@ logger = get_logger(__name__)
 EXT2TYPE = {
+"arrow": "arrow",
 "csv": "csv",
 "json": "json",
 "jsonl": "json",
+"parquet": "parquet",
 "txt": "text"
 }
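The extension map feeds directly into datasets.load_dataset, so a local parquet file now resolves to the built-in parquet builder; an illustrative call with a made-up path:

from datasets import load_dataset

dataset = load_dataset("parquet", data_files=["data/example.parquet"], split="train")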


@@ -0,0 +1 @@
from llmtuner.eval.engine import Evaluator


@@ -0,0 +1,3 @@
CHOICES = ["A", "B", "C", "D"]
SUBJECTS = ["Average", "STEM", "Social Sciences", "Humanities", "Other"]

src/llmtuner/eval/engine.py

@@ -0,0 +1,110 @@
# Inspired by: https://github.com/hendrycks/test/blob/master/evaluate_flan.py
import os
import json
import torch
import tiktoken
import numpy as np
from tqdm import tqdm, trange
from datasets import load_dataset
from typing import Any, Dict, List, Optional
from llmtuner.eval.constants import CHOICES, SUBJECTS
from llmtuner.eval.parser import get_eval_args
from llmtuner.eval.template import get_eval_template
from llmtuner.extras.misc import dispatch_model
from llmtuner.extras.template import get_template_and_fix_tokenizer
from llmtuner.tuner.core import load_model_and_tokenizer
class Evaluator:
def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
model_args, self.data_args, self.eval_args, finetuning_args = get_eval_args(args)
self.model, self.tokenizer = load_model_and_tokenizer(model_args, finetuning_args)
self.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2
self.model = dispatch_model(self.model)
self.template = get_template_and_fix_tokenizer(self.data_args.template, self.tokenizer)
self.eval_template = get_eval_template(self.eval_args.lang)
self.choice_inputs = self._encode_choices()
def _encode_choices(self) -> List[int]:
if isinstance(getattr(self.tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
kwargs = dict(allowed_special="all")
else:
kwargs = dict(add_special_tokens=False)
return [self.tokenizer.encode(self.eval_template.prefix + ch, **kwargs)[-1] for ch in CHOICES]
@torch.inference_mode()
def batch_inference(self, batch_input: Dict[str, torch.Tensor]) -> List[str]:
logits = self.model(**batch_input).logits
lengths = torch.sum(batch_input["attention_mask"], dim=-1)
word_probs = torch.stack([logits[i, lengths[i] - 1] for i in range(len(lengths))], dim=0)
choice_probs = torch.nn.functional.softmax(word_probs[:, self.choice_inputs], dim=-1).detach()
return [chr(ord("A") + offset.item()) for offset in torch.argmax(choice_probs, dim=-1)]
def eval(self) -> None:
mapping = os.path.join(self.eval_args.task_dir, self.eval_args.task, "mapping.json")
with open(mapping, "r", encoding="utf-8") as f:
categorys: Dict[str, Dict[str, str]] = json.load(f)
category_corrects = {subj: np.array([], dtype="bool") for subj in SUBJECTS}
pbar = tqdm(categorys.keys(), desc="Processing subjects", position=0)
results = {}
for subject in pbar:
dataset = load_dataset(
path=os.path.join(self.eval_args.task_dir, self.eval_args.task),
name=subject,
download_mode="force_redownload"
)
pbar.set_postfix_str(categorys[subject]["name"])
inputs, outputs, labels = [], [], []
for i in trange(len(dataset[self.data_args.split]), desc="Formatting batches", position=1, leave=False):
support_set = dataset["train"].shuffle().select(range(min(self.eval_args.n_shot, len(dataset["train"]))))
query, resp, history = self.eval_template.format_example(
target_data=dataset[self.data_args.split][i],
support_set=support_set,
subject_name=categorys[subject]["name"],
use_history=self.template.use_history
)
input_ids, _ = self.template.encode_oneturn(
tokenizer=self.tokenizer, query=query, resp=resp, history=history
)
inputs.append({"input_ids": input_ids, "attention_mask": [1] * len(input_ids)})
labels.append(resp)
for i in trange(0, len(inputs), self.eval_args.batch_size, desc="Predicting batches", position=1, leave=False):
batch_input = self.tokenizer.pad(
inputs[i : i + self.eval_args.batch_size], return_attention_mask=True, return_tensors="pt"
).to(self.model.device)
preds = self.batch_inference(batch_input)
outputs += preds
corrects = (np.array(outputs) == np.array(labels))
category_name = categorys[subject]["category"]
category_corrects[category_name] = np.concatenate([category_corrects[category_name], corrects], axis=0)
category_corrects["Average"] = np.concatenate([category_corrects["Average"], corrects], axis=0)
results[subject] = {str(i): outputs[i] for i in range(len(outputs))}
pbar.close()
self._save_results(category_corrects, results)
def _save_results(self, category_corrects: Dict[str, np.ndarray], results: Dict[str, Dict[int, str]]) -> None:
score_info = "\n".join([
"{:>15}: {:.2f}".format(category_name, 100 * np.mean(category_correct))
for category_name, category_correct in category_corrects.items() if len(category_correct)
])
print(score_info)
if self.eval_args.save_dir is not None:
os.makedirs(self.eval_args.save_dir, exist_ok=False)
with open(os.path.join(self.eval_args.save_dir, "results.json"), "w", encoding="utf-8", newline="\n") as f:
json.dump(results, f, indent=2)
with open(os.path.join(self.eval_args.save_dir, "results.log"), "w", encoding="utf-8", newline="\n") as f:
f.write(score_info)
if __name__ == "__main__":
evaluator = Evaluator()
evaluator.eval()
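Toy reproduction of the scoring rule in batch_inference: take the logits at the last non-padded position and compare only the answer-letter token ids (all numbers below are dummies):

import torch

choice_token_ids = [319, 350, 315, 360]   # hypothetical ids for " A", " B", " C", " D"
logits = torch.randn(2, 7, 32000)         # (batch, seq_len, vocab)
attention_mask = torch.tensor([[1] * 7, [1] * 5 + [0] * 2])

lengths = attention_mask.sum(dim=-1)
last_logits = torch.stack([logits[i, lengths[i] - 1] for i in range(len(lengths))], dim=0)
choice_probs = torch.softmax(last_logits[:, choice_token_ids], dim=-1)
preds = [chr(ord("A") + int(i)) for i in torch.argmax(choice_probs, dim=-1)]
print(preds)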


@@ -0,0 +1,49 @@
import transformers
from typing import Any, Dict, Optional, Tuple
from transformers import HfArgumentParser
from llmtuner.extras.misc import parse_args
from llmtuner.hparams import (
ModelArguments,
DataArguments,
EvaluationArguments,
FinetuningArguments
)
def parse_eval_args(
args: Optional[Dict[str, Any]] = None
) -> Tuple[
ModelArguments,
DataArguments,
EvaluationArguments,
FinetuningArguments
]:
parser = HfArgumentParser((
ModelArguments,
DataArguments,
EvaluationArguments,
FinetuningArguments
))
return parse_args(parser, args)
def get_eval_args(
args: Optional[Dict[str, Any]] = None
) -> Tuple[
ModelArguments,
DataArguments,
EvaluationArguments,
FinetuningArguments
]:
model_args, data_args, eval_args, finetuning_args = parse_eval_args(args)
if data_args.template is None:
raise ValueError("Please specify which `template` to use.")
if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
raise ValueError("Quantization is only compatible with the LoRA method.")
transformers.set_seed(eval_args.seed)
return model_args, data_args, eval_args, finetuning_args


@@ -0,0 +1,86 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Tuple
from llmtuner.eval.constants import CHOICES
if TYPE_CHECKING:
from datasets import Dataset
@dataclass
class EvalTemplate:
system: str
choice: str
answer: str
prefix: str
def parse_example(
self,
example: Dict[str, str]
) -> Tuple[str, str]:
candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example]
return "".join([example["question"]] + candidates + [self.answer]), example["answer"]
def format_example(
self,
target_data: Dict[str, str],
support_set: "Dataset",
subject_name: str,
use_history: bool
) -> Tuple[str, str, List[Tuple[str, str]]]:
query, resp = self.parse_example(target_data)
history = [self.parse_example(support_set[k]) for k in range(len(support_set))]
if len(history):
temp = history.pop(0)
history.insert(0, (self.system.format(subject=subject_name) + temp[0], temp[1]))
else:
query = self.system.format(subject=subject_name) + query
if not use_history:
query = "\n\n".join(["".join(item) for item in history] + [query])
history = []
return query.strip(), resp, history
eval_templates: Dict[str, EvalTemplate] = {}
def register_eval_template(
name: str,
system: str,
choice: str,
answer: str,
prefix: str
) -> None:
eval_templates[name] = EvalTemplate(
system=system,
choice=choice,
answer=answer,
prefix=prefix
)
def get_eval_template(name: str) -> EvalTemplate:
eval_template = eval_templates.get(name, None)
assert eval_template is not None, "Template {} does not exist.".format(name)
return eval_template
register_eval_template(
name="en",
system="The following are multiple choice questions (with answers) about {subject}.\n\n",
choice="\n{choice}. {content}",
answer="\nAnswer: ",
prefix=" "
)
register_eval_template(
name="zh",
system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n",
choice="\n{choice}. {content}",
answer="\n答案:",
prefix="\n"
)
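Worked example of parse_example on a made-up C-Eval style row, using the "en" template registered above:

from llmtuner.eval.template import get_eval_template

row = {"question": "2 + 2 equals", "A": "3", "B": "4", "C": "5", "D": "6", "answer": "B"}
query, answer = get_eval_template("en").parse_example(row)
print(query)   # "2 + 2 equals\nA. 3\nB. 4\nC. 5\nD. 6\nAnswer: "
print(answer)  # "B"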


@@ -1,9 +1,11 @@
+from collections import defaultdict, OrderedDict
+from typing import Dict, Optional
 IGNORE_INDEX = -100
 LOG_FILE_NAME = "trainer_log.jsonl"
-LAYERNORM_NAMES = ["norm", "ln_f", "ln_attn", "ln_mlp", "ln_1", "ln_2"]
 METHODS = ["full", "freeze", "lora"]
 TRAINING_STAGES = {
@@ -14,75 +16,222 @@ TRAINING_STAGES = {
 "Pre-Training": "pt"
 }
-SUPPORTED_MODELS = {
+LAYERNORM_NAMES = {"norm", "ln"}
"LLaMA-7B": "huggyllama/llama-7b",
"LLaMA-13B": "huggyllama/llama-13b",
"LLaMA-30B": "huggyllama/llama-30b",
"LLaMA-65B": "huggyllama/llama-65b",
"LLaMA2-7B": "meta-llama/Llama-2-7b-hf",
"LLaMA2-13B": "meta-llama/Llama-2-13b-hf",
"LLaMA2-70B": "meta-llama/Llama-2-70b-hf",
"LLaMA2-7B-Chat": "meta-llama/Llama-2-7b-chat-hf",
"LLaMA2-13B-Chat": "meta-llama/Llama-2-13b-chat-hf",
"LLaMA2-70B-Chat": "meta-llama/Llama-2-70b-chat-hf",
"ChineseLLaMA2-7B": "ziqingyang/chinese-llama-2-7b",
"ChineseLLaMA2-13B": "ziqingyang/chinese-llama-2-13b",
"ChineseLLaMA2-7B-Chat": "ziqingyang/chinese-alpaca-2-7b",
"ChineseLLaMA2-13B-Chat": "ziqingyang/chinese-alpaca-2-13b",
"BLOOM-560M": "bigscience/bloom-560m",
"BLOOM-3B": "bigscience/bloom-3b",
"BLOOM-7B1": "bigscience/bloom-7b1",
"BLOOMZ-560M": "bigscience/bloomz-560m",
"BLOOMZ-3B": "bigscience/bloomz-3b",
"BLOOMZ-7B1-mt": "bigscience/bloomz-7b1-mt",
"Falcon-7B": "tiiuae/falcon-7b",
"Falcon-40B": "tiiuae/falcon-40b",
"Falcon-7B-Chat": "tiiuae/falcon-7b-instruct",
"Falcon-40B-Chat": "tiiuae/falcon-40b-instruct",
"Baichuan-7B": "baichuan-inc/Baichuan-7B",
"Baichuan-13B": "baichuan-inc/Baichuan-13B-Base",
"Baichuan-13B-Chat": "baichuan-inc/Baichuan-13B-Chat",
"Baichuan2-7B": "baichuan-inc/Baichuan2-7B-Base",
"Baichuan2-13B": "baichuan-inc/Baichuan2-13B-Base",
"Baichuan2-7B-Chat": "baichuan-inc/Baichuan2-7B-Chat",
"Baichuan2-13B-Chat": "baichuan-inc/Baichuan2-13B-Chat",
"InternLM-7B": "internlm/internlm-7b",
"InternLM-20B": "internlm/internlm-20b",
"InternLM-7B-Chat": "internlm/internlm-chat-7b",
"InternLM-20B-Chat": "internlm/internlm-chat-20b",
"Qwen-7B": "Qwen/Qwen-7B",
"Qwen-14B": "Qwen/Qwen-14B",
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
"XVERSE-13B": "xverse/XVERSE-13B",
"XVERSE-13B-Chat": "xverse/XVERSE-13B-Chat",
"ChatGLM2-6B-Chat": "THUDM/chatglm2-6b",
"Phi1.5-1.3B": "microsoft/phi-1_5"
}
-DEFAULT_MODULE = {
+SUPPORTED_MODELS = OrderedDict()
"LLaMA": "q_proj,v_proj",
"LLaMA2": "q_proj,v_proj",
"ChineseLLaMA2": "q_proj,v_proj",
"BLOOM": "query_key_value",
"BLOOMZ": "query_key_value",
"Falcon": "query_key_value",
"Baichuan": "W_pack",
"Baichuan2": "W_pack",
"InternLM": "q_proj,v_proj",
"Qwen": "c_attn",
"XVERSE": "q_proj,v_proj",
"ChatGLM2": "query_key_value",
"Phi1.5": "Wqkv"
}
-DEFAULT_TEMPLATE = {
-"LLaMA2": "llama2",
-"ChineseLLaMA2": "llama2_zh",
-"Baichuan": "baichuan",
-"Baichuan2": "baichuan2",
-"InternLM": "intern",
-"Qwen": "chatml",
-"XVERSE": "xverse",
-"ChatGLM2": "chatglm2"
-}
+DEFAULT_MODULE = defaultdict(str)
+DEFAULT_TEMPLATE = defaultdict(str)
+def register_model_group(
+models: Dict[str, str],
+module: Optional[str] = None,
+template: Optional[str] = None
+) -> None:
prefix = None
for name, path in models.items():
if prefix is None:
prefix = name.split("-")[0]
else:
assert prefix == name.split("-")[0], "prefix should be identical."
SUPPORTED_MODELS[name] = path
if module is not None:
DEFAULT_MODULE[prefix] = module
if template is not None:
DEFAULT_TEMPLATE[prefix] = template
register_model_group(
models={
"Baichuan-7B-Base": "baichuan-inc/Baichuan-7B",
"Baichuan-13B-Base": "baichuan-inc/Baichuan-13B-Base",
"Baichuan-13B-Chat": "baichuan-inc/Baichuan-13B-Chat"
},
module="W_pack",
template="baichuan"
)
register_model_group(
models={
"Baichuan2-7B-Base": "baichuan-inc/Baichuan2-7B-Base",
"Baichuan2-13B-Base": "baichuan-inc/Baichuan2-13B-Base",
"Baichuan2-7B-Chat": "baichuan-inc/Baichuan2-7B-Chat",
"Baichuan2-13B-Chat": "baichuan-inc/Baichuan2-13B-Chat"
},
module="W_pack",
template="baichuan2"
)
register_model_group(
models={
"BLOOM-560M": "bigscience/bloom-560m",
"BLOOM-3B": "bigscience/bloom-3b",
"BLOOM-7B1": "bigscience/bloom-7b1"
},
module="query_key_value"
)
register_model_group(
models={
"BLOOMZ-560M": "bigscience/bloomz-560m",
"BLOOMZ-3B": "bigscience/bloomz-3b",
"BLOOMZ-7B1-mt": "bigscience/bloomz-7b1-mt"
},
module="query_key_value"
)
register_model_group(
models={
"BlueLM-7B-Base": "vivo-ai/BlueLM-7B-Base",
"BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat"
},
template="bluelm"
)
register_model_group(
models={
"ChatGLM2-6B-Chat": "THUDM/chatglm2-6b"
},
module="query_key_value",
template="chatglm2"
)
register_model_group(
models={
"ChatGLM3-6B-Base": "THUDM/chatglm3-6b-base",
"ChatGLM3-6B-Chat": "THUDM/chatglm3-6b"
},
module="query_key_value",
template="chatglm3"
)
register_model_group(
models={
"ChineseLLaMA2-7B": "ziqingyang/chinese-llama-2-7b",
"ChineseLLaMA2-13B": "ziqingyang/chinese-llama-2-13b",
"ChineseLLaMA2-7B-Chat": "ziqingyang/chinese-alpaca-2-7b",
"ChineseLLaMA2-13B-Chat": "ziqingyang/chinese-alpaca-2-13b"
},
template="llama2_zh"
)
register_model_group(
models={
"Falcon-7B": "tiiuae/falcon-7b",
"Falcon-40B": "tiiuae/falcon-40b",
"Falcon-180B": "tiiuae/falcon-180B",
"Falcon-7B-Chat": "tiiuae/falcon-7b-instruct",
"Falcon-40B-Chat": "tiiuae/falcon-40b-instruct",
"Falcon-180B-Chat": "tiiuae/falcon-180B-chat"
},
module="query_key_value",
template="falcon"
)
register_model_group(
models={
"InternLM-7B": "internlm/internlm-7b",
"InternLM-20B": "internlm/internlm-20b",
"InternLM-7B-Chat": "internlm/internlm-chat-7b",
"InternLM-20B-Chat": "internlm/internlm-chat-20b"
},
template="intern"
)
register_model_group(
models={
"LingoWhale-8B": "deeplang-ai/LingoWhale-8B"
},
module="qkv_proj"
)
register_model_group(
models={
"LLaMA-7B": "huggyllama/llama-7b",
"LLaMA-13B": "huggyllama/llama-13b",
"LLaMA-30B": "huggyllama/llama-30b",
"LLaMA-65B": "huggyllama/llama-65b"
}
)
register_model_group(
models={
"LLaMA2-7B": "meta-llama/Llama-2-7b-hf",
"LLaMA2-13B": "meta-llama/Llama-2-13b-hf",
"LLaMA2-70B": "meta-llama/Llama-2-70b-hf",
"LLaMA2-7B-Chat": "meta-llama/Llama-2-7b-chat-hf",
"LLaMA2-13B-Chat": "meta-llama/Llama-2-13b-chat-hf",
"LLaMA2-70B-Chat": "meta-llama/Llama-2-70b-chat-hf"
},
template="llama2"
)
register_model_group(
models={
"Mistral-7B": "mistralai/Mistral-7B-v0.1",
"Mistral-7B-Chat": "mistralai/Mistral-7B-Instruct-v0.1"
},
template="mistral"
)
register_model_group(
models={
"Phi1.5-1.3B": "microsoft/phi-1_5"
},
module="Wqkv"
)
register_model_group(
models={
"Qwen-7B": "Qwen/Qwen-7B",
"Qwen-14B": "Qwen/Qwen-14B",
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat"
},
module="c_attn",
template="qwen"
)
register_model_group(
models={
"Skywork-13B-Base": "Skywork/Skywork-13B-base"
}
)
register_model_group(
models={
"XVERSE-7B": "xverse/XVERSE-7B",
"XVERSE-13B": "xverse/XVERSE-13B",
"XVERSE-65B": "xverse/XVERSE-65B",
"XVERSE-7B-Chat": "xverse/XVERSE-7B-Chat",
"XVERSE-13B-Chat": "xverse/XVERSE-13B-Chat"
},
template="xverse"
)
register_model_group(
models={
"Yi-6B": "01-ai/Yi-6B",
"Yi-34B": "01-ai/Yi-34B"
}
)
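Small sketch of how the registry is meant to be consumed: the model-name prefix keys the default LoRA target module and chat template (values follow the register_model_group calls above):

from llmtuner.extras.constants import DEFAULT_MODULE, DEFAULT_TEMPLATE, SUPPORTED_MODELS

model_name = "Baichuan2-13B-Chat"
prefix = model_name.split("-")[0]             # "Baichuan2"
print(SUPPORTED_MODELS[model_name])           # "baichuan-inc/Baichuan2-13B-Chat"
print(DEFAULT_MODULE[prefix])                 # "W_pack"
print(DEFAULT_TEMPLATE[prefix] or "default")  # "baichuan2"; empty string for unregistered prefixes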


@@ -1,6 +1,8 @@
 import gc
+import os
+import sys
 import torch
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
 from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList
 try:
@@ -17,6 +19,7 @@ except ImportError:
 _is_bf16_available = torch.cuda.is_bf16_supported()
 if TYPE_CHECKING:
+from transformers import HfArgumentParser
 from transformers.modeling_utils import PreTrainedModel
@@ -74,7 +77,7 @@ def infer_optim_dtype(model_dtype: torch.dtype) -> torch.dtype:
 return torch.float32
-def get_logits_processor() -> LogitsProcessorList:
+def get_logits_processor() -> "LogitsProcessorList":
 r"""
 Gets logits processor that removes NaN and Inf logits.
 """
@@ -93,6 +96,17 @@ def torch_gc() -> None:
 torch.cuda.ipc_collect()
+def parse_args(parser: "HfArgumentParser", args: Optional[Dict[str, Any]] = None) -> Tuple[Any]:
+if args is not None:
+return parser.parse_dict(args)
+elif len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
+return parser.parse_yaml_file(os.path.abspath(sys.argv[1]))
+elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+return parser.parse_json_file(os.path.abspath(sys.argv[1]))
+else:
+return parser.parse_args_into_dataclasses()
 def dispatch_model(model: "PreTrainedModel") -> "PreTrainedModel":
 r"""
 Dispatches a pre-trained model to GPUs with balanced memory.
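A hedged usage sketch of the new parse_args helper: the same dataclasses can be filled from an explicit dict, from a .yaml/.json path given as the sole CLI argument, or from ordinary command-line flags (the values below are placeholders):

from transformers import HfArgumentParser
from llmtuner.extras.misc import parse_args
from llmtuner.hparams import ModelArguments, DataArguments

parser = HfArgumentParser((ModelArguments, DataArguments))
model_args, data_args = parse_args(parser, {"model_name_or_path": "path_to_model", "template": "default"})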


@@ -5,11 +5,14 @@ from typing import Optional, Tuple
 from transformers.utils import logging
 from transformers.models.llama.modeling_llama import LlamaAttention, apply_rotary_pos_emb, repeat_kv
+is_flash_attn_2_available = False
 try:
 from flash_attn import flash_attn_func, flash_attn_varlen_func # type: ignore
 from flash_attn.bert_padding import pad_input, unpad_input # type: ignore
+is_flash_attn_2_available = True
 except ImportError:
-print("FlashAttention-2 is not installed, ignore this if you are not using FlashAttention.")
+is_flash_attn_2_available = False
 logger = logging.get_logger(__name__)


@@ -1,21 +0,0 @@
import os
import torch
from transformers.trainer import WEIGHTS_NAME
from llmtuner.extras.logging import get_logger
logger = get_logger(__name__)
def load_valuehead_params(model: torch.nn.Module, checkpoint_dir: os.PathLike) -> bool:
vhead_file = os.path.join(checkpoint_dir, WEIGHTS_NAME)
if not os.path.exists(vhead_file):
logger.warning("Provided path ({}) does not contain valuehead weights.".format(checkpoint_dir))
return False
vhead_params = torch.load(vhead_file, map_location="cpu")
model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
return True

View File

@@ -225,89 +225,8 @@ def get_template_and_fix_tokenizer(
    return template
r"""
Supports language model inference without histories.
"""
register_template(
name="vanilla",
prefix=[],
prompt=[
"{{query}}"
],
system="",
sep=[],
use_history=False
)
r"""
Default template.
"""
register_template(
name="default",
prefix=[
"{{system}}"
],
prompt=[
"Human: {{query}}\nAssistant: "
],
system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
sep=[
"\n"
]
)
r"""
Supports: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
"""
register_template(
name="llama2",
prefix=[
"<<SYS>>\n{{system}}\n<</SYS>>\n\n"
],
prompt=[
"[INST] {{query}} [/INST] "
],
system=(
"You are a helpful, respectful and honest assistant. "
"Always answer as helpfully as possible, while being safe. "
"Your answers should not include any harmful, unethical, "
"racist, sexist, toxic, dangerous, or illegal content. "
"Please ensure that your responses are socially unbiased and positive in nature.\n\n"
"If a question does not make any sense, or is not factually coherent, "
"explain why instead of answering something not correct. "
"If you don't know the answer to a question, please don't share false information."
),
sep=[]
)
r"""
Supports: https://github.com/ymcui/Chinese-LLaMA-Alpaca-2
https://huggingface.co/ziqingyang/chinese-alpaca-2-7b
"""
register_template(
name="llama2_zh",
prefix=[
"<<SYS>>\n{{system}}\n<</SYS>>\n\n"
],
prompt=[
"[INST] {{query}} [/INST] "
],
system="You are a helpful assistant. 你是一个乐于助人的助手。",
sep=[]
)
r""" r"""
Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff
https://github.com/ymcui/Chinese-LLaMA-Alpaca
""" """
register_template( register_template(
name="alpaca", name="alpaca",
@@ -327,103 +246,10 @@ register_template(
) )
r"""
Supports: https://huggingface.co/lmsys/vicuna-7b-delta-v1.1
https://huggingface.co/lmsys/vicuna-13b-delta-v1.1
"""
register_template(
name="vicuna",
prefix=[
"{{system}}"
],
prompt=[
"USER: {{query}} ASSISTANT: "
],
system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
sep=[]
)
r"""
Supports: https://huggingface.co/BelleGroup/BELLE-LLaMA-EXT-13B
"""
register_template(
name="belle",
prefix=[
"{{system}}"
],
prompt=[
"Human: {{query}}\n\nBelle: "
],
system="",
sep=[
"\n\n"
]
)
r"""
Supports: https://github.com/CVI-SZU/Linly
"""
register_template(
name="linly",
prefix=[
"{{system}}"
],
prompt=[
"User: {{query}}\nBot: "
],
system="",
sep=[
"\n"
]
)
r"""
Supports: https://github.com/Neutralzz/BiLLa
"""
register_template(
name="billa",
prefix=[
"{{system}}"
],
prompt=[
"Human: {{query}}\nAssistant: "
],
system="",
sep=[
"\n"
]
)
r"""
Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1
"""
register_template(
name="ziya",
prefix=[
"{{system}}"
],
prompt=[
{"token": "<human>"},
":{{query}}\n",
{"token": "<bot>"},
":"
],
system="",
sep=[
"\n"
]
)
r""" r"""
Supports: https://huggingface.co/BAAI/AquilaChat-7B Supports: https://huggingface.co/BAAI/AquilaChat-7B
https://huggingface.co/BAAI/AquilaChat2-7B
https://huggingface.co/BAAI/AquilaChat2-34B
""" """
register_template( register_template(
name="aquila", name="aquila",
@@ -431,7 +257,7 @@ register_template(
"{{system}}" "{{system}}"
], ],
prompt=[ prompt=[
"Human: {{query}}###Assistant: " "Human: {{query}}###Assistant:"
], ],
system=( system=(
"A chat between a curious human and an artificial intelligence assistant. " "A chat between a curious human and an artificial intelligence assistant. "
@@ -447,31 +273,6 @@ register_template(
) )
r"""
Supports: https://huggingface.co/internlm/internlm-chat-7b
"""
register_template(
name="intern",
prefix=[
"{{system}}"
],
prompt=[
"<|User|>:{{query}}",
{"token": "<eoh>"},
"\n<|Bot|>:"
],
system="",
sep=[
{"token": "<eoa>"},
"\n"
],
stop_words=[
"<eoa>"
],
efficient_eos=True
)
r""" r"""
Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat
""" """
@@ -511,6 +312,300 @@ register_template(
) )
r"""
Supports: https://huggingface.co/BelleGroup/BELLE-LLaMA-EXT-13B
"""
register_template(
name="belle",
prefix=[
"{{system}}"
],
prompt=[
"Human: {{query}}\n\nBelle: "
],
system="",
sep=[
"\n\n"
]
)
r"""
Supports: https://huggingface.co/vivo-ai/BlueLM-7B-Chat
"""
register_template(
name="bluelm",
prefix=[
"{{system}}"
],
prompt=[
{"token": "[|Human|]:"},
"{{query}}",
{"token": "[|AI|]:"}
],
system="",
sep=[]
)
r"""
Supports: https://huggingface.co/THUDM/chatglm2-6b
"""
register_template(
name="chatglm2",
prefix=[
{"token": "[gMASK]"},
{"token": "sop"},
"{{system}}"
],
prompt=[
"[Round {{idx}}]\n\n问:{{query}}\n\n答:"
],
system="",
sep=[
"\n\n"
],
efficient_eos=True
)
r"""
Supports: https://huggingface.co/THUDM/chatglm3-6b
"""
register_template(
name="chatglm3",
prefix=[
{"token": "[gMASK]"},
{"token": "sop"},
"{{system}}"
],
prompt=[
{"token": "<|user|>"},
"\n",
"{{query}}",
{"token": "<|assistant|>"}
],
system="",
sep=[],
stop_words=[
"<|user|>",
"<|observation|>"
],
efficient_eos=True
)
r"""
Supports: https://huggingface.co/deepseek-ai/deepseek-coder-1.3b-instruct
https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct
https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct
"""
register_template(
name="deepseek",
prefix=[
"{{system}}"
],
prompt=[
"### Instruction:\n{{query}}\n\n### Response:\n"
],
system=(
"You are an AI programming assistant, utilizing the Deepseek Coder model, "
"developed by Deepseek Company, and you only answer questions related to computer science. "
"For politically sensitive questions, security and privacy issues, "
"and other non-computer science questions, you will refuse to answer."
),
sep=[
"\n",
{"token": "<|EOT|>"},
"\n\n"
],
stop_words=[
"<|EOT|>"
],
efficient_eos=True
)
r"""
Default template.
"""
register_template(
name="default",
prefix=[
"{{system}}"
],
prompt=[
"Human: {{query}}\nAssistant:"
],
system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
sep=[
"\n"
]
)
r"""
Supports: https://huggingface.co/tiiuae/falcon-180B-chat
"""
register_template(
name="falcon",
prefix=[
"{{system}}"
],
prompt=[
"User: {{query}}\nFalcon:"
],
system="",
sep=[
"\n"
],
efficient_eos=True
)
r"""
Supports: https://huggingface.co/internlm/internlm-chat-7b
https://huggingface.co/internlm/internlm-chat-20b
"""
register_template(
name="intern",
prefix=[
"{{system}}"
],
prompt=[
"<|User|>:{{query}}",
{"token": "<eoh>"},
"\n<|Bot|>:"
],
system="",
sep=[
{"token": "<eoa>"},
"\n"
],
stop_words=[
"<eoa>"
],
efficient_eos=True
)
r"""
Supports: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
"""
register_template(
name="llama2",
prefix=[
"<<SYS>>\n{{system}}\n<</SYS>>\n\n"
],
prompt=[
"[INST] {{query}} [/INST]"
],
system=(
"You are a helpful, respectful and honest assistant. "
"Always answer as helpfully as possible, while being safe. "
"Your answers should not include any harmful, unethical, "
"racist, sexist, toxic, dangerous, or illegal content. "
"Please ensure that your responses are socially unbiased and positive in nature.\n\n"
"If a question does not make any sense, or is not factually coherent, "
"explain why instead of answering something not correct. "
"If you don't know the answer to a question, please don't share false information."
),
sep=[]
)
r"""
Supports: https://huggingface.co/ziqingyang/chinese-alpaca-2-7b
https://huggingface.co/ziqingyang/chinese-alpaca-2-13b
"""
register_template(
name="llama2_zh",
prefix=[
"<<SYS>>\n{{system}}\n<</SYS>>\n\n"
],
prompt=[
"[INST] {{query}} [/INST]"
],
system="You are a helpful assistant. 你是一个乐于助人的助手。",
sep=[]
)
r"""
Supports: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
"""
register_template(
name="mistral",
prefix=[
"{{system}}"
],
prompt=[
"[INST] {{query}} [/INST]"
],
system="",
sep=[]
)
r"""
Supports: https://huggingface.co/openchat/openchat_3.5
"""
register_template(
name="openchat",
prefix=[
"{{system}}"
],
prompt=[
"GPT4 Correct User: {{query}}",
{"token": "<|end_of_turn|>"},
"GPT4 Correct Assistant:"
],
system="",
sep=[
{"token": "<|end_of_turn|>"}
],
stop_words=[
"<|end_of_turn|>"
],
efficient_eos=True
)
r"""
Supports: https://huggingface.co/Qwen/Qwen-7B-Chat
https://huggingface.co/Qwen/Qwen-14B-Chat
"""
register_template(
name="qwen",
prefix=[
{"token": "<|im_start|>"},
"system\n{{system}}"
],
prompt=[
{"token": "<|im_start|>"},
"user\n{{query}}",
{"token": "<|im_end|>"},
"\n",
{"token": "<|im_start|>"},
"assistant\n"
],
system="You are a helpful assistant.",
sep=[
{"token": "<|im_end|>"},
"\n"
],
stop_words=[
"<|im_end|>"
],
efficient_eos=True
)
r""" r"""
Supports: https://huggingface.co/HuggingFaceH4/starchat-alpha Supports: https://huggingface.co/HuggingFaceH4/starchat-alpha
https://huggingface.co/HuggingFaceH4/starchat-beta https://huggingface.co/HuggingFaceH4/starchat-beta
@@ -541,57 +636,43 @@ register_template(
r""" r"""
Supports: https://huggingface.co/Qwen/Qwen-7B-Chat Supports language model inference without histories.
""" """
register_template( register_template(
name="chatml", name="vanilla",
prefix=[ prefix=[],
{"token": "<|im_start|>"},
"system\n{{system}}"
],
prompt=[ prompt=[
{"token": "<|im_start|>"}, "{{query}}"
"user\n{{query}}",
{"token": "<|im_end|>"},
"\n",
{"token": "<|im_start|>"},
"assistant\n"
], ],
system="You are a helpful assistant.", system="",
sep=[ sep=[],
{"token": "<|im_end|>"}, use_history=False
"\n"
],
stop_words=[
"<|im_end|>"
],
efficient_eos=True
) )
r""" r"""
Supports: https://huggingface.co/THUDM/chatglm2-6b Supports: https://huggingface.co/lmsys/vicuna-7b-v1.5
https://huggingface.co/lmsys/vicuna-13b-v1.5
""" """
register_template( register_template(
name="chatglm2", name="vicuna",
prefix=[ prefix=[
{"token": "[gMASK]"},
{"token": "sop"},
"{{system}}" "{{system}}"
], ],
prompt=[ prompt=[
"[Round {{idx}}]\n\n问:{{query}}\n\n答:" "USER: {{query}} ASSISTANT:"
], ],
system="", system=(
sep=[ "A chat between a curious user and an artificial intelligence assistant. "
"\n\n" "The assistant gives helpful, detailed, and polite answers to the user's questions."
], ),
efficient_eos=True sep=[]
) )
r""" r"""
Supports: https://huggingface.co/xverse/XVERSE-13B-Chat Supports: https://huggingface.co/xverse/XVERSE-7B-Chat
https://huggingface.co/xverse/XVERSE-13B-Chat
""" """
register_template( register_template(
name="xverse", name="xverse",
@@ -604,3 +685,85 @@ register_template(
system="", system="",
sep=[] sep=[]
) )
r"""
Supports: https://huggingface.co/wenge-research/yayi-7b
https://huggingface.co/wenge-research/yayi-7b-llama2
https://huggingface.co/wenge-research/yayi-13b-llama2
"""
register_template(
name="yayi",
prefix=[
{"token": "<|System|>"},
":\n{{system}}"
],
prompt=[
{"token": "<|Human|>"},
":\n{{query}}\n\n",
{"token": "<|YaYi|>"},
":"
],
system=(
"You are a helpful, respectful and honest assistant named YaYi "
"developed by Beijing Wenge Technology Co.,Ltd. "
"Always answer as helpfully as possible, while being safe. "
"Your answers should not include any harmful, unethical, "
"racist, sexist, toxic, dangerous, or illegal content. "
"Please ensure that your responses are socially unbiased and positive in nature.\n\n"
"If a question does not make any sense, or is not factually coherent, "
"explain why instead of answering something not correct. "
"If you don't know the answer to a question, please don't share false information."
),
sep=[
"\n\n"
],
stop_words=[
"<|End|>"
]
)
r"""
Supports: https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha
https://huggingface.co/HuggingFaceH4/zephyr-7b-beta
"""
register_template(
name="zephyr",
prefix=[
{"token": "<|system|>"},
"\n{{system}}",
{"token": "</s>"}
],
prompt=[
{"token": "<|user|>"},
"\n{{query}}",
{"token": "</s>"},
{"token": "<|assistant|>"}
],
system="You are a friendly chatbot who always responds in the style of a pirate",
sep=[]
)
r"""
Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1
https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1
https://huggingface.co/IDEA-CCNL/Ziya2-13B-Chat
"""
register_template(
name="ziya",
prefix=[
"{{system}}"
],
prompt=[
{"token": "<human>"},
":{{query}}\n",
{"token": "<bot>"},
":"
],
system="",
sep=[
"\n"
]
)
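To make these template definitions concrete: for a single-turn exchange, the qwen entry above expands its prefix, sep and prompt pieces into the usual ChatML layout. A hand-expanded illustration follows; the helper function below is not part of the repository, and the exact joining of the prefix/sep/prompt fragments is assumed from the field layout shown in the diff:

def render_qwen(query: str, system: str = "You are a helpful assistant.") -> str:
    # prefix + sep + prompt, with {{system}} and {{query}} substituted
    return (
        "<|im_start|>system\n" + system + "<|im_end|>\n"
        "<|im_start|>user\n" + query + "<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

print(render_qwen("Hello"))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant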

View File

@@ -1,4 +1,5 @@
from .data_args import DataArguments
+from .evaluation_args import EvaluationArguments
from .finetuning_args import FinetuningArguments
from .generating_args import GeneratingArguments
from .model_args import ModelArguments

View File

@@ -11,11 +11,17 @@ class DatasetAttr:
    dataset_name: Optional[str] = None
    dataset_sha1: Optional[str] = None
    system_prompt: Optional[str] = None
+    subset: Optional[str] = None
    ranking: Optional[bool] = False
+    formatting: Optional[Literal["alpaca", "sharegpt"]] = "alpaca"

    prompt: Optional[str] = "instruction"
    query: Optional[str] = "input"
    response: Optional[str] = "output"
    history: Optional[str] = None
+    messages: Optional[str] = "conversations"
+    role: Optional[str] = "from"
+    content: Optional[str] = "value"

    def __repr__(self) -> str:
        return self.dataset_name
@@ -36,7 +42,7 @@ class DataArguments:
    )
    dataset_dir: Optional[str] = field(
        default="data",
-        metadata={"help": "The name of the folder containing datasets."}
+        metadata={"help": "Path to the folder containing the datasets."}
    )
    split: Optional[str] = field(
        default="train",
@@ -60,7 +66,7 @@ class DataArguments:
    )
    mix_strategy: Optional[Literal["concat", "interleave_under", "interleave_over"]] = field(
        default="concat",
-        metadata={"help": "Strategy to use in dataset mixing."}
+        metadata={"help": "Strategy to use in dataset mixing (concat/interleave) (undersampling/oversampling)."}
    )
    interleave_probs: Optional[str] = field(
        default=None,
@@ -98,6 +104,10 @@ class DataArguments:
        default=False,
        metadata={"help": "Packing the questions and answers in the supervised fine-tuning stage."}
    )
+    cache_path: Optional[str] = field(
+        default=None,
+        metadata={"help": "Path to save or load the preprocessed datasets."}
+    )

    def __post_init__(self):
        if self.streaming and self.val_size > 1e-6 and self.val_size < 1:
@@ -106,12 +116,18 @@ class DataArguments:
        if self.streaming and self.max_samples is not None:
            raise ValueError("`max_samples` is incompatible with `streaming`.")

-    def init_for_training(self): # support mixing multiple datasets
+        if self.streaming and self.cache_path:
+            raise ValueError("`cache_path` is incompatible with `streaming`.")
+
+    def init_for_training(self, seed: int): # support mixing multiple datasets
+        self.seed = seed
        dataset_names = [ds.strip() for ds in self.dataset.split(",")] if self.dataset is not None else []
        try:
            with open(os.path.join(self.dataset_dir, "dataset_info.json"), "r") as f:
                dataset_info = json.load(f)
        except Exception:
+            if self.dataset is not None:
+                raise ValueError("Cannot find dataset_info.json in `dataset_dir`.")
            dataset_info = None

        prompt_list = self.system_prompt.split("|") if self.system_prompt else [None]
@@ -142,7 +158,12 @@ class DataArguments:
                dataset_attr.query = dataset_info[name]["columns"].get("query", None)
                dataset_attr.response = dataset_info[name]["columns"].get("response", None)
                dataset_attr.history = dataset_info[name]["columns"].get("history", None)
+                dataset_attr.messages = dataset_info[name]["columns"].get("messages", None)
+                dataset_attr.role = dataset_info[name]["columns"].get("role", None)
+                dataset_attr.content = dataset_info[name]["columns"].get("content", None)

+            dataset_attr.subset = dataset_info[name].get("subset", None)
            dataset_attr.ranking = dataset_info[name].get("ranking", False)
+            dataset_attr.formatting = dataset_info[name].get("formatting", "alpaca")
            dataset_attr.system_prompt = prompt_list[i]
            self.dataset_list.append(dataset_attr)
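Given the new DatasetAttr fields and the parsing logic above, a dataset_info.json entry for a sharegpt-style corpus could look roughly like the sketch below. Only the "columns", "subset", "ranking" and "formatting" keys are introduced by this diff; the "hf_hub_url" location key and the dataset/column names are assumptions for illustration:

import json

dataset_info = json.loads("""
{
  "example_sharegpt": {
    "hf_hub_url": "someuser/example-dataset",
    "subset": "default",
    "ranking": false,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "role": "from",
      "content": "value"
    }
  }
}
""")

attr = dataset_info["example_sharegpt"]
print(attr.get("formatting", "alpaca"))       # sharegpt
print(attr["columns"].get("messages", None))  # conversations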

View File

@@ -0,0 +1,55 @@
import os
from typing import Literal, Optional
from dataclasses import dataclass, field
from datasets import DownloadMode
@dataclass
class EvaluationArguments:
r"""
Arguments pertaining to specify the evaluation parameters.
"""
task: str = field(
metadata={"help": "Name of the evaluation task."}
)
task_dir: Optional[str] = field(
default="evaluation",
metadata={"help": "Path to the folder containing the evaluation datasets."}
)
batch_size: Optional[int] = field(
default=4,
metadata={"help": "The batch size per GPU for evaluation."}
)
seed: Optional[int] = field(
default=42,
metadata={"help": "Random seed to be used with data loaders."}
)
lang: Optional[Literal["en", "zh"]] = field(
default="en",
metadata={"help": "Language used at evaluation."}
)
n_shot: Optional[int] = field(
default=5,
metadata={"help": "Number of examplars for few-shot learning."}
)
save_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to save the evaluation results."}
)
download_mode: Optional[DownloadMode] = field(
default=DownloadMode.REUSE_DATASET_IF_EXISTS,
metadata={"help": "Download mode used for the evaluation datasets."}
)
def __post_init__(self):
task_available = []
for folder in os.listdir(self.task_dir):
if os.path.isdir(os.path.join(self.task_dir, folder)):
task_available.append(folder)
if self.task not in task_available:
raise ValueError("Task {} not found in {}.".format(self.task, self.task_dir))
if self.save_dir is not None and os.path.exists(self.save_dir):
raise ValueError("`save_dir` already exists, use another one.")

View File

@@ -12,7 +12,7 @@ class FinetuningArguments:
        default="sft",
        metadata={"help": "Which stage will be performed in training."}
    )
-    finetuning_type: Optional[Literal["lora", "freeze", "full", "none"]] = field(
+    finetuning_type: Optional[Literal["lora", "freeze", "full"]] = field(
        default="lora",
        metadata={"help": "Which fine-tuning method to use."}
    )
@@ -24,10 +24,10 @@ class FinetuningArguments:
        default="mlp",
        metadata={"help": "Name of trainable modules for partial-parameter (freeze) fine-tuning. \
                  LLaMA choices: [\"mlp\", \"self_attn\"], \
-                  BLOOM & Falcon & ChatGLM2 choices: [\"mlp\", \"self_attention\"], \
+                  BLOOM & Falcon & ChatGLM choices: [\"mlp\", \"self_attention\"], \
                  Qwen choices: [\"mlp\", \"attn\"], \
                  Phi-1.5 choices: [\"mlp\", \"mixer\"], \
-                  LLaMA-2, Baichuan, InternLM, XVERSE choices: the same as LLaMA."}
+                  LLaMA-2, BlueLM, Baichuan, InternLM, Mistral, Skywork, XVERSE, Yi choices: the same as LLaMA."}
    )
    lora_rank: Optional[int] = field(
        default=8,
@@ -45,11 +45,11 @@ class FinetuningArguments:
        default=None,
        metadata={"help": "Name(s) of target modules to apply LoRA. Use commas to separate multiple modules. \
                  LLaMA choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
-                  BLOOM & Falcon & ChatGLM2 choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
+                  BLOOM & Falcon & ChatGLM choices: [\"query_key_value\", \"dense\", \"dense_h_to_4h\", \"dense_4h_to_h\"], \
                  Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
                  Qwen choices: [\"c_attn\", \"attn.c_proj\", \"w1\", \"w2\", \"mlp.c_proj\"], \
                  Phi-1.5 choices: [\"Wqkv\", \"out_proj\", \"fc1\", \"fc2\"], \
-                  LLaMA-2, InternLM, XVERSE choices: the same as LLaMA."}
+                  LLaMA-2, BlueLM, InternLM, Mistral, Skywork, XVERSE, Yi choices: the same as LLaMA."}
    )
    additional_target: Optional[str] = field(
        default=None,
@@ -75,6 +75,22 @@ class FinetuningArguments:
        default=0.1,
        metadata={"help": "The beta parameter for the DPO loss."}
    )
+    dpo_ref_model: Optional[str] = field(
+        default=None,
+        metadata={"help": "Path to the reference model used for the DPO training."}
+    )
+    dpo_ref_model_checkpoint: Optional[str] = field(
+        default=None,
+        metadata={"help": "Path to the directory(s) containing the model checkpoints of the reference model."}
+    )
+    upcast_layernorm: Optional[bool] = field(
+        default=False,
+        metadata={"help": "Whether to upcast the layernorm weights in fp32."}
+    )
+    neft_alpha: Optional[float] = field(
+        default=0,
+        metadata={"help": "The alpha parameter to control the noise magnitude in NEFTune."}
+    )

    def __post_init__(self):
        if isinstance(self.lora_target, str): # support custom target modules/layers of LoRA
@@ -83,7 +99,7 @@ class FinetuningArguments:
        if isinstance(self.additional_target, str):
            self.additional_target = [target.strip() for target in self.additional_target.split(",")]

-        assert self.finetuning_type in ["lora", "freeze", "full", "none"], "Invalid fine-tuning method."
+        assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."

    def save_to_json(self, json_path: str):
        r"""Saves the content of this instance in JSON format inside `json_path`."""

View File

@@ -1,13 +0,0 @@
from typing import Literal, Optional
from dataclasses import dataclass, field
@dataclass
class GeneralArguments:
r"""
Arguments pertaining to which stage we are going to perform.
"""
stage: Optional[Literal["pt", "sft", "rm", "ppo", "dpo"]] = field(
default="sft",
metadata={"help": "Which stage will be performed in training."}
)

View File

@@ -28,7 +28,7 @@ class GeneratingArguments:
        metadata={"help": "Number of beams for beam search. 1 means no beam search."}
    )
    max_length: Optional[int] = field(
-        default=None,
+        default=512,
        metadata={"help": "The maximum length the generated tokens can have. It can be overridden by max_new_tokens."}
    )
    max_new_tokens: Optional[int] = field(
@@ -46,6 +46,8 @@ class GeneratingArguments:
    def to_dict(self) -> Dict[str, Any]:
        args = asdict(self)
-        if args.get("max_new_tokens", None):
+        if args.get("max_new_tokens", -1) > 0:
            args.pop("max_length", None)
+        else:
+            args.pop("max_new_tokens", None)
        return args
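The effect of the revised to_dict is that exactly one of the two length controls survives. A short illustration, assuming the other generation fields keep their defaults:

from llmtuner.hparams import GeneratingArguments

gen_args = GeneratingArguments(max_new_tokens=256)
print("max_length" in gen_args.to_dict())    # False: max_new_tokens > 0 wins

gen_args = GeneratingArguments(max_new_tokens=0)
print(gen_args.to_dict().get("max_length"))  # 512: falls back to the new max_length default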

View File

@@ -1,5 +1,5 @@
-from typing import Literal, Optional
-from dataclasses import dataclass, field
+from typing import Any, Dict, Literal, Optional
+from dataclasses import asdict, dataclass, field


@dataclass
@@ -22,10 +22,6 @@ class ModelArguments:
        default=False,
        metadata={"help": "Whether or not the special tokens should be split during the tokenization process."}
    )
-    use_auth_token: Optional[bool] = field(
-        default=False,
-        metadata={"help": "Will use the token generated when running `huggingface-cli login`."}
-    )
    model_revision: Optional[str] = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
@@ -48,7 +44,7 @@ class ModelArguments:
    )
    checkpoint_dir: Optional[str] = field(
        default=None,
-        metadata={"help": "Path to the directory(s) containing the delta model checkpoints as well as the configurations."}
+        metadata={"help": "Path to the directory(s) containing the model checkpoints as well as the configurations."}
    )
    flash_attn: Optional[bool] = field(
        default=False,
@@ -58,19 +54,15 @@ class ModelArguments:
        default=False,
        metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}
    )
-    reward_model: Optional[str] = field(
+    reward_model: Optional[str] = field( # TODO: move it to FinetuningArguments
        default=None,
        metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
    )
-    upcast_layernorm: Optional[bool] = field(
-        default=False,
-        metadata={"help": "Whether to upcast the layernorm weights in fp32."}
-    )
-    plot_loss: Optional[bool] = field(
+    plot_loss: Optional[bool] = field( # TODO: move it to FinetuningArguments
        default=False,
        metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
    )
-    hf_auth_token: Optional[str] = field(
+    hf_hub_token: Optional[str] = field(
        default=None,
        metadata={"help": "Auth token to log in with Hugging Face Hub."}
    )
@@ -92,6 +84,5 @@ class ModelArguments:
        if self.quantization_bit is not None:
            assert self.quantization_bit in [4, 8], "We only accept 4-bit or 8-bit quantization."

-        if self.use_auth_token == True and self.hf_auth_token is not None:
-            from huggingface_hub.hf_api import HfFolder # lazy load
-            HfFolder.save_token(self.hf_auth_token)
+    def to_dict(self) -> Dict[str, Any]:
+        return asdict(self)

View File

@@ -1,2 +1,3 @@
from llmtuner.tuner.core.parser import get_train_args, get_infer_args
from llmtuner.tuner.core.loader import load_model_and_tokenizer
+from llmtuner.tuner.core.utils import generate_model_card

View File

@@ -1,6 +1,9 @@
+import os
import torch
from typing import TYPE_CHECKING
+from transformers.utils import cached_file
+from transformers.trainer import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
from peft import (
    PeftModel,
    TaskType,
@@ -23,8 +26,7 @@ def init_adapter(
    model: "PreTrainedModel",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
-    is_trainable: bool,
-    is_mergeable: bool
+    is_trainable: bool
) -> "PreTrainedModel":
    r"""
    Initializes the adapters.
@@ -34,14 +36,15 @@ def init_adapter(
    Note that the trainable parameters must be cast to float32.
    """

-    if finetuning_args.finetuning_type == "none" and is_trainable:
-        raise ValueError("You cannot use finetuning_type=none while training.")
+    if (not is_trainable) and model_args.checkpoint_dir is None:
+        logger.info("Checkpoint is not found at evaluation, load the original model.")
+        return model

    if finetuning_args.finetuning_type == "full" and is_trainable:
        logger.info("Fine-tuning method: Full")
        model = model.float()

-    if finetuning_args.finetuning_type == "freeze":
+    if finetuning_args.finetuning_type == "freeze" and is_trainable:
        logger.info("Fine-tuning method: Freeze")
        num_layers = getattr(model.config, "num_layers")
        if finetuning_args.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
@@ -58,11 +61,11 @@ def init_adapter(
    if finetuning_args.finetuning_type == "lora":
        logger.info("Fine-tuning method: LoRA")
-        latest_checkpoint = None
+        checkpoint_to_resume = None

        if model_args.checkpoint_dir is not None:
-            if (is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable): # continually fine-tuning
-                checkpoints_to_merge, latest_checkpoint = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1]
+            if is_trainable and finetuning_args.resume_lora_training:
+                checkpoints_to_merge, checkpoint_to_resume = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1]
            else:
                checkpoints_to_merge = model_args.checkpoint_dir
@@ -73,10 +76,10 @@ def init_adapter(
            if len(checkpoints_to_merge) > 0:
                logger.info("Merged {} model checkpoint(s).".format(len(checkpoints_to_merge)))

-            if latest_checkpoint is not None: # resume lora training or quantized inference
-                model = PeftModel.from_pretrained(model, latest_checkpoint, is_trainable=is_trainable)
+            if checkpoint_to_resume is not None: # resume lora training
+                model = PeftModel.from_pretrained(model, checkpoint_to_resume, is_trainable=is_trainable)

-        if is_trainable and latest_checkpoint is None: # create new lora weights while training
+        if is_trainable and checkpoint_to_resume is None: # create new lora weights while training
            if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
                target_modules = find_all_linear_modules(model, model_args.quantization_bit)
            else:
@@ -92,10 +95,35 @@ def init_adapter(
                modules_to_save=finetuning_args.additional_target
            )
            model = get_peft_model(model, lora_config)
+            if id(model.peft_config) != id(model.base_model.peft_config): # https://github.com/huggingface/peft/issues/923
+                model.base_model.peft_config = model.peft_config

    if model_args.checkpoint_dir is not None:
        logger.info("Loaded fine-tuned model from checkpoint(s): {}".format(",".join(model_args.checkpoint_dir)))

    return model
def load_valuehead_params(
model: "PreTrainedModel",
model_args: "ModelArguments"
) -> bool:
kwargs = {
"path_or_repo_id": model_args.reward_model,
"cache_dir": model_args.cache_dir,
"token": model_args.hf_hub_token,
"revision": model_args.model_revision
}
try:
vhead_file = cached_file(filename=WEIGHTS_NAME, **kwargs)
except:
try:
vhead_file = cached_file(filename=SAFE_WEIGHTS_NAME, **kwargs)
except:
logger.warning("Provided path ({}) does not contain valuehead weights.".format(model_args.reward_model))
return False
vhead_params = torch.load(vhead_file, map_location="cpu")
model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
return True
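The resume logic above treats checkpoint_dir as an ordered list: when resume_lora_training is enabled during training, every checkpoint except the last is merged into the base weights and the last one is reloaded as a trainable adapter. A small sketch that mirrors the branch (paths are illustrative; the merging itself happens via merge_and_unload / PeftModel.from_pretrained in the function above):

def split_checkpoints(checkpoint_dir, is_trainable, resume_lora_training):
    # Mirrors the branch in init_adapter: merge all but the last checkpoint,
    # then resume training from the last one if requested.
    if is_trainable and resume_lora_training:
        return checkpoint_dir[:-1], checkpoint_dir[-1]
    return checkpoint_dir, None

merge, resume = split_checkpoints(
    ["saves/llama2-lora/step-1000", "saves/llama2-lora/step-2000"],  # example paths
    is_trainable=True,
    resume_lora_training=True,
)
print(merge)   # ['saves/llama2-lora/step-1000']  -> merged into the base model
print(resume)  # saves/llama2-lora/step-2000      -> loaded with is_trainable=True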

View File

@@ -14,8 +14,8 @@ from transformers import (
    PreTrainedTokenizerBase
)
from transformers.models.llama import modeling_llama as LlamaModule
-from transformers.utils import check_min_version
from transformers.utils.versions import require_version
+from peft import PeftModel
from trl import AutoModelForCausalLMWithValueHead

try:
@@ -26,9 +26,8 @@ except ImportError: # https://github.com/huggingface/transformers/releases/tag/v
from llmtuner.extras.logging import reset_logging, get_logger
from llmtuner.extras.misc import count_parameters, infer_optim_dtype
from llmtuner.extras.patches import llama_patch as LlamaPatches
-from llmtuner.extras.save_and_load import load_valuehead_params
from llmtuner.hparams import FinetuningArguments
-from llmtuner.tuner.core.adapter import init_adapter
+from llmtuner.tuner.core.adapter import init_adapter, load_valuehead_params
from llmtuner.tuner.core.utils import prepare_model_for_training

if TYPE_CHECKING:
@@ -39,11 +38,11 @@ if TYPE_CHECKING:
logger = get_logger(__name__)

-check_min_version("4.31.0")
-require_version("datasets>=2.12.0", "To fix: pip install datasets>=2.12.0")
+require_version("transformers>=4.31.0,<4.35.0", "To fix: pip install \"transformers>=4.31.0,<4.35.0\"")
+require_version("datasets>=2.14.0", "To fix: pip install datasets>=2.14.0")
require_version("accelerate>=0.21.0", "To fix: pip install accelerate>=0.21.0")
-require_version("peft>=0.4.0", "To fix: pip install peft>=0.4.0")
-require_version("trl>=0.7.1", "To fix: pip install trl>=0.7.1")
+require_version("peft>=0.6.0", "To fix: pip install peft>=0.6.0")
+require_version("trl>=0.7.4", "To fix: pip install trl>=0.7.4")


def load_model_and_tokenizer(
@@ -57,15 +56,12 @@ def load_model_and_tokenizer(
    Support both training and inference.
    """

-    if (not is_trainable) and model_args.checkpoint_dir is None:
-        logger.warning("Checkpoint is not found at evaluation, load the original model.")
-        finetuning_args = FinetuningArguments(finetuning_type="none")
-
    config_kwargs = {
        "trust_remote_code": True,
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
-        "use_auth_token": True if model_args.use_auth_token else None,
+        "token": model_args.hf_hub_token
    }

    tokenizer = AutoTokenizer.from_pretrained(
@@ -83,7 +79,7 @@ def load_model_and_tokenizer(
    config = AutoConfig.from_pretrained(model_to_load, **config_kwargs)

-    # Fix tokenizer (for ChatGLM2)
+    # Fix tokenizer (for ChatGLM2 and ChatGLM3)
    if getattr(config, "model_type", None) == "chatglm":
        tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)
@@ -100,15 +96,9 @@ def load_model_and_tokenizer(
    # Set RoPE scaling
    if model_args.rope_scaling is not None:
-        if hasattr(config, "use_dynamic_ntk"): # for Qwen models
-            if is_trainable:
-                logger.warning("Qwen model does not support RoPE scaling in training.")
-            else:
-                setattr(config, "use_dynamic_ntk", True)
-                setattr(config, "use_logn_attn", True)
-                logger.info("Using dynamic NTK scaling.")
-        elif hasattr(config, "rope_scaling"): # for LLaMA and Falcon models
+        if not hasattr(config, "rope_scaling"):
+            logger.warning("Current model does not support RoPE scaling.")
+        else:
            if is_trainable:
                if model_args.rope_scaling == "dynamic":
                    logger.warning(
@@ -130,17 +120,17 @@ def load_model_and_tokenizer(
                model_args.rope_scaling, scaling_factor
            ))
-        else:
-            logger.warning("Current model does not support RoPE scaling.")

    # Set FlashAttention-2
    if model_args.flash_attn:
        if getattr(config, "model_type", None) == "llama":
-            LlamaModule.LlamaAttention = LlamaPatches.LlamaFlashAttention2
-            LlamaModule.LlamaModel._prepare_decoder_attention_mask = LlamaPatches._prepare_decoder_attention_mask
-            logger.info("Using FlashAttention-2 for faster training and inference.")
-        elif getattr(config, "model_type", None) == "qwen":
-            logger.info("Qwen models automatically enable FlashAttention if installed.")
+            if LlamaPatches.is_flash_attn_2_available:
+                LlamaModule.LlamaAttention = LlamaPatches.LlamaFlashAttention2
+                LlamaModule.LlamaModel._prepare_decoder_attention_mask = LlamaPatches._prepare_decoder_attention_mask
+                logger.info("Using FlashAttention-2 for faster training and inference.")
+            else:
+                logger.warning("FlashAttention-2 is not installed.")
+        elif getattr(config, "model_type", None) in ["qwen", "Yi"]:
+            logger.info("Current model automatically enables FlashAttention if installed.")
        else:
            logger.warning("Current model does not support FlashAttention-2.")
    elif is_trainable and model_args.shift_attn and getattr(config, "model_type", None) == "llama":
@@ -156,7 +146,6 @@ def load_model_and_tokenizer(
        logger.warning("Current model does not support shift short attention.")

    # Quantization configurations (using bitsandbytes library).
-    is_mergeable = True
    if model_args.quantization_bit is not None:
        if is_deepspeed_zero3_enabled():
            raise ValueError("DeepSpeed ZeRO-3 is incompatible with quantization.")
@@ -166,7 +155,7 @@ def load_model_and_tokenizer(
            config_kwargs["load_in_8bit"] = True
            config_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)

-        elif model_args.quantization_bit == 4:
+        if model_args.quantization_bit == 4:
            require_version("bitsandbytes>=0.39.0", "To fix: pip install bitsandbytes>=0.39.0")
            config_kwargs["load_in_4bit"] = True
            config_kwargs["quantization_config"] = BitsAndBytesConfig(
@@ -176,7 +165,6 @@ def load_model_and_tokenizer(
                bnb_4bit_quant_type=model_args.quantization_type
            )

-        is_mergeable = False
        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))} if is_trainable else "auto"
        logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))
@@ -193,9 +181,10 @@ def load_model_and_tokenizer(
    if isinstance(model, PreTrainedModel) and "GenerationMixin" not in str(model.generate.__func__):
        model.generate = MethodType(PreTrainedModel.generate, model)

-    # Fix LM head (for ChatGLM2)
+    # Fix LM head (for ChatGLM2 and ChatGLM3)
    if getattr(config, "model_type", None) == "chatglm":
        setattr(model, "lm_head", model.transformer.output_layer)
+        setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])

    # Register auto class to save the custom code files.
    if isinstance(config, PretrainedConfig) and "AutoConfig" in getattr(config, "auto_map", {}):
@@ -206,19 +195,17 @@ def load_model_and_tokenizer(
        tokenizer.__class__.register_for_auto_class()

    # Initialize adapters
-    if is_trainable:
-        model = prepare_model_for_training(model, model_args.upcast_layernorm, finetuning_args.finetuning_type)
-    model = init_adapter(model, model_args, finetuning_args, is_trainable, is_mergeable)
+    model = prepare_model_for_training(model=model, finetuning_args=finetuning_args) if is_trainable else model
+    model = init_adapter(model, model_args, finetuning_args, is_trainable)
    model = model.train() if is_trainable else model.eval()

    # Prepare model with valuehead for RLHF
    if stage == "rm" or stage == "ppo":
-        model = AutoModelForCausalLMWithValueHead.from_pretrained(model)
+        model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(model)
+        model._keys_to_ignore_on_save = None
        reset_logging()
        if stage == "rm" and model_args.checkpoint_dir is not None: # load valuehead weights to evaluate reward model
            logger.warning("Only the last checkpoint containing valuehead will be loaded.")
-            if load_valuehead_params(model, model_args.checkpoint_dir[-1]):
+            if load_valuehead_params(model, model_args):
                model.v_head.load_state_dict({
                    "summary.weight": getattr(model, "reward_head_weight"),
                    "summary.bias": getattr(model, "reward_head_bias")
@@ -226,9 +213,12 @@ def load_model_and_tokenizer(
        if stage == "ppo": # load reward model
            logger.info("Load reward model from {}".format(model_args.reward_model))
-            if getattr(model, "is_peft_model", False):
+            if isinstance(model.pretrained_model, PeftModel):
                model.pretrained_model.load_adapter(model_args.reward_model, "reward")
-            assert load_valuehead_params(model, model_args.reward_model), "Reward model is not correctly loaded."
+                for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
+                    if "default" in name:
+                        param.data = param.data.to(torch.float32) # trainable params should in fp32
+            assert load_valuehead_params(model, model_args), "Reward model is not correctly loaded."

    # Prepare model for inference
    if not is_trainable:
@@ -240,4 +230,7 @@ def load_model_and_tokenizer(
        trainable_params, all_param, 100 * trainable_params / all_param
    ))

+    if not is_trainable:
+        logger.info("This IS expected that the trainable params is 0 if you are using model for inference only.")
+
    return model, tokenizer
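For reference, the 4-bit branch above amounts to loading the base model with a bitsandbytes configuration along these lines; the model id, compute dtype and double-quantization choice here are placeholders, not values fixed by the diff:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute dtype chosen per hardware (assumption)
    bnb_4bit_use_double_quant=True,         # double quantization variant (assumption)
    bnb_4bit_quant_type="nf4"               # corresponds to quantization_type="nf4"
)

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",             # placeholder model id
    quantization_config=quant_config,
    device_map="auto",                      # inference-style placement, as in the non-trainable path
    trust_remote_code=True
)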

View File

@@ -1,5 +1,4 @@
import os
-import sys
import torch
import datasets
import transformers
@@ -8,6 +7,7 @@ from transformers import HfArgumentParser, Seq2SeqTrainingArguments
from transformers.trainer_utils import get_last_checkpoint

from llmtuner.extras.logging import get_logger
+from llmtuner.extras.misc import parse_args
from llmtuner.hparams import (
    ModelArguments,
    DataArguments,
@@ -19,17 +19,6 @@ from llmtuner.hparams import (
logger = get_logger(__name__)


-def _parse_args(parser: HfArgumentParser, args: Optional[Dict[str, Any]] = None) -> Tuple[Any]:
-    if args is not None:
-        return parser.parse_dict(args)
-    elif len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
-        return parser.parse_yaml_file(os.path.abspath(sys.argv[1]))
-    elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
-        return parser.parse_json_file(os.path.abspath(sys.argv[1]))
-    else:
-        return parser.parse_args_into_dataclasses()
-
-
def parse_train_args(
    args: Optional[Dict[str, Any]] = None
) -> Tuple[
@@ -46,7 +35,7 @@ def parse_train_args(
        FinetuningArguments,
        GeneratingArguments
    ))
-    return _parse_args(parser, args)
+    return parse_args(parser, args)


def parse_infer_args(
@@ -63,7 +52,7 @@ def parse_infer_args(
        FinetuningArguments,
        GeneratingArguments
    ))
-    return _parse_args(parser, args)
+    return parse_args(parser, args)


def get_train_args(
@@ -88,8 +77,8 @@ def get_train_args(
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

-    # Check arguments (do not check finetuning_args since it may be loaded from checkpoints)
-    data_args.init_for_training()
+    # Check arguments
+    data_args.init_for_training(training_args.seed)

    if finetuning_args.stage != "pt" and data_args.template is None:
        raise ValueError("Please specify which `template` to use.")
@@ -100,14 +89,16 @@ def get_train_args(
    if finetuning_args.stage == "sft" and training_args.do_predict and not training_args.predict_with_generate:
        raise ValueError("Please enable `predict_with_generate` to save model predictions.")

-    if finetuning_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type != "lora":
-        raise ValueError("RM and PPO stages can only be performed with the LoRA method.")
+    if finetuning_args.stage in ["rm", "ppo"]:
+        if finetuning_args.finetuning_type != "lora":
+            raise ValueError("RM and PPO stages can only be performed with the LoRA method.")
+        if training_args.resume_from_checkpoint is not None:
+            raise ValueError("RM and PPO stages do not support `resume_from_checkpoint`.")
+        if training_args.load_best_model_at_end:
+            raise ValueError("RM and PPO stages do not support `load_best_model_at_end`.")

-    if finetuning_args.stage in ["rm", "ppo"] and training_args.resume_from_checkpoint is not None:
-        raise ValueError("RM and PPO stages do not support `resume_from_checkpoint`.")
-
-    if finetuning_args.stage in ["ppo", "dpo"] and not training_args.do_train:
-        raise ValueError("PPO and DPO stages can only be performed at training.")
+    if finetuning_args.stage == "ppo" and not training_args.do_train:
+        raise ValueError("PPO training does not support evaluation.")

    if finetuning_args.stage in ["rm", "dpo"]:
        for dataset_attr in data_args.dataset_list:
@@ -117,9 +108,6 @@ def get_train_args(
    if finetuning_args.stage == "ppo" and model_args.reward_model is None:
        raise ValueError("Reward model is necessary for PPO training.")

-    if finetuning_args.stage == "ppo" and data_args.streaming:
-        raise ValueError("Streaming mode does not suppport PPO training currently.")
-
    if finetuning_args.stage == "ppo" and model_args.shift_attn:
        raise ValueError("PPO training is incompatible with S^2-Attn.")
@@ -135,18 +123,14 @@ def get_train_args(
    if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
        raise ValueError("Quantization is only compatible with the LoRA method.")

-    if model_args.checkpoint_dir is not None:
-        if finetuning_args.finetuning_type != "lora" and len(model_args.checkpoint_dir) != 1:
-            raise ValueError("Only LoRA tuning accepts multiple checkpoints.")
-
-        if model_args.quantization_bit is not None:
-            if len(model_args.checkpoint_dir) != 1:
-                raise ValueError("Quantized model only accepts a single checkpoint. Merge them first.")
-            if not finetuning_args.resume_lora_training:
-                raise ValueError("Quantized model cannot create new LoRA weight. Merge them first.")
-
-    if training_args.do_train and model_args.quantization_bit is not None and (not model_args.upcast_layernorm):
+    if (
+        model_args.checkpoint_dir is not None
+        and len(model_args.checkpoint_dir) != 1
+        and finetuning_args.finetuning_type != "lora"
+    ):
+        raise ValueError("Only LoRA tuning accepts multiple checkpoints.")
+
+    if training_args.do_train and model_args.quantization_bit is not None and (not finetuning_args.upcast_layernorm):
        logger.warning("We recommend enable `upcast_layernorm` in quantized training.")

    if training_args.do_train and (not training_args.fp16) and (not training_args.bf16):
@@ -219,11 +203,11 @@ def get_infer_args(
    if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
        raise ValueError("Quantization is only compatible with the LoRA method.")

-    if model_args.checkpoint_dir is not None:
-        if finetuning_args.finetuning_type != "lora" and len(model_args.checkpoint_dir) != 1:
-            raise ValueError("Only LoRA tuning accepts multiple checkpoints.")
-
-        if model_args.quantization_bit is not None and len(model_args.checkpoint_dir) != 1:
-            raise ValueError("Quantized model only accepts a single checkpoint. Merge them first.")
+    if (
+        model_args.checkpoint_dir is not None
+        and len(model_args.checkpoint_dir) != 1
+        and finetuning_args.finetuning_type != "lora"
+    ):
+        raise ValueError("Only LoRA tuning accepts multiple checkpoints.")

    return model_args, data_args, finetuning_args, generating_args

View File

@@ -1,16 +1,20 @@
 import torch
-from typing import TYPE_CHECKING, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple

 from llmtuner.extras.constants import LAYERNORM_NAMES
+from llmtuner.extras.logging import get_logger

 if TYPE_CHECKING:
     from transformers.modeling_utils import PreTrainedModel
+    from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments

+logger = get_logger(__name__)


 def find_all_linear_modules(
     model: "PreTrainedModel",
-    quantization_bit: Optional[int] = None,
-    output_layer_name: Optional[str] = "lm_head"
+    quantization_bit: Optional[int] = None
 ) -> List[str]:
     if quantization_bit is not None:
         import bitsandbytes as bnb
@@ -18,24 +22,41 @@ def find_all_linear_modules(
     else:
         linear_cls = torch.nn.Linear

+    output_layer_names = ["lm_head"]
+    if model.config.model_type == "chatglm":
+        output_layer_names.append("output_layer")

     module_names = set()
     for name, module in model.named_modules():
-        if output_layer_name not in name and isinstance(module, linear_cls):
+        if (
+            isinstance(module, linear_cls)
+            and not any([output_layer in name for output_layer in output_layer_names])
+        ):
             module_names.add(name.split(".")[-1])

-    if output_layer_name in module_names:
-        module_names.pop(output_layer_name)
+    logger.info("Found linear modules: {}".format(",".join(module_names)))
     return list(module_names)


+def generate_model_card(
+    model_args: "ModelArguments",
+    data_args: "DataArguments",
+    finetuning_args: "FinetuningArguments"
+) -> Dict[str, Any]:
+    return {
+        "tasks": "text-generation",
+        "finetuned_from": model_args.model_name_or_path,
+        "dataset": [dataset.strip() for dataset in data_args.dataset.split(",")],
+        "tags": ["llama-factory"] + (["lora"] if finetuning_args.finetuning_type == "lora" else [])
+    }


 def prepare_model_for_training(
     model: "PreTrainedModel",
-    upcast_layernorm: bool,
-    finetuning_type: str,
+    finetuning_args: "FinetuningArguments",
     output_layer_name: Optional[str] = "lm_head",
     use_gradient_checkpointing: Optional[bool] = True,
-    layernorm_names: Optional[List[str]] = LAYERNORM_NAMES
+    layernorm_names: Optional[Set[str]] = LAYERNORM_NAMES
 ) -> "PreTrainedModel":
     r"""
     Includes:
@@ -44,31 +65,43 @@ def prepare_model_for_training(
         (3) upcast the lm_head to fp32
     Inspired by: https://github.com/huggingface/peft/blob/v0.2.0/src/peft/utils/other.py#L33
     """
-    if upcast_layernorm:
+    if finetuning_args.upcast_layernorm:
         for name, param in model.named_parameters():
             if param.ndim == 1 and any(ln_name in name for ln_name in layernorm_names):
                 param.data = param.data.to(torch.float32)
+        logger.info("Upcasting weights in layernorm in float32.")

+    if finetuning_args.neft_alpha > 1e-6:
+        def neftune_forward_hook(module: torch.nn.Module, args: Tuple[torch.Tensor], output: torch.Tensor):
+            if module.training:
+                dims = torch.tensor(output.size(1) * output.size(2))
+                mag_norm = finetuning_args.neft_alpha / torch.sqrt(dims)
+                output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
+            return output
+
+        model.get_input_embeddings().register_forward_hook(neftune_forward_hook)
+        logger.info("Using noisy embedding with alpha={:.2f}".format(finetuning_args.neft_alpha))

     if use_gradient_checkpointing:
         if hasattr(model, "enable_input_require_grads"):
             model.enable_input_require_grads()
         else:
-            def make_inputs_require_grad(module, input, output):
+            def make_inputs_require_grad(module: torch.nn.Module, args: Tuple[torch.Tensor], output: torch.Tensor):
                 output.requires_grad_(True)

             model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

         model.gradient_checkpointing_enable()
         model.config.use_cache = False # turn off when gradient checkpointing is enabled
+        logger.info("Gradient checkpointing enabled.")

-    if finetuning_type != "full" and hasattr(model, output_layer_name):
-        output_layer: torch.nn.Linear = getattr(model, output_layer_name)
-        input_dtype = output_layer.weight.dtype
-
-        class CastOutputToFloat(torch.nn.Sequential):
-
-            def forward(self, x: torch.Tensor) -> torch.Tensor:
-                return super().forward(x.to(input_dtype)).to(torch.float32)
-
-        setattr(model, output_layer_name, CastOutputToFloat(output_layer))
+    if finetuning_args.finetuning_type != "full" and hasattr(model, output_layer_name):
+        output_layer = getattr(model, output_layer_name)
+        if isinstance(output_layer, torch.nn.Linear):
+            def fp32_forward_pre_hook(module: torch.nn.Module, args: Tuple[torch.Tensor]):
+                return args[0].to(output_layer.weight.dtype)
+
+            def fp32_forward_post_hook(module: torch.nn.Module, args: Tuple[torch.Tensor], output: torch.Tensor):
+                return output.to(torch.float32)
+
+            output_layer.register_forward_pre_hook(fp32_forward_pre_hook)
+            output_layer.register_forward_hook(fp32_forward_post_hook)

     return model
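Note: the `neftune_forward_hook` added above implements NEFTune-style noisy embeddings, perturbing the embedding output during training with uniform noise of magnitude alpha / sqrt(seq_len * hidden_dim). A minimal standalone sketch of the same idea (the embedding sizes and alpha value below are illustrative, not taken from the diff):

import torch

alpha = 5.0  # illustrative NEFTune alpha
embedding = torch.nn.Embedding(num_embeddings=100, embedding_dim=16)

def noisy_embedding_hook(module, args, output):
    # Add uniform noise scaled by alpha / sqrt(seq_len * hidden_dim), training mode only.
    if module.training:
        dims = torch.tensor(output.size(1) * output.size(2), dtype=torch.float32)
        mag_norm = alpha / torch.sqrt(dims)
        output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
    return output

embedding.register_forward_hook(noisy_embedding_hook)
tokens = torch.randint(0, 100, (2, 8))  # (batch_size, seq_len)
noisy = embedding(tokens)               # perturbed while embedding.training is True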

View File

@@ -1,6 +1,6 @@
 import torch
 from collections import defaultdict
-from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Dict, Literal, Optional, Tuple, Union
 from transformers import BatchEncoding, Trainer
 from trl import DPOTrainer
 from trl.trainer.utils import disable_dropout_in_model
@@ -19,6 +19,7 @@ class CustomDPOTrainer(DPOTrainer):
         model: Union["PreTrainedModel", torch.nn.Module],
         ref_model: Optional[Union["PreTrainedModel", torch.nn.Module]] = None,
         disable_dropout: Optional[bool] = True,
+        loss_type: Optional[Literal["sigmoid", "hinge"]] = "sigmoid",
         **kwargs
     ):
         if disable_dropout:
@@ -29,9 +30,11 @@ class CustomDPOTrainer(DPOTrainer):
         self.is_encoder_decoder = model.config.is_encoder_decoder
         self.ref_model = ref_model
         self.use_dpo_data_collator = True # hack to avoid warning
+        self.generate_during_eval = False # disable at evaluation
         self.label_pad_token_id = IGNORE_INDEX
         self.padding_value = 0
         self.beta = beta
+        self.loss_type = loss_type
         self._stored_metrics = defaultdict(lambda: defaultdict(list))

         Trainer.__init__(self, model=model, **kwargs)
@@ -40,8 +43,7 @@ class CustomDPOTrainer(DPOTrainer):
         if ref_model is not None:
             if self.is_deepspeed_enabled:
-                self.ref_model, = self.accelerator._prepare_deepspeed(self.ref_model)
-                self.ref_model.eval()
+                self.ref_model = self._prepare_deepspeed(self.ref_model)
             else:
                 self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)

View File

@@ -1,20 +1,24 @@
 # Inspired by: https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py

-from copy import deepcopy
 from peft import PeftModel
 from typing import TYPE_CHECKING, Optional, List
 from transformers import Seq2SeqTrainingArguments

 from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.constants import IGNORE_INDEX
+from llmtuner.extras.logging import get_logger
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.tuner.core import load_model_and_tokenizer
+from llmtuner.hparams import ModelArguments
+from llmtuner.tuner.core import generate_model_card, load_model_and_tokenizer
 from llmtuner.tuner.dpo.collator import DPODataCollatorWithPadding
 from llmtuner.tuner.dpo.trainer import CustomDPOTrainer

 if TYPE_CHECKING:
     from transformers import TrainerCallback
-    from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments
+    from llmtuner.hparams import DataArguments, FinetuningArguments

+logger = get_logger(__name__)


 def run_dpo(
@@ -33,6 +37,26 @@ def run_dpo(
         label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
     )

+    # Create reference model
+    if finetuning_args.dpo_ref_model is not None:
+        ref_model_args_dict = model_args.to_dict()
+        ref_model_args_dict.update(dict(
+            model_name_or_path=finetuning_args.dpo_ref_model,
+            checkpoint_dir=finetuning_args.dpo_ref_model_checkpoint
+        ))
+        ref_model_args = ModelArguments(**ref_model_args_dict)
+        ref_model, _ = load_model_and_tokenizer(ref_model_args, finetuning_args, is_trainable=False, stage="sft")
+        logger.info("Created reference model from {}".format(finetuning_args.dpo_ref_model))
+    elif training_args.do_train:
+        if isinstance(model, PeftModel):
+            ref_model = None
+        else:
+            ref_model, _ = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, stage="sft")
+            logger.info("Created reference model from the model itself.")
+    else:
+        ref_model = model
+
+    # Update arguments
     training_args_dict = training_args.to_dict()
     training_args_dict.update(dict(remove_unused_columns=False)) # important for pairwise dataset
     training_args = Seq2SeqTrainingArguments(**training_args_dict)
@@ -41,7 +65,7 @@ def run_dpo(
     trainer = CustomDPOTrainer(
         beta=finetuning_args.dpo_beta,
         model=model,
-        ref_model=deepcopy(model) if not isinstance(model, PeftModel) else None,
+        ref_model=ref_model,
         args=training_args,
         tokenizer=tokenizer,
         data_collator=data_collator,
@@ -52,9 +76,27 @@ def run_dpo(
     # Training
     if training_args.do_train:
         train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
-        trainer.save_model()
         trainer.log_metrics("train", train_result.metrics)
         trainer.save_metrics("train", train_result.metrics)
         trainer.save_state()
+        trainer.save_model()
         if trainer.is_world_process_zero() and model_args.plot_loss:
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])

+    # Evaluation
+    if training_args.do_eval:
+        metrics = trainer.evaluate(metric_key_prefix="eval")
+        if id(model) == id(ref_model): # unable to compute rewards without a reference model
+            logger.warning("Pass `dpo_ref_model` for computing rewards at evaluation.")
+            remove_keys = [key for key in metrics.keys() if "rewards" in key]
+            for key in remove_keys:
+                metrics.pop(key)
+        trainer.log_metrics("eval", metrics)
+        trainer.save_metrics("eval", metrics)
+
+    # Create model card
+    if training_args.do_train:
+        if training_args.push_to_hub:
+            trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
+        else:
+            trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))
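Note: the block above picks the DPO reference model in three ways: an explicit `dpo_ref_model`, the policy itself (a PeftModel can disable its LoRA adapter, so `ref_model=None` suffices; full fine-tuning loads a frozen copy), or, at pure evaluation time, the policy is reused and reward metrics are dropped. A condensed sketch of that decision, with hypothetical `is_peft` and `do_train` flags standing in for the checks above:

from copy import deepcopy

def select_ref_model(model, is_peft: bool, do_train: bool):
    # Hypothetical helper mirroring the selection logic in the diff above.
    if do_train:
        # A PEFT/LoRA policy can disable its adapter to recover the frozen base model,
        # so TRL's DPOTrainer accepts ref_model=None in that case.
        return None if is_peft else deepcopy(model)
    return model  # evaluation only: the policy is reused, so reward metrics are not meaningful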

View File

@@ -1,10 +1,11 @@
 import os
+import sys
 import math
 import torch
 from tqdm import tqdm
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
-from transformers import GenerationConfig, Trainer, TrainerState, TrainerControl
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from transformers import BatchEncoding, GenerationConfig, Trainer, TrainerState, TrainerControl
 from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
 from trl import PPOTrainer
@@ -18,7 +19,7 @@ from llmtuner.tuner.ppo.utils import dump_layernorm, restore_layernorm, replace_
 if TYPE_CHECKING:
     from transformers import Seq2SeqTrainingArguments, TrainerCallback
     from trl import AutoModelForCausalLMWithValueHead
-    from llmtuner.hparams import ModelArguments, GeneratingArguments
+    from llmtuner.hparams import ModelArguments, FinetuningArguments, GeneratingArguments

 logger = get_logger(__name__)
@@ -33,16 +34,15 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         self,
         model_args: "ModelArguments",
         training_args: "Seq2SeqTrainingArguments",
+        finetuning_args: "FinetuningArguments",
         generating_args: "GeneratingArguments",
         callbacks: List["TrainerCallback"],
         **kwargs
     ):
         PPOTrainer.__init__(self, **kwargs)
-        if getattr(self.accelerator.state, "deepspeed_plugin", None) is not None:
-            raise ValueError("PPOTrainer is incompatible with DeepSpeed.")
         self.args = training_args
         self.model_args = model_args
+        self.finetuning_args = finetuning_args
         self.generation_config = GenerationConfig(
             pad_token_id=self.tokenizer.pad_token_id,
             eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
@@ -52,6 +52,8 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         self.control = TrainerControl()
         self.log_callback, self.save_callback = callbacks[0], callbacks[1]
         assert isinstance(self.log_callback, LogCallback) and isinstance(self.save_callback, SavePeftModelCallback)
+        if self.args.max_steps > 0:
+            logger.info("max_steps is given, it will override any value given in num_train_epochs")

     def ppo_train(self) -> None:
         r"""
@@ -60,10 +62,17 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         total_train_batch_size = (
             self.args.per_device_train_batch_size * self.args.gradient_accumulation_steps * self.args.world_size
         )
-        len_dataloader = len(self.dataloader)
-        num_examples = len(self.dataset)
-        num_train_epochs = self.args.num_train_epochs
-        max_steps = math.ceil(num_train_epochs * len_dataloader)
+        if self.args.max_steps > 0:
+            num_examples = total_train_batch_size * self.args.max_steps
+            num_train_epochs = sys.maxsize
+            max_steps = self.args.max_steps
+            steps_in_epoch = self.args.max_steps * self.args.gradient_accumulation_steps
+        else:
+            len_dataloader = len(self.dataloader)
+            num_examples = len(self.dataset)
+            num_train_epochs = self.args.num_train_epochs
+            max_steps = math.ceil(num_train_epochs * len_dataloader)
+            steps_in_epoch = len_dataloader

         self.state.max_steps = max_steps
         self.state.num_train_epochs = num_train_epochs
@@ -82,14 +91,16 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
         dataiter = iter(self.dataloader)
-        steps_trained = 0
         loss_meter = AverageMeter()
         reward_meter = AverageMeter()
         self.log_callback.on_train_begin(self.args, self.state, self.control)

         for step in tqdm(range(max_steps), disable=not self.is_local_process_zero()):
-            batch = next(dataiter)
-            steps_trained += 1
+            try:
+                batch = next(dataiter)
+            except StopIteration:
+                dataiter = iter(self.dataloader)
+                batch = next(dataiter)

             # Cast to inference mode
             unwrapped_model.gradient_checkpointing_disable()
@@ -97,9 +108,14 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             self.model.eval()

             # Get inputs
-            queries, responses = self.get_inputs(batch)
             self.tokenizer.padding_side = "right" # change padding side
-            rewards = self.get_rewards(queries, responses, unwrapped_model)
+            queries, responses, rewards = [], [], []
+            for idx in range(0, self.config.batch_size, self.config.mini_batch_size):
+                mini_batch_queries, mini_batch_responses = self.get_inputs(batch[idx:idx+self.config.mini_batch_size])
+                mini_batch_rewards = self.get_rewards(mini_batch_queries, mini_batch_responses, unwrapped_model)
+                queries.extend(mini_batch_queries)
+                responses.extend(mini_batch_responses)
+                rewards.extend(mini_batch_rewards)

             # Cast to training mode
             unwrapped_model.gradient_checkpointing_enable()
@@ -128,7 +144,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
                     loss=round(loss_meter.avg, 4),
                     reward=round(reward_meter.avg, 4),
                     learning_rate=stats["ppo/learning_rate"],
-                    epoch=round(step / len_dataloader, 2)
+                    epoch=round(step / steps_in_epoch, 2)
                 )
                 tqdm.write(str(logs))
                 logs["step"] = step
@@ -148,21 +164,17 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             if self.control.should_epoch_stop or self.control.should_training_stop:
                 break

-            if steps_trained == len_dataloader:
-                dataiter = iter(self.dataloader)
-                steps_trained = 0

         self.log_callback.on_train_end(self.args, self.state, self.control)
         self.save_callback.on_train_end(
             self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model)
         )

     @torch.no_grad()
-    def get_inputs(self, batch: Dict[str, torch.Tensor]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
+    def get_inputs(self, batch: BatchEncoding) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
         r"""
         Generates model's responses given queries.
         """
-        if self.model_args.upcast_layernorm:
+        if self.finetuning_args.upcast_layernorm:
             layernorm_params = dump_layernorm(self.model)

         unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
@@ -172,21 +184,19 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             **batch
         )

-        if self.model_args.upcast_layernorm:
+        if self.finetuning_args.upcast_layernorm:
             restore_layernorm(self.model, layernorm_params)

         query, response = batch["input_ids"].detach().cpu(), response[:, batch["input_ids"].size(-1):].detach().cpu()
         queries, responses = [], []
         for i in range(len(query)):
-            query_length = (query[i] != self.tokenizer.pad_token_id).nonzero()[0]
+            query_length = (query[i] != self.tokenizer.pad_token_id).nonzero()[0].item()
             response_index = (response[i] != self.tokenizer.pad_token_id).nonzero()

             if len(response_index) == 0:
                 response_length = 1 # allow empty response
-            elif self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:
-                response_length = response_index[-1] + 2 # save the EOS token
             else:
-                response_length = response_index[-1] + 1
+                response_length = response_index[-1].item() + 1

             queries.append(query[i, query_length:]) # remove padding from left
             responses.append(response[i, :response_length]) # remove padding from right
@@ -214,13 +224,14 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         rewards = []
         for i in range(values.size(0)):
-            end_index = batch["attention_mask"][i].nonzero()[-1] # use the score on the EOS token
+            end_indexes = (batch["input_ids"][i] != self.tokenizer.pad_token_id).nonzero()
+            end_index = end_indexes[-1].item() if len(end_indexes) else 0
             rewards.append(values[i, end_index].float().detach().cpu()) # use fp32 type

         replace_model(unwrapped_model, target="default")
         return rewards

-    @PPODecorators.empty_cuda_cache()
+    @PPODecorators.empty_device_cache()
     def batched_forward_pass(
         self,
         model: "AutoModelForCausalLMWithValueHead",
@@ -264,7 +275,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             for j in range(len(query_batch)):
                 start = len(query_batch[j]) - 1
                 if attention_mask[j, 0] == 0: # offset left padding
-                    start += attention_mask[j, :].nonzero()[0]
+                    start += attention_mask[j, :].nonzero()[0].item()
                 end = start + len(response_batch[j])

                 if response_masks is not None:
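Note: because `max_steps` may exceed one pass over the data, the loop above restarts the dataloader on `StopIteration` instead of counting epochs. A minimal sketch of that pattern (toy dataset and step count chosen arbitrarily):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10, dtype=torch.float32).unsqueeze(-1))
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

max_steps = 7  # more steps than one epoch provides (ceil(10 / 4) = 3 batches)
dataiter = iter(dataloader)
for step in range(max_steps):
    try:
        batch = next(dataiter)
    except StopIteration:          # epoch boundary: start over with a fresh iterator
        dataiter = iter(dataloader)
        batch = next(dataiter)
    # ... one PPO update would run on `batch` here ...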

View File

@@ -42,7 +42,7 @@ def run_ppo(
         ppo_epochs=1,
         max_grad_norm=training_args.max_grad_norm,
         seed=training_args.seed,
-        optimize_cuda_cache=True,
+        optimize_device_cache=True,
         target=finetuning_args.ppo_target,
         log_with=finetuning_args.ppo_logger,
         use_score_scaling=finetuning_args.ppo_score_norm,
@@ -51,10 +51,14 @@ def run_ppo(
     )

     optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=training_args.learning_rate)
-    total_train_batch_size = (
-        training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
-    )
-    num_training_steps = training_args.num_train_epochs * math.ceil(len(dataset) / total_train_batch_size)
+    if training_args.max_steps > 0:
+        num_training_steps = training_args.max_steps
+    else:
+        total_train_batch_size = (
+            training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
+        )
+        num_training_steps = training_args.num_train_epochs * math.ceil(len(dataset) / total_train_batch_size)
     lr_scheduler = get_scheduler(
         training_args.lr_scheduler_type,
         optimizer=optimizer,
@@ -66,6 +70,7 @@ def run_ppo(
     ppo_trainer = CustomPPOTrainer(
         model_args=model_args,
         training_args=training_args,
+        finetuning_args=finetuning_args,
         generating_args=generating_args,
         callbacks=callbacks + [SavePeftModelCallback()],
         config=ppo_config,
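Note: the scheduler above needs the total number of optimizer steps up front, either taken directly from `max_steps` or derived from the epoch count. A small worked sketch of that branch with made-up values:

import math

per_device_bs, grad_accum, world_size = 4, 4, 2
num_train_epochs, dataset_len, max_steps = 3.0, 1000, 0  # max_steps <= 0: derive from epochs

if max_steps > 0:
    num_training_steps = max_steps
else:
    total_train_batch_size = per_device_bs * grad_accum * world_size      # 32
    num_training_steps = num_train_epochs * math.ceil(dataset_len / total_train_batch_size)

print(num_training_steps)  # 3.0 * ceil(1000 / 32) = 96.0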

View File

@@ -1,4 +1,4 @@
-# Inspired by: https://github.com/huggingface/transformers/blob/v4.29.2/examples/pytorch/language-modeling/run_clm.py
+# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/language-modeling/run_clm.py

 import math
 from typing import TYPE_CHECKING, Optional, List
@@ -6,7 +6,7 @@ from transformers import DataCollatorForLanguageModeling, Trainer
 from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.tuner.core import load_model_and_tokenizer
+from llmtuner.tuner.core import generate_model_card, load_model_and_tokenizer

 if TYPE_CHECKING:
     from transformers import Seq2SeqTrainingArguments, TrainerCallback
@@ -38,10 +38,10 @@ def run_pt(
     # Training
     if training_args.do_train:
         train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
-        trainer.save_model()
         trainer.log_metrics("train", train_result.metrics)
         trainer.save_metrics("train", train_result.metrics)
         trainer.save_state()
+        trainer.save_model()
         if trainer.is_world_process_zero() and model_args.plot_loss:
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
@@ -56,3 +56,10 @@ def run_pt(
         metrics["perplexity"] = perplexity
         trainer.log_metrics("eval", metrics)
         trainer.save_metrics("eval", metrics)
+
+    # Create model card
+    if training_args.do_train:
+        if training_args.push_to_hub:
+            trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
+        else:
+            trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))

View File

@@ -34,7 +34,7 @@ class PairwiseTrainer(Trainer):
         Subclass and override to inject custom behavior.

         Note that the first element will be removed from the output tuple.
         See: https://github.com/huggingface/transformers/blob/v4.30.2/src/transformers/trainer.py#L3509
         """
         # Compute rewards
@@ -45,9 +45,6 @@ class PairwiseTrainer(Trainer):
         # Split the inputs and rewards into two parts, chosen and rejected
         batch_size = inputs["input_ids"].size(0) // 2
         chosen_input_ids, rejected_input_ids = inputs["input_ids"][:batch_size], inputs["input_ids"][batch_size:]
-        chosen_attn_mask, rejected_attn_mask = (
-            inputs["attention_mask"][:batch_size], inputs["attention_mask"][batch_size:]
-        )
         chosen_rewards, rejected_rewards = values[:batch_size], values[batch_size:]
         chosen_scores, rejected_scores = [], []
@@ -55,8 +52,8 @@ class PairwiseTrainer(Trainer):
         # Inspired by: https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/reward_model.py
         loss = 0
         for i in range(batch_size):
-            chosen_length = chosen_attn_mask[i].nonzero()[-1] + 1
-            rejected_length = rejected_attn_mask[i].nonzero()[-1] + 1
+            chosen_length = (chosen_input_ids[i] != self.tokenizer.pad_token_id).nonzero()[-1] + 1
+            rejected_length = (rejected_input_ids[i] != self.tokenizer.pad_token_id).nonzero()[-1] + 1
             check_divergence = (chosen_input_ids[i] != rejected_input_ids[i]).nonzero()

             if len(check_divergence) == 0:
@@ -69,7 +66,7 @@ class PairwiseTrainer(Trainer):
             assert div_index > 0
             chosen_trunc_rewards = chosen_rewards[i, div_index:end_index]
             rejected_trunc_rewards = rejected_rewards[i, div_index:end_index]
-            if return_outputs: # use the score on the EOS token for inference
+            if return_outputs: # use the score on the last token except pad token for inference
                 chosen_scores.append(chosen_rewards[i, chosen_length-1])
                 rejected_scores.append(rejected_rewards[i, rejected_length-1])
             loss += -torch.nn.functional.logsigmoid(chosen_trunc_rewards - rejected_trunc_rewards).mean()
@@ -95,7 +92,6 @@ class PairwiseTrainer(Trainer):
         output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
         logger.info(f"Saving prediction results to {output_prediction_file}")
         chosen_scores, rejected_scores = predict_results.predictions
         with open(output_prediction_file, "w", encoding="utf-8") as writer:
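Note: the reward score is now read from the last non-pad position located via the input ids rather than the attention mask. A tiny sketch of that indexing (a pad id of 0 is assumed purely for the example):

import torch

pad_token_id = 0
input_ids = torch.tensor([3, 5, 7, 0, 0])                 # right-padded sequence
length = (input_ids != pad_token_id).nonzero()[-1] + 1    # index of the last real token, plus one
print(length.item())  # 3 -- the reward head score at position length - 1 is used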

View File

@@ -1,5 +1,4 @@
-# Inspired by:
-# https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/train_reward_model_gptj.py
+# Inspired by: https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/train_reward_model_gptj.py

 from typing import TYPE_CHECKING, Optional, List
 from transformers import Seq2SeqTrainingArguments
@@ -7,7 +6,7 @@ from transformers import Seq2SeqTrainingArguments
 from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.callbacks import SavePeftModelCallback
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.tuner.core import load_model_and_tokenizer
+from llmtuner.tuner.core import generate_model_card, load_model_and_tokenizer
 from llmtuner.tuner.rm.metric import compute_accuracy
 from llmtuner.tuner.rm.collator import PairwiseDataCollatorWithPadding
 from llmtuner.tuner.rm.trainer import PairwiseTrainer
@@ -29,6 +28,7 @@ def run_rm(
     dataset = preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="rm")
     data_collator = PairwiseDataCollatorWithPadding(tokenizer, pad_to_multiple_of=4)

+    # Update arguments
     training_args_dict = training_args.to_dict()
     training_args_dict.update(dict(remove_unused_columns=False)) # important for pairwise dataset
     training_args = Seq2SeqTrainingArguments(**training_args_dict)
@@ -47,10 +47,10 @@ def run_rm(
     # Training
     if training_args.do_train:
         train_result = trainer.train()
-        trainer.save_model()
         trainer.log_metrics("train", train_result.metrics)
         trainer.save_metrics("train", train_result.metrics)
         trainer.save_state()
+        trainer.save_model()
         if trainer.is_world_process_zero() and model_args.plot_loss:
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
@@ -66,3 +66,10 @@ def run_rm(
         trainer.log_metrics("predict", predict_results.metrics)
         trainer.save_metrics("predict", predict_results.metrics)
         trainer.save_predictions(predict_results)
+
+    # Create model card
+    if training_args.do_train:
+        if training_args.push_to_hub:
+            trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
+        else:
+            trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))

View File

@@ -33,28 +33,20 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         Subclass and override to inject custom behavior.
         """
+        labels = inputs["labels"].detach().clone() if "labels" in inputs else None # backup labels
         if self.args.predict_with_generate:
             assert self.tokenizer.padding_side == "left", "This method only accepts left-padded tensor."
-            assert self.tokenizer.pad_token_id is not None, "Pad token is required."
             prompt_len, label_len = inputs["input_ids"].size(-1), inputs["labels"].size(-1)
             if prompt_len > label_len:
                 inputs["labels"] = self._pad_tensors_to_target_len(inputs["labels"], inputs["input_ids"])
             if label_len > prompt_len:
-                inputs["input_ids"] = self._pad_tensors_to_target_len(inputs["input_ids"], inputs["labels"])
-                if "attention_mask" in inputs:
-                    inputs["attention_mask"] = self._pad_tensors_to_target_len(
-                        inputs["attention_mask"], inputs["labels"], pad_token_id=0
-                    )
-                if "position_ids" in inputs:
-                    inputs["position_ids"] = self._pad_tensors_to_target_len(
-                        inputs["position_ids"], inputs["labels"], pad_token_id=0
-                    )
+                inputs["labels"] = inputs["labels"][:, :prompt_len] # truncate the labels instead of padding the inputs

-        loss, generated_tokens, labels = super().prediction_step(
+        loss, generated_tokens, _ = super().prediction_step(
             model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
         )
         if generated_tokens is not None and self.args.predict_with_generate:
-            generated_tokens[:, :max(prompt_len, label_len)] = self.tokenizer.pad_token_id
+            generated_tokens[:, :prompt_len] = self.tokenizer.pad_token_id
             generated_tokens = generated_tokens.contiguous()

         return loss, generated_tokens, labels
@@ -62,14 +54,13 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
     def _pad_tensors_to_target_len(
         self,
         src_tensor: torch.Tensor,
-        tgt_tensor: torch.Tensor,
-        pad_token_id: Optional[int] = None
+        tgt_tensor: torch.Tensor
     ) -> torch.Tensor:
         r"""
         Pads the tensor to the same length as the target tensor.
         """
-        pad_token_id = pad_token_id if pad_token_id is not None else self.tokenizer.pad_token_id
-        padded_tensor = pad_token_id * torch.ones_like(tgt_tensor)
+        assert self.tokenizer.pad_token_id is not None, "Pad token is required."
+        padded_tensor = self.tokenizer.pad_token_id * torch.ones_like(tgt_tensor)
         padded_tensor[:, -src_tensor.shape[-1]:] = src_tensor # adopt left-padding
         return padded_tensor.contiguous() # in contiguous memory
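Note: `_pad_tensors_to_target_len` left-pads the shorter tensor with the tokenizer's pad id so that labels and prompts share one length before generation. A small sketch of that alignment (a pad id of 0 is assumed for the example):

import torch

pad_token_id = 0
input_ids = torch.tensor([[0, 0, 5, 6, 7, 8]])  # left-padded prompt, length 6
labels = torch.tensor([[9, 10, 11]])            # shorter target, length 3

padded = pad_token_id * torch.ones_like(input_ids)
padded[:, -labels.shape[-1]:] = labels          # keep the labels flush right
print(padded)  # tensor([[0, 0, 0, 9, 10, 11]])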

View File

@@ -1,4 +1,4 @@
-# Inspired by: https://github.com/huggingface/transformers/blob/v4.29.2/examples/pytorch/summarization/run_summarization.py
+# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py

 from typing import TYPE_CHECKING, Optional, List
 from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainingArguments
@@ -7,7 +7,7 @@ from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.constants import IGNORE_INDEX
 from llmtuner.extras.misc import get_logits_processor
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.tuner.core import load_model_and_tokenizer
+from llmtuner.tuner.core import generate_model_card, load_model_and_tokenizer
 from llmtuner.tuner.sft.metric import ComputeMetrics
 from llmtuner.tuner.sft.trainer import CustomSeq2SeqTrainer
@@ -33,7 +33,7 @@ def run_sft(
     data_collator = DataCollatorForSeq2Seq(
         tokenizer=tokenizer,
-        pad_to_multiple_of=4, # for shift short attention
+        pad_to_multiple_of=4 if tokenizer.padding_side == "right" else None, # for shift short attention
         label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
     )
@@ -65,10 +65,10 @@ def run_sft(
     # Training
     if training_args.do_train:
         train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
-        trainer.save_model()
         trainer.log_metrics("train", train_result.metrics)
         trainer.save_metrics("train", train_result.metrics)
         trainer.save_state()
+        trainer.save_model()
         if trainer.is_world_process_zero() and model_args.plot_loss:
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
@@ -88,3 +88,10 @@ def run_sft(
         trainer.log_metrics("predict", predict_results.metrics)
         trainer.save_metrics("predict", predict_results.metrics)
         trainer.save_predictions(predict_results)
+
+    # Create model card
+    if training_args.do_train:
+        if training_args.push_to_hub:
+            trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
+        else:
+            trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))

View File

@@ -38,10 +38,10 @@ def export_model(args: Optional[Dict[str, Any]] = None, max_shard_size: Optional
     model_args, _, finetuning_args, _ = get_infer_args(args)
     model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)
     model.config.use_cache = True
-    tokenizer.padding_side = "left" # restore padding side
-    tokenizer.init_kwargs["padding_side"] = "left"
     model.save_pretrained(model_args.export_dir, max_shard_size=max_shard_size)

     try:
+        tokenizer.padding_side = "left" # restore padding side
+        tokenizer.init_kwargs["padding_side"] = "left"
         tokenizer.save_pretrained(model_args.export_dir)
     except:
         logger.warning("Cannot save tokenizer, please copy the files manually.")

View File

@@ -1,3 +1,4 @@
+import gradio as gr
 from gradio.components import Component # cannot use TYPE_CHECKING here
 from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple
@@ -26,19 +27,19 @@ class WebChatModel(ChatModel):
         return self.model is not None

     def load_model(self, data: Dict[Component, Any]) -> Generator[str, None, None]:
-        get = lambda name: data[self.manager.get_elem(name)]
+        get = lambda name: data[self.manager.get_elem_by_name(name)]
         lang = get("top.lang")
+        error = ""

         if self.loaded:
-            yield ALERTS["err_exists"][lang]
-            return
+            error = ALERTS["err_exists"][lang]
+        elif not get("top.model_name"):
+            error = ALERTS["err_no_model"][lang]
+        elif not get("top.model_path"):
+            error = ALERTS["err_no_path"][lang]

-        if not get("top.model_name"):
-            yield ALERTS["err_no_model"][lang]
-            return
-
-        if not get("top.model_path"):
-            yield ALERTS["err_no_path"][lang]
+        if error:
+            gr.Warning(error)
+            yield error
             return

         if get("top.checkpoints"):
@@ -65,9 +66,7 @@ class WebChatModel(ChatModel):
         yield ALERTS["info_loaded"][lang]

     def unload_model(self, data: Dict[Component, Any]) -> Generator[str, None, None]:
-        get = lambda name: data[self.manager.get_elem(name)]
-        lang = get("top.lang")
+        lang = data[self.manager.get_elem_by_name("top.lang")]
         yield ALERTS["info_unloading"][lang]
         self.model = None
         self.tokenizer = None

View File

@@ -61,26 +61,31 @@ def get_model_path(model_name: str) -> str:
     return user_config["path_dict"].get(model_name, None) or SUPPORTED_MODELS.get(model_name, "")


+def get_prefix(model_name: str) -> str:
+    return model_name.split("-")[0]


 def get_module(model_name: str) -> str:
-    return DEFAULT_MODULE.get(model_name.split("-")[0], "q_proj,v_proj")
+    return DEFAULT_MODULE.get(get_prefix(model_name), "q_proj,v_proj")


 def get_template(model_name: str) -> str:
-    if model_name.endswith("Chat") and model_name.split("-")[0] in DEFAULT_TEMPLATE:
-        return DEFAULT_TEMPLATE[model_name.split("-")[0]]
+    if model_name.endswith("Chat") and get_prefix(model_name) in DEFAULT_TEMPLATE:
+        return DEFAULT_TEMPLATE[get_prefix(model_name)]
     return "default"


 def list_checkpoint(model_name: str, finetuning_type: str) -> Dict[str, Any]:
     checkpoints = []
-    save_dir = get_save_dir(model_name, finetuning_type)
-    if save_dir and os.path.isdir(save_dir):
-        for checkpoint in os.listdir(save_dir):
-            if (
-                os.path.isdir(os.path.join(save_dir, checkpoint))
-                and any([os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CKPT_NAMES])
-            ):
-                checkpoints.append(checkpoint)
+    if model_name:
+        save_dir = get_save_dir(model_name, finetuning_type)
+        if save_dir and os.path.isdir(save_dir):
+            for checkpoint in os.listdir(save_dir):
+                if (
+                    os.path.isdir(os.path.join(save_dir, checkpoint))
+                    and any([os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CKPT_NAMES])
+                ):
+                    checkpoints.append(checkpoint)
     return gr.update(value=[], choices=checkpoints)

View File

@@ -11,11 +11,9 @@ def create_chat_box(
engine: "Engine", engine: "Engine",
visible: Optional[bool] = False visible: Optional[bool] = False
) -> Tuple["Block", "Component", "Component", Dict[str, "Component"]]: ) -> Tuple["Block", "Component", "Component", Dict[str, "Component"]]:
elem_dict = dict()
with gr.Box(visible=visible) as chat_box: with gr.Box(visible=visible) as chat_box:
chatbot = gr.Chatbot() chatbot = gr.Chatbot()
history = gr.State([])
with gr.Row(): with gr.Row():
with gr.Column(scale=4): with gr.Column(scale=4):
system = gr.Textbox(show_label=False) system = gr.Textbox(show_label=False)
@@ -29,13 +27,6 @@ def create_chat_box(
top_p = gr.Slider(0.01, 1, value=gen_kwargs.top_p, step=0.01) top_p = gr.Slider(0.01, 1, value=gen_kwargs.top_p, step=0.01)
temperature = gr.Slider(0.01, 1.5, value=gen_kwargs.temperature, step=0.01) temperature = gr.Slider(0.01, 1.5, value=gen_kwargs.temperature, step=0.01)
elem_dict.update(dict(
system=system, query=query, submit_btn=submit_btn, clear_btn=clear_btn,
max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature
))
history = gr.State([])
submit_btn.click( submit_btn.click(
engine.chatter.predict, engine.chatter.predict,
[chatbot, query, history, system, max_new_tokens, top_p, temperature], [chatbot, query, history, system, max_new_tokens, top_p, temperature],
@@ -47,4 +38,12 @@ def create_chat_box(
clear_btn.click(lambda: ([], []), outputs=[chatbot, history], show_progress=True) clear_btn.click(lambda: ([], []), outputs=[chatbot, history], show_progress=True)
return chat_box, chatbot, history, elem_dict return chat_box, chatbot, history, dict(
system=system,
query=query,
submit_btn=submit_btn,
clear_btn=clear_btn,
max_new_tokens=max_new_tokens,
top_p=top_p,
temperature=temperature
)

View File

@@ -1,21 +1,103 @@
+import os
+import json
 import gradio as gr
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Tuple

+from llmtuner.webui.common import DATA_CONFIG

 if TYPE_CHECKING:
-    from gradio.blocks import Block
     from gradio.components import Component


-def create_preview_box() -> Tuple["Block", "Component", "Component", "Component"]:
-    with gr.Box(visible=False, elem_classes="modal-box") as preview_box:
+PAGE_SIZE = 2


+def prev_page(page_index: int) -> int:
+    return page_index - 1 if page_index > 0 else page_index


+def next_page(page_index: int, total_num: int) -> int:
+    return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index


+def can_preview(dataset_dir: str, dataset: list) -> Dict[str, Any]:
+    with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
+        dataset_info = json.load(f)
+    if (
+        len(dataset) > 0
+        and "file_name" in dataset_info[dataset[0]]
+        and os.path.isfile(os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"]))
+    ):
+        return gr.update(interactive=True)
+    else:
+        return gr.update(interactive=False)


+def get_preview(dataset_dir: str, dataset: list, page_index: int) -> Tuple[int, list, Dict[str, Any]]:
+    with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
+        dataset_info = json.load(f)
+    data_file: str = dataset_info[dataset[0]]["file_name"]
+    with open(os.path.join(dataset_dir, data_file), "r", encoding="utf-8") as f:
+        if data_file.endswith(".json"):
+            data = json.load(f)
+        elif data_file.endswith(".jsonl"):
+            data = [json.loads(line) for line in f]
+        else:
+            data = [line for line in f]
+    return len(data), data[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)], gr.update(visible=True)


+def create_preview_box(dataset_dir: "gr.Textbox", dataset: "gr.Dropdown") -> Dict[str, "Component"]:
+    data_preview_btn = gr.Button(interactive=False, scale=1)
+    with gr.Column(visible=False, elem_classes="modal-box") as preview_box:
         with gr.Row():
-            preview_count = gr.Number(interactive=False)
+            preview_count = gr.Number(value=0, interactive=False, precision=0)
+            page_index = gr.Number(value=0, interactive=False, precision=0)

+        with gr.Row():
+            prev_btn = gr.Button()
+            next_btn = gr.Button()
+            close_btn = gr.Button()

         with gr.Row():
             preview_samples = gr.JSON(interactive=False)

-        close_btn = gr.Button()
+    dataset.change(
+        can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False
+    ).then(
+        lambda: 0, outputs=[page_index], queue=False
+    )

+    data_preview_btn.click(
+        get_preview,
+        [dataset_dir, dataset, page_index],
+        [preview_count, preview_samples, preview_box],
+        queue=False
+    )

+    prev_btn.click(
+        prev_page, [page_index], [page_index], queue=False
+    ).then(
+        get_preview,
+        [dataset_dir, dataset, page_index],
+        [preview_count, preview_samples, preview_box],
+        queue=False
+    )

+    next_btn.click(
+        next_page, [page_index, preview_count], [page_index], queue=False
+    ).then(
+        get_preview,
+        [dataset_dir, dataset, page_index],
+        [preview_count, preview_samples, preview_box],
+        queue=False
+    )

     close_btn.click(lambda: gr.update(visible=False), outputs=[preview_box], queue=False)

-    return preview_box, preview_count, preview_samples, close_btn
+    return dict(
+        data_preview_btn=data_preview_btn,
+        preview_count=preview_count,
+        page_index=page_index,
+        prev_btn=prev_btn,
+        next_btn=next_btn,
+        close_btn=close_btn,
+        preview_samples=preview_samples
+    )
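Note: the preview dialog above pages through the dataset `PAGE_SIZE` rows at a time, with `prev_page`/`next_page` clamping the index at both ends. A quick usage sketch of those two helpers (copied here with PAGE_SIZE = 2 as in the diff):

PAGE_SIZE = 2

def prev_page(page_index: int) -> int:
    return page_index - 1 if page_index > 0 else page_index

def next_page(page_index: int, total_num: int) -> int:
    return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index

# With 5 samples there are pages 0, 1 and 2 (2 + 2 + 1 rows):
print(next_page(1, 5))  # 2 -- room for a third page
print(next_page(2, 5))  # 2 -- already on the last page, stays put
print(prev_page(0))     # 0 -- cannot go before the first page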

View File

@@ -3,7 +3,6 @@ from typing import TYPE_CHECKING, Dict
 from llmtuner.webui.common import list_dataset, DEFAULT_DATA_DIR
 from llmtuner.webui.components.data import create_preview_box
-from llmtuner.webui.utils import can_preview, get_preview

 if TYPE_CHECKING:
     from gradio.components import Component
@@ -17,28 +16,12 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]:
     with gr.Row():
         dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2)
         dataset = gr.Dropdown(multiselect=True, scale=4)
-        data_preview_btn = gr.Button(interactive=False, scale=1)
+        preview_elems = create_preview_box(dataset_dir, dataset)

     dataset_dir.change(list_dataset, [dataset_dir], [dataset], queue=False)
-    dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False)

     input_elems.update({dataset_dir, dataset})
-    elem_dict.update(dict(
-        dataset_dir=dataset_dir, dataset=dataset, data_preview_btn=data_preview_btn
-    ))
-
-    preview_box, preview_count, preview_samples, close_btn = create_preview_box()
-
-    data_preview_btn.click(
-        get_preview,
-        [dataset_dir, dataset],
-        [preview_count, preview_samples, preview_box],
-        queue=False
-    )
-
-    elem_dict.update(dict(
-        preview_count=preview_count, preview_samples=preview_samples, close_btn=close_btn
-    ))
+    elem_dict.update(dict(dataset_dir=dataset_dir, dataset=dataset, **preview_elems))

     with gr.Row():
         cutoff_len = gr.Slider(value=1024, minimum=4, maximum=8192, step=1)

View File

@@ -1,16 +1,54 @@
 import gradio as gr
-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING, Dict, Generator, List

-from llmtuner.webui.utils import save_model
+from llmtuner.tuner import export_model
+from llmtuner.webui.common import get_save_dir
+from llmtuner.webui.locales import ALERTS

 if TYPE_CHECKING:
     from gradio.components import Component
     from llmtuner.webui.engine import Engine


-def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
-    elem_dict = dict()
+def save_model(
+    lang: str,
+    model_name: str,
+    model_path: str,
+    checkpoints: List[str],
+    finetuning_type: str,
+    template: str,
+    max_shard_size: int,
+    export_dir: str
+) -> Generator[str, None, None]:
+    error = ""
+    if not model_name:
+        error = ALERTS["err_no_model"][lang]
+    elif not model_path:
+        error = ALERTS["err_no_path"][lang]
+    elif not checkpoints:
+        error = ALERTS["err_no_checkpoint"][lang]
+    elif not export_dir:
+        error = ALERTS["err_no_export_dir"][lang]

+    if error:
+        gr.Warning(error)
+        yield error
+        return

+    args = dict(
+        model_name_or_path=model_path,
+        checkpoint_dir=",".join([get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints]),
+        finetuning_type=finetuning_type,
+        template=template,
+        export_dir=export_dir
+    )

+    yield ALERTS["info_exporting"][lang]
+    export_model(args, max_shard_size="{}GB".format(max_shard_size))
+    yield ALERTS["info_exported"][lang]


+def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
     with gr.Row():
         export_dir = gr.Textbox()
         max_shard_size = gr.Slider(value=10, minimum=1, maximum=100)
@@ -21,23 +59,21 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
     export_btn.click(
         save_model,
         [
-            engine.manager.get_elem("top.lang"),
-            engine.manager.get_elem("top.model_name"),
-            engine.manager.get_elem("top.model_path"),
-            engine.manager.get_elem("top.checkpoints"),
-            engine.manager.get_elem("top.finetuning_type"),
-            engine.manager.get_elem("top.template"),
+            engine.manager.get_elem_by_name("top.lang"),
+            engine.manager.get_elem_by_name("top.model_name"),
+            engine.manager.get_elem_by_name("top.model_path"),
+            engine.manager.get_elem_by_name("top.checkpoints"),
+            engine.manager.get_elem_by_name("top.finetuning_type"),
+            engine.manager.get_elem_by_name("top.template"),
             max_shard_size,
             export_dir
         ],
         [info_box]
     )

-    elem_dict.update(dict(
+    return dict(
         export_dir=export_dir,
         max_shard_size=max_shard_size,
         export_btn=export_btn,
         info_box=info_box
-    ))
-
-    return elem_dict
+    )

View File

@@ -31,9 +31,10 @@ def create_top() -> Dict[str, "Component"]:
     with gr.Accordion(label="Model config (LLaMA only)", open=False) as llama_tab:
         with gr.Row():
-            flash_attn = gr.Checkbox(value=False)
-            shift_attn = gr.Checkbox(value=False)
-            rope_scaling = gr.Dropdown(choices=["none", "linear", "dynamic"], value="none")
+            with gr.Column():
+                flash_attn = gr.Checkbox(value=False)
+                shift_attn = gr.Checkbox(value=False)
+            rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none")

     model_name.change(
         list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False

View File

@@ -5,7 +5,7 @@ from transformers.trainer_utils import SchedulerType
 from llmtuner.extras.constants import TRAINING_STAGES
 from llmtuner.webui.common import list_checkpoint, list_dataset, DEFAULT_DATA_DIR
 from llmtuner.webui.components.data import create_preview_box
-from llmtuner.webui.utils import can_preview, get_preview, gen_plot
+from llmtuner.webui.utils import gen_plot

 if TYPE_CHECKING:
     from gradio.components import Component
@@ -22,28 +22,14 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
         )
         dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2)
         dataset = gr.Dropdown(multiselect=True, scale=4)
-        data_preview_btn = gr.Button(interactive=False, scale=1)
+        preview_elems = create_preview_box(dataset_dir, dataset)

     training_stage.change(list_dataset, [dataset_dir, training_stage], [dataset], queue=False)
     dataset_dir.change(list_dataset, [dataset_dir, training_stage], [dataset], queue=False)
-    dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False)

     input_elems.update({training_stage, dataset_dir, dataset})
     elem_dict.update(dict(
-        training_stage=training_stage, dataset_dir=dataset_dir, dataset=dataset, data_preview_btn=data_preview_btn
-    ))
-
-    preview_box, preview_count, preview_samples, close_btn = create_preview_box()
-
-    data_preview_btn.click(
-        get_preview,
-        [dataset_dir, dataset],
-        [preview_count, preview_samples, preview_box],
-        queue=False
-    )
-
-    elem_dict.update(dict(
-        preview_count=preview_count, preview_samples=preview_samples, close_btn=close_btn
+        training_stage=training_stage, dataset_dir=dataset_dir, dataset=dataset, **preview_elems
     ))

     with gr.Row():
@@ -79,26 +65,30 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
             logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5)
             save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10)
             warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1)
+            neft_alpha = gr.Slider(value=0, minimum=0, maximum=10, step=0.1)

-    input_elems.update({logging_steps, save_steps, warmup_steps})
+            with gr.Column():
+                train_on_prompt = gr.Checkbox(value=False)
+                upcast_layernorm = gr.Checkbox(value=False)

+    input_elems.update({logging_steps, save_steps, warmup_steps, neft_alpha, train_on_prompt, upcast_layernorm})
     elem_dict.update(dict(
-        advanced_tab=advanced_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps
+        advanced_tab=advanced_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps,
+        neft_alpha=neft_alpha, train_on_prompt=train_on_prompt, upcast_layernorm=upcast_layernorm
     ))

     with gr.Accordion(label="LoRA config", open=False) as lora_tab:
         with gr.Row():
             lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1, scale=1)
             lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
-            lora_target = gr.Textbox(scale=2)
+            lora_target = gr.Textbox(scale=1)
+            additional_target = gr.Textbox(scale=1)
             resume_lora_training = gr.Checkbox(value=True, scale=1)

-    input_elems.update({lora_rank, lora_dropout, lora_target, resume_lora_training})
+    input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, resume_lora_training})
     elem_dict.update(dict(
-        lora_tab=lora_tab,
-        lora_rank=lora_rank,
-        lora_dropout=lora_dropout,
-        lora_target=lora_target,
-        resume_lora_training=resume_lora_training,
+        lora_tab=lora_tab, lora_rank=lora_rank, lora_dropout=lora_dropout, lora_target=lora_target,
+        additional_target=additional_target, resume_lora_training=resume_lora_training,
     ))

     with gr.Accordion(label="RLHF config", open=False) as rlhf_tab:
@@ -109,7 +99,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
refresh_btn.click( refresh_btn.click(
list_checkpoint, list_checkpoint,
[engine.manager.get_elem("top.model_name"), engine.manager.get_elem("top.finetuning_type")], [engine.manager.get_elem_by_name("top.model_name"), engine.manager.get_elem_by_name("top.finetuning_type")],
[reward_model], [reward_model],
queue=False queue=False
) )
@@ -139,19 +129,24 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
input_elems.add(output_dir) input_elems.add(output_dir)
output_elems = [output_box, process_bar] output_elems = [output_box, process_bar]
elem_dict.update(dict(
cmd_preview_btn=cmd_preview_btn, start_btn=start_btn, stop_btn=stop_btn, output_dir=output_dir,
resume_btn=resume_btn, process_bar=process_bar, output_box=output_box, loss_viewer=loss_viewer
))
cmd_preview_btn.click(engine.runner.preview_train, input_elems, output_elems) cmd_preview_btn.click(engine.runner.preview_train, input_elems, output_elems)
start_btn.click(engine.runner.run_train, input_elems, output_elems) start_btn.click(engine.runner.run_train, input_elems, output_elems)
stop_btn.click(engine.runner.set_abort, queue=False) stop_btn.click(engine.runner.set_abort, queue=False)
resume_btn.change(engine.runner.monitor, outputs=output_elems) resume_btn.change(engine.runner.monitor, outputs=output_elems)
elem_dict.update(dict(
cmd_preview_btn=cmd_preview_btn, start_btn=start_btn, stop_btn=stop_btn, output_dir=output_dir,
resume_btn=resume_btn, process_bar=process_bar, output_box=output_box, loss_viewer=loss_viewer
))
output_box.change( output_box.change(
gen_plot, gen_plot,
[engine.manager.get_elem("top.model_name"), engine.manager.get_elem("top.finetuning_type"), output_dir], [
engine.manager.get_elem_by_name("top.model_name"),
engine.manager.get_elem_by_name("top.finetuning_type"),
output_dir
],
loss_viewer, loss_viewer,
queue=False queue=False
) )
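The output_box.change hook above feeds the model name, finetuning type and output directory into gen_plot, which locates trainer_log.jsonl under the save directory and redraws the loss curve. A rough, self-contained sketch of that idea follows; the log field names such as current_steps are assumptions for illustration, not taken from this diff.

import json
import os
import matplotlib.pyplot as plt

def plot_loss(log_file: str):
    # Return nothing when the run has not produced a log yet, as gen_plot does.
    if not os.path.isfile(log_file):
        return None
    steps, losses = [], []
    with open(log_file, "r", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)  # one JSON object per line
            if "loss" in record:
                steps.append(record.get("current_steps", len(steps)))
                losses.append(record["loss"])
    fig, ax = plt.subplots()
    ax.plot(steps, losses)
    ax.set_xlabel("step")
    ax.set_ylabel("loss")
    return fig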

View File

@@ -6,10 +6,12 @@ CSS = r"""
   transform: translate(-50%, -50%); /* center horizontally */
   max-width: 1000px;
   max-height: 750px;
-  overflow-y: scroll !important;
+  overflow-y: auto;
   background-color: var(--input-background-fill);
+  flex-wrap: nowrap !important;
   border: 2px solid black !important;
   z-index: 1000;
+  padding: 10px;
 }

 .dark .modal-box {

View File

@@ -19,36 +19,36 @@ class Engine:
         self.chatter: "WebChatModel" = WebChatModel(manager=self.manager, lazy_init=(not pure_chat))

     def _form_dict(self, resume_dict: Dict[str, Dict[str, Any]]):
-        return {self.manager.get_elem(k): gr.update(**v) for k, v in resume_dict.items()}
+        return {self.manager.get_elem_by_name(k): gr.update(**v) for k, v in resume_dict.items()}

     def resume(self) -> Generator[Dict[Component, Dict[str, Any]], None, None]:
         user_config = load_config()
         lang = user_config.get("lang", None) or "en"

-        resume_dict = {
+        init_dict = {
             "top.lang": {"value": lang},
             "infer.chat_box": {"visible": self.chatter.loaded}
         }

         if not self.pure_chat:
-            resume_dict["train.dataset"] = {"choices": list_dataset()["choices"]}
-            resume_dict["eval.dataset"] = {"choices": list_dataset()["choices"]}
+            init_dict["train.dataset"] = {"choices": list_dataset()["choices"]}
+            init_dict["eval.dataset"] = {"choices": list_dataset()["choices"]}

             if user_config.get("last_model", None):
-                resume_dict["top.model_name"] = {"value": user_config["last_model"]}
-                resume_dict["top.model_path"] = {"value": get_model_path(user_config["last_model"])}
+                init_dict["top.model_name"] = {"value": user_config["last_model"]}
+                init_dict["top.model_path"] = {"value": get_model_path(user_config["last_model"])}

-        yield self._form_dict(resume_dict)
+        yield self._form_dict(init_dict)

-        if self.runner.alive:
-            yield {elem: gr.update(value=value) for elem, value in self.runner.data.items()}
-            if self.runner.do_train:
-                resume_dict = {"train.resume_btn": {"value": True}}
-            else:
-                resume_dict = {"eval.resume_btn": {"value": True}}
-        else:
-            resume_dict = {"train.output_dir": {"value": get_time()}}
-
-        yield self._form_dict(resume_dict)
+        if not self.pure_chat:
+            if self.runner.alive:
+                yield {elem: gr.update(value=value) for elem, value in self.runner.running_data.items()}
+                if self.runner.do_train:
+                    yield self._form_dict({"train.resume_btn": {"value": True}})
+                else:
+                    yield self._form_dict({"eval.resume_btn": {"value": True}})
+            else:
+                yield self._form_dict({"train.output_dir": {"value": get_time()}})

     def change_lang(self, lang: str) -> Dict[Component, Dict[str, Any]]:
         return {
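The _form_dict helper above is the glue between string keys and Gradio components: dotted "tab.elem" names are resolved through Manager.get_elem_by_name and paired with gr.update payloads. A small stand-alone illustration of the same pattern, with arbitrary example values:

import gradio as gr

def form_dict(manager, updates):
    # Mirrors Engine._form_dict: resolve each dotted name, wrap the payload in gr.update.
    return {manager.get_elem_by_name(name): gr.update(**payload) for name, payload in updates.items()}

# e.g. yield form_dict(engine.manager, {"train.resume_btn": {"value": True},
#                                       "train.output_dir": {"value": "2023-11-13-23-09-05"}})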

View File

@@ -14,15 +14,15 @@ from llmtuner.webui.css import CSS
 from llmtuner.webui.engine import Engine

-require_version("gradio==3.38.0", "To fix: pip install gradio==3.38.0")
+require_version("gradio>=3.38.0,<4.0.0", "To fix: pip install \"gradio>=3.38.0,<4.0.0\"")

 def create_ui() -> gr.Blocks:
     engine = Engine(pure_chat=False)

-    with gr.Blocks(title="Web Tuner", css=CSS) as demo:
+    with gr.Blocks(title="LLaMA Board", css=CSS) as demo:
         engine.manager.all_elems["top"] = create_top()
-        lang: "gr.Dropdown" = engine.manager.get_elem("top.lang")
+        lang: "gr.Dropdown" = engine.manager.get_elem_by_name("top.lang")

         with gr.Tab("Train"):
             engine.manager.all_elems["train"] = create_train_tab(engine)
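The version pin above is relaxed from an exact gradio==3.38.0 to any 3.x release at or above 3.38.0. Assuming require_version is the transformers helper (as it is imported elsewhere in this project), the check can be reproduced standalone:

from transformers.utils.versions import require_version

# Raises ImportError with the hint message if the installed gradio falls outside the range.
require_version("gradio>=3.38.0,<4.0.0", "To fix: pip install \"gradio>=3.38.0,<4.0.0\"")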

View File

@@ -163,12 +163,28 @@ LOCALES = {
             "label": "数量"
         }
     },
-    "preview_samples": {
+    "page_index": {
         "en": {
-            "label": "Samples"
+            "label": "Page"
         },
         "zh": {
-            "label": "样例"
+            "label": "页数"
+        }
+    },
+    "prev_btn": {
+        "en": {
+            "value": "Prev"
+        },
+        "zh": {
+            "value": "上一页"
+        }
+    },
+    "next_btn": {
+        "en": {
+            "value": "Next"
+        },
+        "zh": {
+            "value": "下一页"
         }
     },
     "close_btn": {
@@ -179,6 +195,14 @@ LOCALES = {
             "value": "关闭"
         }
     },
+    "preview_samples": {
+        "en": {
+            "label": "Samples"
+        },
+        "zh": {
+            "label": "样例"
+        }
+    },
     "cutoff_len": {
         "en": {
             "label": "Cutoff length",
@@ -309,6 +333,36 @@ LOCALES = {
             "info": "学习率预热采用的步数。"
         }
     },
+    "neft_alpha": {
+        "en": {
+            "label": "NEFTune Alpha",
+            "info": "Magnitude of noise adding to embedding vectors."
+        },
+        "zh": {
+            "label": "NEFTune 噪声参数",
+            "info": "嵌入向量所添加的噪声大小。"
+        }
+    },
+    "train_on_prompt": {
+        "en": {
+            "label": "Train on prompt",
+            "info": "Compute loss on the prompt tokens in supervised fine-tuning."
+        },
+        "zh": {
+            "label": "计算输入损失",
+            "info": "在监督微调时候计算输入序列的损失。"
+        }
+    },
+    "upcast_layernorm": {
+        "en": {
+            "label": "Upcast LayerNorm",
+            "info": "Upcast weights of layernorm in float32."
+        },
+        "zh": {
+            "label": "缩放归一化层",
+            "info": "将归一化层权重缩放至 32 位浮点数。"
+        }
+    },
     "lora_tab": {
         "en": {
             "label": "LoRA configurations"
@@ -340,11 +394,21 @@ LOCALES = {
     "lora_target": {
         "en": {
             "label": "LoRA modules (optional)",
-            "info": "The name(s) of target modules to apply LoRA. Use commas to separate multiple modules."
+            "info": "Name(s) of target modules to apply LoRA. Use commas to separate multiple modules."
         },
         "zh": {
-            "label": "LoRA 作用(非必填)",
-            "info": "应用 LoRA 的线性层名称。使用英文逗号分隔多个名称。"
+            "label": "LoRA 作用模块(非必填)",
+            "info": "应用 LoRA 的目标模块名称。使用英文逗号分隔多个名称。"
+        }
+    },
+    "additional_target": {
+        "en": {
+            "label": "Additional modules (optional)",
+            "info": "Name(s) of modules apart from LoRA layers to be set as trainable. Use commas to separate multiple modules."
+        },
+        "zh": {
+            "label": "附加模块(非必填)",
+            "info": "除 LoRA 层以外的可训练模块名称。使用英文逗号分隔多个名称。"
        }
    },
     "resume_lora_training": {

View File

@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING, Dict, List, Set

 if TYPE_CHECKING:
     from gradio.components import Component
@@ -9,14 +9,14 @@ class Manager:
     def __init__(self) -> None:
         self.all_elems: Dict[str, Dict[str, "Component"]] = {}

-    def get_elem(self, name: str) -> "Component":
+    def get_elem_by_name(self, name: str) -> "Component":
         r"""
         Example: top.lang, train.dataset
         """
         tab_name, elem_name = name.split(".")
         return self.all_elems[tab_name][elem_name]

-    def get_base_elems(self):
+    def get_base_elems(self) -> Set["Component"]:
         return {
             self.all_elems["top"]["lang"],
             self.all_elems["top"]["model_name"],

View File

@@ -26,12 +26,15 @@ class Runner:
     def __init__(self, manager: "Manager") -> None:
         self.manager = manager
+        """ Resume """
         self.thread: "Thread" = None
-        self.data: Dict["Component", Any] = None
         self.do_train = True
+        self.running_data: Dict["Component", Any] = None
         self.monitor_inputs: Dict[str, str] = None
+        """ State """
         self.aborted = False
         self.running = False
+        """ Handler """
         self.logger_handler = LoggerHandler()
         self.logger_handler.setLevel(logging.INFO)
         logging.root.addHandler(self.logger_handler)
@@ -45,7 +48,11 @@ class Runner:
         self.aborted = True
         self.running = False

-    def _initialize(self, lang: str, model_name: str, model_path: str, dataset: List[str]) -> str:
+    def _initialize(self, data: Dict[Component, Any], do_train: bool) -> str:
+        get = lambda name: data[self.manager.get_elem_by_name(name)]
+        lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path")
+        dataset = get("train.dataset") if do_train else get("eval.dataset")
+
         if self.running:
             return ALERTS["err_conflict"][lang]
@@ -72,8 +79,8 @@ class Runner:
         else:
             return finish_info

-    def _parse_train_args(self, data: Dict[Component, Any]) -> Tuple[str, str, str, List[str], str, Dict[str, Any]]:
-        get = lambda name: data[self.manager.get_elem(name)]
+    def _parse_train_args(self, data: Dict[Component, Any]) -> Dict[str, Any]:
+        get = lambda name: data[self.manager.get_elem_by_name(name)]
         user_config = load_config()

         if get("top.checkpoints"):
@@ -83,13 +90,10 @@ class Runner:
         else:
             checkpoint_dir = None

-        output_dir = get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.output_dir"))
-
         args = dict(
             stage=TRAINING_STAGES[get("train.training_stage")],
             model_name_or_path=get("top.model_path"),
             do_train=True,
-            overwrite_cache=False,
             cache_dir=user_config.get("cache_dir", None),
             checkpoint_dir=checkpoint_dir,
             finetuning_type=get("top.finetuning_type"),
@@ -112,11 +116,15 @@ class Runner:
             logging_steps=get("train.logging_steps"),
             save_steps=get("train.save_steps"),
             warmup_steps=get("train.warmup_steps"),
+            neft_alpha=get("train.neft_alpha"),
+            train_on_prompt=get("train.train_on_prompt"),
+            upcast_layernorm=get("train.upcast_layernorm"),
             lora_rank=get("train.lora_rank"),
             lora_dropout=get("train.lora_dropout"),
             lora_target=get("train.lora_target") or get_module(get("top.model_name")),
+            additional_target=get("train.additional_target") if get("train.additional_target") else None,
             resume_lora_training=get("train.resume_lora_training"),
-            output_dir=output_dir
+            output_dir=get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.output_dir"))
         )
         args[get("train.compute_type")] = True
         args["disable_tqdm"] = True
@@ -128,7 +136,7 @@ class Runner:
             args["upcast_layernorm"] = True

         if args["stage"] == "ppo":
-            args["reward_model"] = get("train.reward_model")
+            args["reward_model"] = get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.reward_model"))

         if args["stage"] == "dpo":
             args["dpo_beta"] = get("train.dpo_beta")
@@ -139,10 +147,10 @@ class Runner:
             args["eval_steps"] = get("train.save_steps")
             args["load_best_model_at_end"] = True

-        return get("top.lang"), get("top.model_name"), get("top.model_path"), get("train.dataset"), output_dir, args
+        return args

-    def _parse_eval_args(self, data: Dict[Component, Any]) -> Tuple[str, str, str, List[str], str, Dict[str, Any]]:
-        get = lambda name: data[self.manager.get_elem(name)]
+    def _parse_eval_args(self, data: Dict[Component, Any]) -> Dict[str, Any]:
+        get = lambda name: data[self.manager.get_elem_by_name(name)]
         user_config = load_config()

         if get("top.checkpoints"):
@@ -160,7 +168,6 @@ class Runner:
             stage="sft",
             model_name_or_path=get("top.model_path"),
             do_eval=True,
-            overwrite_cache=False,
             predict_with_generate=True,
             cache_dir=user_config.get("cache_dir", None),
             checkpoint_dir=checkpoint_dir,
@@ -179,34 +186,35 @@ class Runner:
             max_new_tokens=get("eval.max_new_tokens"),
             top_p=get("eval.top_p"),
             temperature=get("eval.temperature"),
-            output_dir=get("eval.output_dir")
+            output_dir=output_dir
         )

         if get("eval.predict"):
             args.pop("do_eval", None)
             args["do_predict"] = True

-        return get("top.lang"), get("top.model_name"), get("top.model_path"), get("eval.dataset"), output_dir, args
+        return args

     def _preview(self, data: Dict[Component, Any], do_train: bool) -> Generator[Tuple[str, Dict[str, Any]], None, None]:
-        parse_func = self._parse_train_args if do_train else self._parse_eval_args
-        lang, model_name, model_path, dataset, _, args = parse_func(data)
-        error = self._initialize(lang, model_name, model_path, dataset)
+        error = self._initialize(data, do_train)
         if error:
+            gr.Warning(error)
             yield error, gr.update(visible=False)
         else:
+            args = self._parse_train_args(data) if do_train else self._parse_eval_args(data)
             yield gen_cmd(args), gr.update(visible=False)

     def _launch(self, data: Dict[Component, Any], do_train: bool) -> Generator[Tuple[str, Dict[str, Any]], None, None]:
-        parse_func = self._parse_train_args if do_train else self._parse_eval_args
-        lang, model_name, model_path, dataset, output_dir, args = parse_func(data)
-        self.data, self.do_train, self.monitor_inputs = data, do_train, dict(lang=lang, output_dir=output_dir)
-        error = self._initialize(lang, model_name, model_path, dataset)
+        error = self._initialize(data, do_train)
         if error:
+            gr.Warning(error)
             yield error, gr.update(visible=False)
         else:
-            self.running = True
+            args = self._parse_train_args(data) if do_train else self._parse_eval_args(data)
             run_kwargs = dict(args=args, callbacks=[self.trainer_callback])
+            self.running = True
+            self.do_train, self.running_data = do_train, data
+            self.monitor_inputs = dict(lang=data[self.manager.get_elem_by_name("top.lang")], output_dir=args["output_dir"])
             self.thread = Thread(target=run_exp, kwargs=run_kwargs)
             self.thread.start()
             yield from self.monitor()
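Both parsers now share the same lookup pattern: the Gradio callback hands over a dict keyed by Component objects, and each value is fetched by its dotted name. A minimal sketch of that pattern outside the class; manager and data are assumed to come from a click callback such as preview_train or run_train above, and the selected keys are just examples.

def read_train_inputs(manager, data):
    # data maps gradio Components to their current values; resolve them by dotted name.
    get = lambda name: data[manager.get_elem_by_name(name)]
    return dict(
        lang=get("top.lang"),
        model_name_or_path=get("top.model_path"),
        dataset=get("train.dataset"),
        lora_target=get("train.lora_target"),
    )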

View File

@@ -3,13 +3,11 @@ import json
 import gradio as gr
 import matplotlib.figure
 import matplotlib.pyplot as plt
-from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict
 from datetime import datetime

 from llmtuner.extras.ploting import smooth
-from llmtuner.tuner import export_model
-from llmtuner.webui.common import get_save_dir, DATA_CONFIG
-from llmtuner.webui.locales import ALERTS
+from llmtuner.webui.common import get_save_dir

 if TYPE_CHECKING:
     from llmtuner.extras.callbacks import LogCallback
@@ -33,37 +31,6 @@ def get_time() -> str:
     return datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

-def can_preview(dataset_dir: str, dataset: list) -> Dict[str, Any]:
-    with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
-        dataset_info = json.load(f)
-    if (
-        len(dataset) > 0
-        and "file_name" in dataset_info[dataset[0]]
-        and os.path.isfile(os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"]))
-    ):
-        return gr.update(interactive=True)
-    else:
-        return gr.update(interactive=False)
-
-def get_preview(
-    dataset_dir: str, dataset: list, start: Optional[int] = 0, end: Optional[int] = 2
-) -> Tuple[int, list, Dict[str, Any]]:
-    with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
-        dataset_info = json.load(f)
-    data_file: str = dataset_info[dataset[0]]["file_name"]
-    with open(os.path.join(dataset_dir, data_file), "r", encoding="utf-8") as f:
-        if data_file.endswith(".json"):
-            data = json.load(f)
-        elif data_file.endswith(".jsonl"):
-            data = [json.loads(line) for line in f]
-        else:
-            data = [line for line in f]
-    return len(data), data[start:end], gr.update(visible=True)
-
 def can_quantize(finetuning_type: str) -> Dict[str, Any]:
     if finetuning_type != "lora":
         return gr.update(value="None", interactive=False)
@@ -90,9 +57,11 @@ def get_eval_results(path: os.PathLike) -> str:

 def gen_plot(base_model: str, finetuning_type: str, output_dir: str) -> matplotlib.figure.Figure:
+    if not base_model:
+        return
+
     log_file = get_save_dir(base_model, finetuning_type, output_dir, "trainer_log.jsonl")
     if not os.path.isfile(log_file):
-        return None
+        return

     plt.close("all")
     fig = plt.figure()
@@ -114,42 +83,3 @@ def gen_plot(base_model: str, finetuning_type: str, output_dir: str) -> matplotl
     ax.set_xlabel("step")
     ax.set_ylabel("loss")
     return fig
-
-def save_model(
-    lang: str,
-    model_name: str,
-    model_path: str,
-    checkpoints: List[str],
-    finetuning_type: str,
-    template: str,
-    max_shard_size: int,
-    export_dir: str
-) -> Generator[str, None, None]:
-    if not model_name:
-        yield ALERTS["err_no_model"][lang]
-        return
-
-    if not model_path:
-        yield ALERTS["err_no_path"][lang]
-        return
-
-    if not checkpoints:
-        yield ALERTS["err_no_checkpoint"][lang]
-        return
-
-    if not export_dir:
-        yield ALERTS["err_no_export_dir"][lang]
-        return
-
-    args = dict(
-        model_name_or_path=model_path,
-        checkpoint_dir=",".join([get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints]),
-        finetuning_type=finetuning_type,
-        template=template,
-        export_dir=export_dir
-    )
-
-    yield ALERTS["info_exporting"][lang]
-    export_model(args, max_shard_size="{}GB".format(max_shard_size))
-    yield ALERTS["info_exported"][lang]
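The removed can_preview/get_preview helpers are superseded by create_preview_box (see the train tab diff above), which adds page_index, prev_btn and next_btn controls. A hedged sketch of a paginated preview along those lines; the page size and return shape are assumptions, and the file handling simply reuses the logic of the removed get_preview.

import json
import os

PAGE_SIZE = 2  # assumed number of samples shown per page

def preview_page(dataset_dir: str, data_file: str, page_index: int):
    # Load the dataset file as the removed get_preview did, then slice by page instead of start/end.
    with open(os.path.join(dataset_dir, data_file), "r", encoding="utf-8") as f:
        if data_file.endswith(".json"):
            data = json.load(f)
        elif data_file.endswith(".jsonl"):
            data = [json.loads(line) for line in f]
        else:
            data = list(f)
    start = page_index * PAGE_SIZE
    return len(data), data[start:start + PAGE_SIZE]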