1295 Commits

Author SHA1 Message Date
hiyouga
c0c387e4db release v0.8.0
Former-commit-id: 004db680b9e3996ec511ee818df6c0c02bf13603
2024-06-08 05:20:54 +08:00
hiyouga
ae60ea15da add ultrafeedback and fineweb #4085 #4132
Former-commit-id: 968e4992e2f2a3ccba73e8668f1654ddc6eb0034
2024-06-08 02:42:34 +08:00
hiyouga
72cd1123a8 fix ci
Former-commit-id: 3f4d293fd861d765edb2040f80d16f99a5e1e3c6
2024-06-08 02:00:44 +08:00
hiyouga
1364190a66 fix ci
Former-commit-id: 95aceebd61d195be5c980a919c12c59b56722898
2024-06-08 01:57:36 +08:00
hiyouga
6d17c59090 add ci
Former-commit-id: 3ea3acdadaa54abe33d93538580196cfdd91ee56
2024-06-08 01:48:30 +08:00
hiyouga
e0f2c0b5dc init unittest
Former-commit-id: 1c6f21cb8878ced043fe0b27c72cad2ef6ee990e
2024-06-08 01:35:58 +08:00
hiyouga
073e34855d Delete .readthedocs.yaml
Former-commit-id: dd3ee514216a9a329519c58d79208040adcad126
2024-06-08 00:58:10 +08:00
hiyouga
ff9ba70bb8 reorganize adapter code
Former-commit-id: b26c2df9d97f4efffccbf7d28de13619b43f10dd
2024-06-08 00:47:23 +08:00
hoshi-hiyouga
adbebb0e3f fix #4139
Former-commit-id: c025a4d74f293c14c2705e68af20a82a84608520
2024-06-08 00:45:02 +08:00
hiyouga
3f6b3eed98 add resume args in webui
Former-commit-id: 1d86ad768b1f36e54b4c2a9f18f6ea5a7df04c90
2024-06-08 00:22:16 +08:00
hiyouga
f45e81e186 fix #4137
Former-commit-id: cdc0d6f5a2e5040e145c82c4801f37bd76529047
2024-06-07 19:16:06 +08:00
hiyouga
ba648fd003 tiny fix
Former-commit-id: 0621bcad1dfbe8ce2464f741d4256c5df2a8d1b6
2024-06-07 05:19:21 +08:00
hiyouga
b0e5a76f4c fix ppo trainer save zero3 model
accelerator.get_state_dict(ds_model) should be called on all ranks


Former-commit-id: 3a0f60f0aa072531e4ae5819ec00c8fa42aa0913
2024-06-07 05:14:19 +08:00
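Note: the pattern behind this fix, as a minimal sketch (names are illustrative, not the trainer's actual code) — under DeepSpeed ZeRO-3 the parameters are sharded across ranks, so accelerator.get_state_dict() is a collective operation that every rank must enter, while only the main process writes to disk:

    import torch
    from accelerate import Accelerator

    def save_zero3_model(accelerator: Accelerator, ds_model, output_dir: str) -> None:
        # Collective all-gather of the full state dict: must run on ALL ranks,
        # otherwise the ranks that skip it deadlock inside the gather.
        state_dict = accelerator.get_state_dict(ds_model)
        if accelerator.is_main_process:
            # Only rank 0 writes the checkpoint.
            torch.save(state_dict, f"{output_dir}/pytorch_model.bin")
        accelerator.wait_for_everyone()
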
hiyouga
8692796c9b fix ppo in trl 0.8.6
Former-commit-id: 5e0d66a0d80b4bd4a8506e2317209d8fb9d25ff6
2024-06-07 04:48:29 +08:00
hiyouga
d0edcde4ea fix #4120
Former-commit-id: 2a44da678a5e360a9c0f9056397ac9e801329321
2024-06-07 04:18:05 +08:00
hiyouga
8c4c2e580c update data processors
Former-commit-id: 04b138cbcb8b9a72e4bbda6c65843bb459e525e7
2024-06-07 04:15:40 +08:00
hoshi-hiyouga
07f33e7641 Merge pull request #4009 from AlongWY/main
supervised packing with greedy knapsack algorithm

Former-commit-id: 5ded166b39a75a98ded5733678f5a1eab7d4cc71
2024-06-07 03:48:46 +08:00
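Note: a sketch of the greedy knapsack idea named in this PR — sort samples by length, then repeatedly fill a "knapsack" of capacity cutoff_len with the longest samples that still fit, so packed sequences carry minimal padding (illustrative, not the merged code):

    from typing import List

    def greedy_knapsack(lengths: List[int], capacity: int) -> List[List[int]]:
        """Pack sample lengths into groups whose sums stay within `capacity`."""
        items = sorted(lengths, reverse=True)  # longest first
        knapsacks: List[List[int]] = []
        while items:
            current: List[int] = []
            remaining = capacity
            leftover: List[int] = []
            for length in items:
                if length <= remaining:  # greedily take whatever still fits
                    current.append(length)
                    remaining -= length
                else:
                    leftover.append(length)
            knapsacks.append(current)
            items = leftover
        return knapsacks

    # greedy_knapsack([9, 7, 5, 3, 2], capacity=10) -> [[9], [7, 3], [5, 2]]
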
hoshi-hiyouga
1998c641af Update supervised.py
Former-commit-id: 04b6c2a754e602e0b698cfe6c255c2f2486d8865
2024-06-07 03:42:08 +08:00
hoshi-hiyouga
be1e5f9d62 Update supervised.py
Former-commit-id: 49993c4f4e1f871a22ff0196afe60026b668a4dc
2024-06-07 03:38:23 +08:00
hoshi-hiyouga
fdeec6db52 Update supervised.py
Former-commit-id: 67625b5278a839c12a3e4245f9e90af67d8b11b4
2024-06-07 03:38:04 +08:00
hiyouga
a4d335b42f add qwen2 models
Former-commit-id: 49cb694d02c876e3740a003a8b332349f4310ad3
2024-06-07 00:22:57 +08:00
hiyouga
fcb134e144 rename files
Former-commit-id: e1a8431770fc36c0c9ee7fed4abbc3d7fdcc5efd
2024-06-07 00:09:06 +08:00
hiyouga
a47e24222a add DISABLE_TORCHRUN option
Former-commit-id: bcc574b479c2101438723aadead42743d4378776
2024-06-06 23:44:58 +08:00
hoshi-hiyouga
b96b995620 Merge pull request #4082 from MengqingCao/bugfix
Fix #4077

Former-commit-id: 288028c3fb6bb1b58d1b7f4e8b90108c9bbf27d1
2024-06-06 23:38:40 +08:00
hoshi-hiyouga
c231706aa5 Update cli.py
Former-commit-id: 32190507534adf5f505858b3af2b592ca6568ac7
2024-06-06 23:38:09 +08:00
hiyouga
35b5117a59 fix ppo+zero3 #3108
Former-commit-id: 33a93cc29e3e57bf001515000c0a70c112573dea
2024-06-06 23:30:07 +08:00
hiyouga
80f716bc10 fix torch gc
Former-commit-id: e173799d057598e5692a407601c30d8ce1513461
2024-06-06 20:30:25 +08:00
hiyouga
ca95e98ca0 fix ppo dataset bug #4012
Former-commit-id: 7fc51b2e93698ae5e012566af8481f4d861c873d
2024-06-06 19:03:20 +08:00
hiyouga
d5559461c1 update trainers
Former-commit-id: b7f6c4a171293cf4f3e88f15a811f847342f84ee
2024-06-06 18:45:49 +08:00
hiyouga
f4acd81e2f fix base64 image read #4061
Former-commit-id: 66ccb2a27a04296b4600f2c85f428071bf14eeb0
2024-06-06 17:29:19 +08:00
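Note: the robustness issue behind this fix, sketched under the assumption that images may arrive either as bare base64 payloads or as full data URIs — strip any "data:image/...;base64," prefix before decoding (hypothetical helper, not the repository's exact code):

    import base64
    import io

    from PIL import Image

    def read_base64_image(data: str) -> Image.Image:
        # Accept both "data:image/png;base64,AAAA..." and bare "AAAA..." inputs.
        if data.startswith("data:"):
            data = data.split(",", 1)[1]
        return Image.open(io.BytesIO(base64.b64decode(data)))
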
hiyouga
31feb6e26c update readme
Former-commit-id: cc331fa2d28afe081937c50ea83d63add21d4e3a
2024-06-06 16:59:18 +08:00
hiyouga
7d5c0a069c update readme
Former-commit-id: fb1f709af5199976e63d7188e088e33c75d19bfe
2024-06-06 16:25:42 +08:00
hiyouga
937f49ec3d lora modules: all by default
Former-commit-id: 52c4ae87c7f4312704c31ef26b079b2c5b95ea5f
2024-06-06 03:53:28 +08:00
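Note: "all" here targets every linear layer instead of only the attention projections; in peft this maps to the special target_modules value below (a configuration sketch, assuming peft's "all-linear" shorthand):

    from peft import LoraConfig

    lora_config = LoraConfig(
        r=8,
        lora_alpha=16,
        target_modules="all-linear",  # wrap every nn.Linear except the output head
        task_type="CAUSAL_LM",
    )
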
hiyouga
abc2a73a33 add codestral 22B
Former-commit-id: b011c7f527a57cb1d21c4e2c9631c2fb62bb835e
2024-06-06 03:42:50 +08:00
hiyouga
5e1bf7572c lint
Former-commit-id: 9030501eaef97ea249347198272adf0d709503ec
2024-06-06 03:33:44 +08:00
hoshi-hiyouga
8fdb32d0a3 Merge pull request #4066 from injet-zhou/main
add throughput entry to training log

Former-commit-id: d2816f343f405f3fab09f2a8eade774b886e8f92
2024-06-06 03:32:04 +08:00
hoshi-hiyouga
c709d5f7db Merge pull request #4080 from MengqingCao/npu
Add npu option for model exporting

Former-commit-id: 07fc67193ef6bcb8e8a392aff0c57a2eb36832bf
2024-06-06 03:15:44 +08:00
hoshi-hiyouga
f5b2749ec2 Update export.py
Former-commit-id: 694833c1104d13929d4f181f014a121f25955dc5
2024-06-06 03:14:46 +08:00
hoshi-hiyouga
ee5853c565 Update model_args.py
Former-commit-id: 09c0afd94a8a5f5b45a61b32c983d50e1b9e2941
2024-06-06 03:14:23 +08:00
hoshi-hiyouga
6ec6df8a5f Merge pull request #4053 from hzhaoy/feature/add_select_config_file
Support selecting saved configuration files

Former-commit-id: 568ef3cf2a793f268cbe01c39dec418a13e61ecd
2024-06-06 03:06:03 +08:00
hiyouga
fc95800840 add vllm_dtype arg #3387 #3717
Former-commit-id: a0dd3a6351bb78541d40fec1d2fc457d803c86a4
2024-06-06 02:53:27 +08:00
hiyouga
765715af21 support train from scratch #4033 #4075
Former-commit-id: 1290b9d01077e62f8de7a23637daa2586cc82bfa
2024-06-06 02:43:19 +08:00
hiyouga
639a7f6796 support image input in api #3971 #4061
Former-commit-id: c70aaf763ef22fb83ce3635e8ffd5ec4c89c1cb0
2024-06-06 02:29:55 +08:00
hiyouga
35379c7c0e update train hparams
Former-commit-id: 1ca9fce55b55bf209f4b76152b586731932a3f39
2024-06-06 01:49:20 +08:00
hiyouga
d992f5353f fix setup
Former-commit-id: b2b80d434fcc0c3838d229098e1c21d26632204c
2024-06-06 01:39:02 +08:00
hiyouga
875eef45f3 add llamafactory-cli env
Former-commit-id: 1df077184845ff5f394b9324d46f8c382869e590
2024-06-06 01:28:14 +08:00
hiyouga
556a4aa972 fix #4090
Former-commit-id: d9f15f30a8f4bc64778a5c96baeb6801700d7a2c
2024-06-06 00:50:32 +08:00
MengqingCao
8dc1969111 modify export_device option
Former-commit-id: b2fc4a5499e21a5b9622c2285402efef6e27a74d
2024-06-05 09:37:36 +00:00
hiyouga
b74c229498 fix #4079
Former-commit-id: fda732d7f4616373844c97beff416880260f49db
2024-06-05 16:56:54 +08:00
hiyouga
3dbca466fd update readme
Former-commit-id: 02d34db29a7a35c25711d49e98fd3167a2f4dfe7
2024-06-05 16:32:32 +08:00
MengqingCao
ce6f7fdb82 fix #4077
Former-commit-id: fedbe92f3b56294acc6c49f9a51e369cf2de3ead
2024-06-05 08:03:30 +00:00
hiyouga
7528bc1bc0 support glm-4
Former-commit-id: a10f4718fbf3f3c89dc7eb31cb8e1a46ca6adda5
2024-06-05 15:16:38 +08:00
MengqingCao
9dd5f7d642 add npu for model export
Former-commit-id: ce020b6eb3f35c1db37ee4835e694eddcd0f59b0
2024-06-05 07:06:40 +00:00
faddddeout
99ecb0daaf add throughput entry to log
Former-commit-id: 691f999f64c7bac78761e4354f89816d2f0d46fc
2024-06-04 11:04:29 +00:00
hzhaoy
39d8d7995a add: support selecting saved configuration files and loading training parameters
Former-commit-id: 5c9b17c1dc9093da0ea813642bce9b5c9ae96274
2024-06-04 10:33:43 +08:00
hiyouga
2ac2cde03e tiny fix
Former-commit-id: f9d50501aac1f60a3b445ca3fee9aa60995461ee
2024-06-04 00:31:10 +08:00
hiyouga
aa6c3766de fix #3873
Former-commit-id: 1ac325b4d682bb493573c18bb0b67ceae8d0d372
2024-06-04 00:21:50 +08:00
hiyouga
f4f5d7e3ce fix #3992
Former-commit-id: a48321fbf5196b88a11106cf74a74fbcea2ea50b
2024-06-04 00:17:36 +08:00
hiyouga
efbf6018d3 fix abort in webui DDP mode
Former-commit-id: b90ac72d753b13a3eed9cb8b898fac2f2fe5153f
2024-06-04 00:10:24 +08:00
hoshi-hiyouga
1090bb8bf3 Merge pull request #3987 from injet-zhou/main
Fix inability to interrupt training when using multiple GPUs in webui

Former-commit-id: 455bb158b0e600723d2afaa2070b71178f2f5188
2024-06-04 00:04:07 +08:00
hiyouga
26bc79f971 fix #4043
Former-commit-id: 67af68f4fc5232760c57b3a0ae780628da09db6a
2024-06-03 23:30:37 +08:00
hiyouga
4c1f015eca remove gc warnings in DPO&KTO
Former-commit-id: b649bdcbafb464a638387429b770fe258b41f8af
2024-06-03 22:53:54 +08:00
hoshi-hiyouga
0655a183d3 Merge pull request #4045 from enji-zhou/feature/add_kto
fix KTO Trainer Sampler

Former-commit-id: 8e235beb9cf4939c06ccb753b047326a9839e77f
2024-06-03 22:09:25 +08:00
hoshi-hiyouga
7754024e9b Update trainer.py
Former-commit-id: 8565d4b43db905374c328ae57c71fc226980d14f
2024-06-03 22:08:38 +08:00
enji.zhou
b4913569a8 fix KTO Trainer Sampler
Former-commit-id: 39eb1bfa272011554322e9bb2534f83b68282a70
2024-06-03 21:32:38 +08:00
hoshi-hiyouga
eae9f09ca8 Merge pull request #4006 from Uminosachi/scheduler-kwargs
Set scheduler_specific_kwargs to get_scheduler

Former-commit-id: c6ed1955fd8990ddb960750913c9d8b13fe0ace3
2024-06-03 19:27:53 +08:00
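Note: for reference, transformers' get_scheduler() forwards a scheduler_specific_kwargs dict to the underlying scheduler factory; a minimal, self-contained usage sketch (scheduler name and kwargs are examples):

    import torch
    from transformers import get_scheduler

    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler(
        "cosine_with_min_lr",
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=10_000,
        scheduler_specific_kwargs={"min_lr_rate": 0.1},  # passed through to the scheduler
    )
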
hiyouga
8264e5ceaa update placeholder in issue template
Former-commit-id: 5503a90d7e38273b67129e0b9eb62bd1fd23154f
2024-06-03 19:24:10 +08:00
hoshi-hiyouga
b76f319e45 Merge pull request #4011 from statelesshz/issue-template
Update bug-report.yml

Former-commit-id: 1fbc46f45ae4e673f0b20b5eacab3d81d1053807
2024-06-03 19:20:43 +08:00
hiyouga
82d744716a fix #4005 #4013
Former-commit-id: 8608fa268cde5cddf8d0c6c2eb2cb5fa246c1831
2024-06-03 19:12:29 +08:00
hoshi-hiyouga
1a3764ab8f Merge pull request #4007 from xu-song/patch-3
Update model_args.py

Former-commit-id: d88b3a0f2707bcc964f642d348295b99f7c796f8
2024-06-03 18:54:37 +08:00
hiyouga
d2ede9d393 fix #4022
Former-commit-id: 9541f2f1f1b7d7877eb734f051048e52003a3430
2024-06-03 18:38:36 +08:00
hiyouga
5690f513fc bump versions
transformers 4.37.2->4.41.2
datasets 2.14.3->2.16.0
accelerate 0.27.2->0.30.1
peft 0.10.0->0.11.1
trl 0.8.1->0.8.6


Former-commit-id: 5f1e041f7295bf42a41dd4d9e7f0c42fcc37fed2
2024-06-03 18:29:38 +08:00
hiyouga
123a845209 fix data loader hint
Former-commit-id: 25b56126a11591b0155e2f72b673dd8f45a6c8c9
2024-06-03 18:28:27 +08:00
ylfeng
b1b7d735b3 remove empty line
Former-commit-id: 3164710971a6d6545629f5bf133f98de5ff0991a
2024-05-31 21:43:08 +08:00
ylfeng
230c69f7ce fix eos
Former-commit-id: 6e236c952958cbfe50b5dcb7b8eff6aea8477922
2024-05-31 21:40:41 +08:00
ylfeng
bfc43558ef supervised packing with greedy knapsack algorithm
Former-commit-id: 24d12396c9aabd49da0b08719068f24679111cc6
2024-05-31 15:33:54 +08:00
Xu Song
f2ae2cc04d Update model_args.py
Former-commit-id: f1e018587e5722e41962abd60f74043a3e55f692
2024-05-31 14:35:48 +08:00
statelesshz
6e9c03f958 Update bug-report.yml
Former-commit-id: a8561502360c1e247eeacb46b77ffbcf3387c482
2024-05-31 13:18:18 +08:00
Uminosachi
2696f614a7 Set scheduler_specific_kwargs to get_scheduler
Former-commit-id: f04e70dfab44480ef4c015c06470443237f69ba9
2024-05-31 13:45:39 +09:00
hiyouga
070b944895 update readme
Former-commit-id: 3b92d8c2ddb288b849f38e573ca168cab23315d2
2024-05-30 16:40:17 +08:00
faddddeout
f5f091d390 fix inability to interrupt training when using multiple GPUs in webui
Former-commit-id: a7fb02d52bc202c958490aa7081252be5d9eff50
2024-05-30 08:39:21 +00:00
hiyouga
14ab14a0e6 fix #3837
Former-commit-id: 72965aa3f13a9c085c29781b6790d80d00a545d8
2024-05-30 00:52:26 +08:00
hoshi-hiyouga
4f7c850115 Merge pull request #3829 from seanzhang-zhichen/add_dataset_sample_num
Add dataset sample num

Former-commit-id: ab38cf74ce48ea4f1800e077ca287f2eb9336135
2024-05-30 00:25:45 +08:00
hoshi-hiyouga
391eca66cf Update loader.py
Former-commit-id: 0aa59322906d91c5e385c9c02ebb5dd64ba060f3
2024-05-30 00:20:20 +08:00
hoshi-hiyouga
a67199246d Update loader.py
Former-commit-id: aa7f335e3ad5a78e4ed5f99c120be28e9733ea2e
2024-05-30 00:17:21 +08:00
hoshi-hiyouga
5f67fdaac9 Update loader.py
Former-commit-id: 19d8fd62c18ee3ba0e431fc241f7d315cb716fef
2024-05-30 00:12:12 +08:00
hoshi-hiyouga
05e6fe4287 Update parser.py
Former-commit-id: 310cc11e8c83f16fc5bccc349c38fea347ea9a97
2024-05-30 00:05:20 +08:00
hoshi-hiyouga
91cc571e6e Update README_zh.md
Former-commit-id: 3007d260ed45169583a74497a53b661337dd5f71
2024-05-30 00:04:47 +08:00
hoshi-hiyouga
890926e60c Update README.md
Former-commit-id: 65fb69e388c0a04c15ecd11441e567966f51fae5
2024-05-30 00:04:26 +08:00
hiyouga
87aa332583 better llamaboard
* easily resume from checkpoint
* support full and freeze checkpoints
* faster ui


Former-commit-id: 84cfb2452cc86b037ccddee6e833f8eb7c129fa4
2024-05-29 23:55:38 +08:00
hiyouga
f90c4ca672 fix cohere system
Former-commit-id: 5d629b29e705c8ff8dd4521719d9c0e67a3fe0a2
2024-05-29 20:58:23 +08:00
hiyouga
a922e85a5c fix #3965
Former-commit-id: 37d15ac55d0be0ff47d6a88f07e2d823117a4a36
2024-05-29 20:55:51 +08:00
hiyouga
9a65820592 update readme
Former-commit-id: 440e9de66986ef7736361ce8ec3e23ce68655a56
2024-05-29 18:39:11 +08:00
hoshi-hiyouga
f4e16ae373 Merge pull request #3930 from MengqingCao/npu
Add Ascend npu doc and dependency

Former-commit-id: 7210090e4fc6531b9f6122f104875811a8798185
2024-05-29 18:33:38 +08:00
MengqingCao
e2cfd34da0 update torch-npu version
Former-commit-id: a70d7fcf2967eb30280a1fb845b39db7878f535c
2024-05-29 10:05:11 +00:00
MengqingCao
668dea9706 update cann kernels url
Former-commit-id: 23c65e9d7e8817b5815264e44cbf4a7bcb88d3d7
2024-05-29 09:53:31 +00:00
hoshi-hiyouga
084be442f2 Merge pull request #3958 from hzhaoy/add_telechat_12b_support
add TeleChat-12B/TeleChat-12B-v2 models

Former-commit-id: c228546a09764423ae66966079802022185f7e86
2024-05-29 17:20:53 +08:00
hzhaoy
29cb4a1327 add TeleChat-12B/TeleChat-12B-v2 models
Former-commit-id: e0675385c88af03aaef8d51586c8a282829c4051
2024-05-29 15:00:37 +08:00
hiyouga
81a61134b8 fix hf chat engine
Former-commit-id: 76ce52911690ab0dd8ffa5587127afb4ec942abe
2024-05-29 01:20:07 +08:00
hiyouga
cb1a49aa02 add ds config to webui
Former-commit-id: 66d72b263d36dc81de9f6152077663b613035977
2024-05-29 01:13:17 +08:00
hiyouga
351b4efc6c 10x generate in ppo w/ zero3
https://github.com/huggingface/trl/pull/1483

Former-commit-id: 5dc43ba8b373d8803bc22d88b3d0d95ef8b9c7f8
2024-05-29 00:23:23 +08:00
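Note: the linked TRL pull request speeds up generation under ZeRO-3 by gathering the sharded weights once per generate() call instead of re-gathering every parameter at each decoding step; a usage sketch, assuming the unwrap_model_for_generation helper from that PR (model, accelerator, and input_ids come from the surrounding PPO loop):

    from trl.models.utils import unwrap_model_for_generation

    with unwrap_model_for_generation(model, accelerator) as unwrapped_model:
        outputs = unwrapped_model.generate(input_ids, max_new_tokens=64)
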
hiyouga
9b551309de update dpo, kto trainer
Former-commit-id: 4a6cc3c7046f8b27d05ea53ef216bab6fa7ebfaf
2024-05-29 00:14:29 +08:00
hiyouga
9fed4a2ef4 clean kto trainer
Former-commit-id: 76402bd78cbd3a99a544f0ac019468b569b0e1d1
2024-05-28 21:43:26 +08:00
hiyouga
bceac4f554 bump vllm version to 0.4.1
Former-commit-id: a00fd39a4c2f270620711f2bfbad8d460fb4aa89
2024-05-28 21:27:27 +08:00
hiyouga
ae3a88d3a7 update readme
Former-commit-id: bc861f76706df3f643028f1dfc8ec2044b067a08
2024-05-28 19:35:52 +08:00
hiyouga
9138a7a5ba support DDP in webui
Former-commit-id: d059262ff8dc857f597d2657546ec625726a664a
2024-05-28 19:24:22 +08:00
hiyouga
9912b43fcc update readme
Former-commit-id: e2c7de1b5147801b301cfc5da0e2866273da18f5
2024-05-28 16:41:34 +08:00
hiyouga
5ac37555a4 update readme
Former-commit-id: 30ef8ee1e86136f38f105b67f70c417d20552f41
2024-05-28 16:19:56 +08:00
hiyouga
34bdc730a6 fix #3931
Former-commit-id: 47e0072416b545d9718af4fa266a83f747b9a4f7
2024-05-28 13:44:22 +08:00
MengqingCao
e45a9d70fc add Ascend npu doc and dependency
Former-commit-id: 803d9f142a294f8c1e0b4e2046c214b0857ccfd6
2024-05-28 01:33:54 +00:00
hoshi-hiyouga
232b36059c Merge pull request #3925 from Yimi81/feat-fix-yi-template
fix yi template

Former-commit-id: 6caee1eb868b9f7b00578c6608883e89aa232d17
2024-05-27 22:59:32 +08:00
Yimi81
d9fbd675d5 fix yi template
Former-commit-id: b3669c8989c3adda305416245e32e9e5a3b7caac
2024-05-27 13:11:25 +00:00
hiyouga
0206e7b9de tiny fix
Former-commit-id: 4c47b3dcef9e400a1c35fce1ad53619a0a86fe81
2024-05-27 20:54:26 +08:00
hoshi-hiyouga
a886544d3d Merge pull request #3921 from gusye1234/main
Add openchat-3.6-8B support

Former-commit-id: 92e6bba3cab22b7835a68f787caf7992a398978e
2024-05-27 20:52:37 +08:00
hoshi-hiyouga
8c9b929bb0 Update template.py
Former-commit-id: f4dabce0a71c9978e051e70886941b64b928ffe2
2024-05-27 20:51:56 +08:00
hoshi-hiyouga
1bb1ae834e Update template.py
Former-commit-id: af869e4c48eb426c4078415533f6dab89123a9d8
2024-05-27 20:51:26 +08:00
Jianbai Ye
0d9e364a90 add openchat-3.6-8B support
Former-commit-id: b66f39d50d896d7597a1506e67ec210b31c9b700
2024-05-27 20:42:08 +08:00
hiyouga
3b28c003dd fix full/freeze tuning for mllm
Former-commit-id: df5860ddb593d5b82163a585d12160b41dbce0f3
2024-05-27 20:37:57 +08:00
hoshi-hiyouga
48ff9fb150 Merge pull request #3835 from BUAADreamer/main
fix some features in llava-style training

Former-commit-id: fc8583bd17dfb088a52e4d8fa91356b918373b50
2024-05-27 20:23:45 +08:00
hiyouga
c43bc74fe6 support Aya23
Former-commit-id: 071935b90006e2c79e39bb9ee0c5d48c6c910501
2024-05-27 20:23:24 +08:00
BUAADreamer
eaf9cc2195 Merge branch 'hiyouga:main' into main
Former-commit-id: cc1b82bf49b060987392c455fdbfe125ad667ec5
2024-05-27 20:10:58 +08:00
hiyouga
4bd276f58f add llava 1k datasets
Former-commit-id: 345d3355752f4a4dc454696a39f1610fffbbf382
2024-05-27 19:57:33 +08:00
hiyouga
f8cf0d5e5d update dpo examples
Former-commit-id: 69e32a7cb6336ca9a953c379ec794818b3f169bd
2024-05-27 19:56:04 +08:00
BUAADreamer
79bc60db33 Merge branch 'hiyouga:main' into main
Former-commit-id: d89e1f8bf8bad1dd125b4de8fe6c0b2b16411cb5
2024-05-27 19:00:48 +08:00
BUAADreamer
dc7c54067e add only tune lm and mm_proj
Former-commit-id: ba12ca430ec527fbfe4cd1eace0adb5c7712146a
2024-05-27 19:00:15 +08:00
BUAADreamer
932f0d5c20 add regex of only tune lm and mm_proj
Former-commit-id: 38d540b3e69bceabafafab524fcfc78aeb05612d
2024-05-27 18:59:00 +08:00
hiyouga
9670f5e41a add phi-3 7b/14b, mistral v0.3 models
Former-commit-id: 86dab182f9710b063f518922ccb49b01aa71c576
2024-05-27 18:20:16 +08:00
hiyouga
97a23e1cbe update readme
Former-commit-id: b8d0170fe0d094acce85dcb5f91775e4685ee055
2024-05-27 18:14:02 +08:00
BUAADreamer
11fcd055ec Merge branch 'hiyouga:main' into main
Former-commit-id: 113be744b3d044fbea3a8654158aa83ddb4599eb
2024-05-27 11:54:01 +08:00
hiyouga
b0d9966663 support SimPO #3900
Former-commit-id: 6b954ce60155cf8334150b795cfc4bb63ca74c8b
2024-05-26 23:46:33 +08:00
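Note: for context, SimPO drops DPO's reference model and scores responses by their length-normalized average log-probability with a target reward margin; its loss, as given in the SimPO paper, is

    \mathcal{L}_{\mathrm{SimPO}}(\theta) = -\log \sigma\!\left(
        \frac{\beta}{|y_w|} \log \pi_\theta(y_w \mid x)
      - \frac{\beta}{|y_l|} \log \pi_\theta(y_l \mid x)
      - \gamma \right)

where y_w and y_l are the chosen and rejected responses and \gamma is the margin.
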
BUAADreamer
5c51ab7e1f Merge branch 'hiyouga:main' into main
Former-commit-id: fd5420c43e1414bcd3fadb6239f4e5d42e6ac10e
2024-05-25 14:18:49 +08:00
hiyouga
26f293d587 fix #3853
Former-commit-id: 465a5500bae1f30744d4b9b3db40aaf9171da2cb
2024-05-24 23:29:45 +08:00
seanzhang-zhichen
a3b52fd380 Merge branch 'main' into add_dataset_sample_num
Former-commit-id: 26300127c45f24e63b91f1b0cc73e46c3a936a91
2024-05-24 15:57:47 +08:00
BUAADreamer
27d8706d6d Merge branch 'hiyouga:main' into main
Former-commit-id: a4ce5ee381fd59f6b254ab634af51b6bb54edd97
2024-05-24 09:50:00 +08:00
hiyouga
bf59383783 refactor data preprocessing, fix mllm rlhf
Former-commit-id: 53ff2dd24f9121ea30c95063bb72e49a9b31e980
2024-05-24 04:08:25 +08:00
hoshi-hiyouga
1078611259 Merge pull request #3876 from dongdongqiang2018/main
adapted to 910B image

Former-commit-id: 0708cc8a24589b9f22ad3df6685e57d1da0336f2
2024-05-24 01:54:30 +08:00
hiyouga
e6fc0ac8fe fix paligemma sft
requires transformers>=4.41.1


Former-commit-id: 80b3030569cd606ac0de43e9a682478f5bd7b727
2024-05-24 00:23:40 +08:00
hiyouga
554ca3d8dc fix oom issues in export
Former-commit-id: b7ccc882a192aa1e25b1e5816f875ea304282412
2024-05-23 23:32:45 +08:00
donggang
86dfdf956d adapted to 910B image
Former-commit-id: e095254808aace63a1be878620f683902f51cfb3
2024-05-23 09:48:22 +00:00
BUAADreamer
c0e4475485 Merge branch 'hiyouga:main' into main
Former-commit-id: 4076f52c8ba7da4624a1fb3fa52a7170d1c3171e
2024-05-21 22:18:20 +08:00
hiyouga
2b65f8bd5c fix paligemma sft
Former-commit-id: 60682d04414be37e611d6470618a8d599703942b
2024-05-21 20:03:09 +08:00
hiyouga
09e78272c2 Update README_zh.md
Former-commit-id: 34c4ba6bf9bb89170446fb396aa06ae44d251de0
2024-05-21 18:30:59 +08:00
hiyouga
cccce564bd update wechat
Former-commit-id: 6613349562194b48c5fc57aa68e620b8fa83fc0a
2024-05-21 18:22:32 +08:00
hiyouga
4adec327de fix #3847
Former-commit-id: d206b306ca4eadc8b3d4feaf490ad12f9452e562
2024-05-21 17:53:06 +08:00
BUAADreamer
1f093334d1 support pretraining of llava
Former-commit-id: 6a4c8cf0a6a1674c693b9337f018ff8df7477f8f
2024-05-21 08:57:14 +08:00
hiyouga
e0e8507108 support paligemma
Former-commit-id: 11c27f9bf204d3d6a9ca5bd4f0a19a420160453f
2024-05-21 00:01:22 +08:00
hiyouga
f5962f8128 fix paligemma data preprocess
Former-commit-id: 71b85437301739d9d96d3881d4a34b37c0f69db8
2024-05-20 23:51:32 +08:00
hiyouga
b31d808655 fix paligemma inference
Former-commit-id: 46357b7a677e8ba2e0a7c9d4ec1974abd061569c
2024-05-20 23:36:43 +08:00
hiyouga
247cda4b68 fix #3818
Former-commit-id: 3f366e05a34be224f53c5bf8334e57ae5d316004
2024-05-20 21:43:19 +08:00
hiyouga
e30975e9a2 add kto to webui
Former-commit-id: 6c866f4dbd45e868860be8351d1a65c4e1a4e02b
2024-05-20 21:20:25 +08:00
zhangzc
de9f1583c2 fix conflict
Former-commit-id: 6922b23a748c2459147bf44b96d86daa89f2c96c
2024-05-20 17:10:01 +08:00
hiyouga
ab48653e63 fix chat engines
do not use pop(key, default) since the API assigns None to dict values


Former-commit-id: 3ebbd0b55ea07de2897c27ca54eeab5c3b319419
2024-05-20 00:36:43 +08:00
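Note: the pitfall named above, in miniature — when an API layer deserializes a request, omitted fields are often stored as explicit None values, so dict.pop(key, default) returns that None and the default is never used:

    payload = {"temperature": None}           # field present but null
    bad = payload.pop("temperature", 0.7)     # -> None; the default is ignored

    payload = {"temperature": None}
    value = payload.pop("temperature", None)  # use an explicit None check instead
    temperature = value if value is not None else 0.7  # -> 0.7
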
hoshi-hiyouga
6d7a1e3f8f Merge pull request #3812 from ycjcl868/feat/chat-support-system-prompt
feat: cli chat support system_message
Former-commit-id: 96596990527403e910c81e95e38bf2638541cf31
2024-05-20 00:31:32 +08:00
hoshi-hiyouga
e093dad7cb Update vllm_engine.py
Former-commit-id: 0b8278bd21baf35d3f60c6ed24f110b391c92a47
2024-05-20 00:31:04 +08:00
hoshi-hiyouga
b103a121f0 Update hf_engine.py
Former-commit-id: ce8b902e538c69d89f207db8a43c85072cd70265
2024-05-20 00:30:45 +08:00
hoshi-hiyouga
3578abc7a4 Update generating_args.py
Former-commit-id: 861c146fa7d9cb5b99372464bd068c20fa36415d
2024-05-20 00:29:31 +08:00
hoshi-hiyouga
17d398f419 Update chat_model.py
Former-commit-id: 7736aafdc81d175e9fb484dbb7cae9263120a0fc
2024-05-20 00:29:12 +08:00
hiyouga
3453a8eebb fix jinja template
Former-commit-id: 353561f0e3914de3f81499c4e4b831ae0a6383b6
2024-05-19 23:38:30 +08:00
ycjcl868
77a089c35c feat: cli chat support system_message
Former-commit-id: e3982bff596d01992733687a580c4f41c558061c
2024-05-19 23:17:46 +08:00
hiyouga
516d83c946 fix zero2 high ram usage
Former-commit-id: 01797126eb173250250e31f8e76b69ae0047745d
2024-05-19 21:53:54 +08:00
hiyouga
fd02c9f973 fix hf gen args
Former-commit-id: 491a84976258cbb2a2647922420e2f84de1e38cd
2024-05-19 19:39:32 +08:00
hiyouga
351e80a656 fix envs
Former-commit-id: d5e150cfb98f8216713415564ab386b8320c88cb
2024-05-19 18:27:18 +08:00
hiyouga
4f04e2ed93 fix #3807
Former-commit-id: 08b695969049de8bf9bd3e90b9700736d90385ee
2024-05-19 17:07:57 +08:00
hiyouga
a810d1b98e update readme
Former-commit-id: e0beb67a417b13c818a09bd419d4e20dd44ca842
2024-05-18 23:09:03 +08:00
hiyouga
fbe963a96a safe output path in webui
Former-commit-id: 23f14262e0d54631630c084ba71e0433ea1d4640
2024-05-18 22:42:28 +08:00
hiyouga
d13b8bee8a fix jetmoe z3 block
Former-commit-id: cb00a14d905395c4b8fadb955f0424a4c56668de
2024-05-18 22:28:45 +08:00
hiyouga
0aa072a155 improve data process logger
Former-commit-id: 33d0b012b56dbafc9fff87b821c2d1bf1409dbb5
2024-05-18 22:02:42 +08:00
hiyouga
57dde7c3bc update data readme
Former-commit-id: 22c7335b496e4a673383d5a1e4e60bf2cb4e35b3
2024-05-18 21:37:38 +08:00
hiyouga
6b9003f781 update data readme
Former-commit-id: beb864a9367943d3274cb6057423d1eb9aaf85c4
2024-05-18 21:15:20 +08:00
hiyouga
9c1c59e481 fix #3803
Former-commit-id: 1ef12c95059d14a1717c82ce04e529e7ad6435ed
2024-05-18 16:13:14 +08:00
hoshi-hiyouga
31daec2749 Merge pull request #3799 from hiyouga/dev
improve KTO impl, replace datasets

Former-commit-id: b4cc207855aa1dbb120f7999165e176e649af338
2024-05-18 03:49:13 +08:00
hiyouga
2bff90719b improve KTO impl., replace datasets
Former-commit-id: e56a57ddcf061de6e4acc8679f7dbf0b68364986
2024-05-18 03:44:56 +08:00
hoshi-hiyouga
e4570e28a8 Merge pull request #3785 from enji-zhou/feature/add_kto
add kto

Former-commit-id: f60faa23e23022fd855dac6b1ecbd21e095bccb5
2024-05-18 03:07:18 +08:00
hoshi-hiyouga
d84a730daa Merge pull request #3794 from jue-jue-zi/main
feat: pass the `max_lora_rank` parameter to vLLM backend
Former-commit-id: be839961686a1845f00a56e398a7b3779df8b6e4
2024-05-17 16:17:30 +08:00
hoshi-hiyouga
0fd1a05cec Update model_args.py
Former-commit-id: f40a2fe5334865763e4d513292d359317b7a091b
2024-05-17 16:16:41 +08:00
juejuezi
6373d307ec feat: pass the max_lora_rank parameter to vLLM backend
Former-commit-id: a8756d839405ecb5deabe885cf11d1a61564deee
2024-05-17 16:07:39 +08:00
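Note: a usage sketch of the forwarded parameter, assuming vLLM's standard offline entry point — vLLM rejects adapters whose LoRA rank exceeds the engine's configured maximum, so max_lora_rank must be at least the adapter's r (model name and adapter path are placeholders):

    from vllm import LLM
    from vllm.lora.request import LoRARequest

    llm = LLM(
        model="meta-llama/Llama-2-7b-hf",  # placeholder base model
        enable_lora=True,
        max_lora_rank=64,                  # must be >= the adapter's rank
    )
    outputs = llm.generate(
        "Hello, my name is",
        lora_request=LoRARequest("my_adapter", 1, "/path/to/adapter"),  # hypothetical adapter
    )
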
hiyouga
a32c3a50fc add deepseek v2 lite model
Former-commit-id: 5e864e6b721d8b891b1cc2ca2dcac41babb9eaaf
2024-05-17 13:25:36 +08:00
enji.zhou
66b5634ebf add kto
Former-commit-id: ec51986cf70b0bdd79b8141e45916670fb97a08e
2024-05-17 13:09:17 +08:00
hiyouga
92b3697e2c update badam example #3764
Former-commit-id: a3730fd0a96bab869be6d695031182dabaea8137
2024-05-17 02:21:10 +08:00
hiyouga
969e605c7e better dtype handle in loading
Former-commit-id: 663f0577dd61a1a31191db2c6fbb0c7cea533b21
2024-05-17 02:14:56 +08:00
hiyouga
a3320f26cf update examples
Former-commit-id: 3b5f138155d96b346bda18e465cf60ec7d99e19c
2024-05-17 01:02:00 +08:00
hiyouga
45329d9e3c enable inbrowser in webui
Former-commit-id: 71fdeedb64b2339eb1c740d670b87e0c03dada68
2024-05-17 00:08:56 +08:00
hiyouga
6481321470 add falcon 11b
Former-commit-id: 897acc725edc204fad393cc9616828431b4fa768
2024-05-17 00:08:33 +08:00
hiyouga
efcf5e050d fix examples #3769
Former-commit-id: 80c036beb8d9ddac8f844f1818c9488ded04e86e
2024-05-16 19:12:09 +08:00
hiyouga
dfa686b617 rename package
Former-commit-id: a07ff0c083558cfe6f474d13027642d3052fee08
2024-05-16 18:39:08 +08:00
hiyouga
fe638cf11f set dev version
Former-commit-id: 5e9c72d07c3793cdccbdb8a9f95f1bb5d714e0a3
2024-05-16 02:17:31 +08:00
hiyouga
b2949b88e9 release v0.7.1
Former-commit-id: a4f8adb021b6218d624303b51cd5e93ffa3111a1
2024-05-16 00:57:16 +08:00
hiyouga
538c79fd8f fix #3694
Former-commit-id: 3d1b818cb6a77b7603724fbeb756b468aa74e7ea
2024-05-16 00:35:28 +08:00
hiyouga
437cc20be6 fix #3606
https://github.com/huggingface/peft/pull/1706

Former-commit-id: bf2783e1b6bc207375974c48736d6f82dd293f02
2024-05-15 23:05:02 +08:00
hiyouga
2ac972d6e7 add Yi-VL-34B model
Former-commit-id: 8b3d8a7e3bd8dff27cc72edba1b8a042f6d1929c
2024-05-15 22:58:19 +08:00
hiyouga
4d7f0fbb7a add yi-vl 6b model
Former-commit-id: 35f4041b13a593a6cf1ec6686fa18b38911ad6a4
2024-05-15 20:02:41 +08:00
hiyouga
40e3d3fbdd fix yi vl vllm infer
Former-commit-id: de54e5d7ec06dd7c20ec82c9ff032fc16cd50244
2024-05-15 19:25:48 +08:00
hiyouga
096677b989 add NPU docker images
Former-commit-id: 3b3257962c52f5d1f15ce245fee402c5baddb774
2024-05-15 19:20:11 +08:00
hoshi-hiyouga
7940b968ae Merge pull request #3748 from BUAADreamer/main
Add MLLM YI-VL and save processor config during training

Former-commit-id: 1d3cbd24ccea63d36c27725cdc5ecd02b460b0ed
2024-05-15 16:40:54 +08:00
hoshi-hiyouga
36a4224bf5 Update visual.py
Former-commit-id: f5f13a995c64fc374ad05e26cde8efa6651aefa1
2024-05-15 16:39:57 +08:00
hiyouga
d4d36e157c fix fsdp model loading
Former-commit-id: fc6fe23cc9ae4a920a17e8268a85c1aa4ad16d3b
2024-05-15 16:32:28 +08:00
hoshi-hiyouga
c4f5e49d0d Update patcher.py
Former-commit-id: 4c31a21f2106adcdad100119bad83ecaef0be3f3
2024-05-15 15:37:07 +08:00
hoshi-hiyouga
8e518d6c62 Update template.py
Former-commit-id: a13022166ba691c03f4fea7e9e2927fa446cf681
2024-05-15 14:20:39 +08:00
hoshi-hiyouga
79165100e5 Update trainer.py
Former-commit-id: dd767b20635bb549ce14f9556e1c4fb44b3662c5
2024-05-15 14:13:26 +08:00
hoshi-hiyouga
fc82acbbd8 Update workflow.py
Former-commit-id: 97cfb44bced18b721166ccb5f260098645fc5318
2024-05-15 14:13:01 +08:00
BUAADreamer
aead3ca8e5 rm extra import
Former-commit-id: 031215019e3d7727b1c7cc87a44e1cf1eb2853ec
2024-05-15 12:48:18 +08:00
BUAADreamer
b12679ad59 cast dtype in mm_proj
Former-commit-id: e0ab22648fe8b65055b5986258cc2800438dc60c
2024-05-15 11:22:15 +08:00
BUAADreamer
8061cb5671 modify style
Former-commit-id: 823af88c3201412da7ef734d34198424e09b2d51
2024-05-15 10:18:10 +08:00
BUAADreamer
0a7e5f2f57 Merge branch 'main' of https://github.com/BUAADreamer/LLaMA-Factory
Former-commit-id: ce5cb0f897eebe32a1c2c0a78fe1b0267e4b6d9d
2024-05-15 09:54:21 +08:00
BUAADreamer
812d2c25a7 Merge branch 'hiyouga:main' into main
Former-commit-id: a4795c2f5328e0cfc657409f5774819e3defc006
2024-05-15 09:54:14 +08:00
BUAADreamer
51795e8db1 add yivl and save processor to model_dir
Former-commit-id: ae72f745cb4f7713c3b835d11202aec19c3c5093
2024-05-15 09:54:00 +08:00
hiyouga
2c011060b1 fix bug in vllm engine
Former-commit-id: 38f02a2c5b52cba6908c2d3c2a455677f8574faf
2024-05-15 02:17:54 +08:00
hiyouga
a8c7531250 fix gen args
Former-commit-id: d79f91f87106ba1bc3c0ea08da5898aad59566a7
2024-05-15 01:49:05 +08:00
hiyouga
88c34d26a8 fix examples
Former-commit-id: 910ffaf46e3dde87d2dbb48b82a59a9898a90847
2024-05-15 00:26:10 +08:00
hiyouga
12d666a63c update examples
Former-commit-id: 09269c59427e8a007c1c1b6f9d2014b4c0d0a328
2024-05-15 00:05:17 +08:00
hiyouga
304a2efec8 update readme
Former-commit-id: 568cc1d33c3d202e6430b68e0bcb2772aa6b0aa2
2024-05-14 23:57:08 +08:00
hiyouga
322331df51 update readme
Former-commit-id: f315a545d85a661746ad304b5a688d1fad9eaea1
2024-05-14 23:55:49 +08:00
hiyouga
ba0da83031 add npu examples
Former-commit-id: 0f21e68e2dbd84c820d66d5c6d980004efc51d51
2024-05-14 23:32:53 +08:00
hoshi-hiyouga
0a82e15e7c Merge pull request #3584 from zhou-wjjw/main
Enhancing Ascend 910A Training Efficiency in LlamaFactory with NPU

Former-commit-id: 310cf017a5ec24af8f5cf3af298760dd4150f9f2
2024-05-14 22:18:37 +08:00
hiyouga
6670b36c49 use robust envs
Former-commit-id: f3e194c3b3c40a3e6c3c5397ec0d859e6db614b5
2024-05-14 21:36:42 +08:00
hoshi-hiyouga
7a1d13aae2 Update train.py
Former-commit-id: da1e6f0d9c2eff64f92da1f6ada3aa44ef6d6a7e
2024-05-14 20:47:52 +08:00
hoshi-hiyouga
86a048128b Apply suggestions from code review
Co-authored-by: Huazhong Ji <hzji210@gmail.com>
Former-commit-id: abef48c17ee795eae984fcc89019c2c4859108c1
2024-05-14 20:44:21 +08:00
hoshi-hiyouga
fe1a3b1367 Apply suggestions from code review
Co-authored-by: Huazhong Ji <hzji210@gmail.com>
Former-commit-id: a435e5a0bdd7268c4f1204f99f289ee0b36fd930
2024-05-14 20:44:04 +08:00
hiyouga
84ff56c3a0 fix #3728
Former-commit-id: ea3e32a27f7f7dce75a708f8a6f376b5d3e8059a
2024-05-14 20:37:21 +08:00
BUAADreamer
483ed64b43 modify yi-vl template
Former-commit-id: f113975b425e70bed2588ca55a2c62594fbf2283
2024-05-14 16:45:28 +08:00
BUAADreamer
dd4619e9f3 add support for Yi-VL
Former-commit-id: d7834ca92d3048949caa48f8635cfbcea2c85771
2024-05-14 14:03:19 +08:00
BUAADreamer
905815d878 Merge branch 'main' of https://github.com/BUAADreamer/LLaMA-Factory
Former-commit-id: e82f527ea583a7e99a25a06c7fe7b03c1dc2ebb9
2024-05-13 23:28:52 +08:00
BUAADreamer
ba72e08901 add yi-vl
Former-commit-id: 891b25cb3d709ea82182ca90496034360e1cd5d8
2024-05-13 23:28:28 +08:00
hiyouga
e4972c8fc4 update examples
Former-commit-id: 779603055ae9216ff549f5285caac8c0c0a1e9fb
2024-05-13 20:39:36 +08:00
hiyouga
5f5f948806 fix #3724
Former-commit-id: 62f5999d79834d6cbc4129eda387a317665d6099
2024-05-13 20:09:09 +08:00
hiyouga
2892e5d42a fix #3702
Former-commit-id: 55755786f21050b9efc127c391509ba5d9ea8982
2024-05-13 18:24:35 +08:00
hoshi-hiyouga
542a5d15ef Merge pull request #3655 from Tendo33/main
1. Rename the is_fastapi_available function 2. Log requests when deploying with vLLM

Former-commit-id: 28c75448eed9d472e96285737a66ac0d20280e13
2024-05-13 18:05:50 +08:00
hiyouga
b1c791fb0d support Yi 1.5
Former-commit-id: e580823676cbb83ddb9a0f685992e6054ae5ffaa
2024-05-13 16:51:20 +08:00
Tendo33
7589123465 ruff check scripts src tests --fix
Former-commit-id: da5277b6a1cff40d59df8f1835d9514b2a51be34
2024-05-13 09:40:33 +08:00
Sun Jinfeng
f94b54b776 Merge branch 'hiyouga:main' into main
Former-commit-id: 014acaa7845b7ac2876596d216b1be369a8e9311
2024-05-13 09:29:58 +08:00
hiyouga
1e1b8899f5 lint
Former-commit-id: cb72eb6ab24615ce492ca2945f29daa34c0c52d4
2024-05-12 01:28:51 +08:00
hiyouga
7b02c83399 fix #3658
Former-commit-id: 37799a62d4431d1d8c02fee6c23d607a65723c1a
2024-05-12 01:25:16 +08:00
hiyouga
8f1ba07b30 remove checksum and fix ui args
Former-commit-id: 0cfdeb1d30efb63211434bc4656bceb59e666289
2024-05-12 01:10:30 +08:00
hoshi-hiyouga
1ce400bddf Merge pull request #3654 from betapeanut/main
Remove Redundant Environment Variable Usage

Former-commit-id: aa57a2a183eef822973d7e5d7c7bc80a42167482
2024-05-12 00:49:00 +08:00
hiyouga
6bc0ec63c7 update readme
Former-commit-id: d57ca8a865b46588f65b2cc15073c5fcc4e4cebc
2024-05-12 00:33:49 +08:00
hiyouga
25d316b1a0 fix #3674
Former-commit-id: 6bad2eafef75ec697477e1f2ce739006042fb4c7
2024-05-12 00:03:59 +08:00
hiyouga
2bcd5b2b73 fix llava config
Former-commit-id: b13d032325e45d401a9dbc64d4c73e308eff3288
2024-05-12 00:02:49 +08:00
hoshi-hiyouga
436afcba57 Merge pull request #3651 from BUAADreamer/main
add some mllm features and try to incorporate Chinese-LLaVA-Med project

Former-commit-id: 143d311d4a82e1fa9b6d4ad98b0db5b02f3572c4
2024-05-11 23:59:08 +08:00
hoshi-hiyouga
db47c53486 Update loader.py
Former-commit-id: 2fc12790414677bb82736208fb9547640780af2e
2024-05-11 23:58:47 +08:00
hoshi-hiyouga
4efe56fd68 Update model_args.py
Former-commit-id: c4114add4c42c1d7723f7270451a6c9fc656ecd1
2024-05-11 23:57:05 +08:00
hoshi-hiyouga
d54313fcf9 Update patcher.py
Former-commit-id: 2c88d394d29c6e98ac3a6860848855722614ca52
2024-05-11 23:56:40 +08:00
hoshi-hiyouga
382f096475 Update tuner.py
Former-commit-id: ccd1eb2c0992f75440c0e1c5cd3f02d03aacb085
2024-05-11 23:55:59 +08:00
hoshi-hiyouga
0ccc76392e Update tuner.py
Former-commit-id: 22afcbdb25160583e5ece28fad0585c7bc70f41a
2024-05-11 23:54:53 +08:00
hoshi-hiyouga
e2cfcb0a5f Update README_zh.md
Former-commit-id: 1a205478403b5852fac0aa8418cdb8995fbe40e3
2024-05-11 22:44:51 +08:00
hoshi-hiyouga
b530a798c1 Update README.md
Former-commit-id: d24c83bb30e2829ba78db90c4c4975788f2eed25
2024-05-11 22:43:04 +08:00
BUAADreamer
fdf38b70a0 Merge branch 'main' of https://github.com/BUAADreamer/LLaMA-Factory
Former-commit-id: 50cc5cf93d50c42cfcf5047bcd9b5c7959d503ae
2024-05-11 13:11:10 +08:00
BUAADreamer
1a78b675be add full parameter finetuning of mllm
Former-commit-id: f90c1da5636ac3cb8112c5081a3b56b09a17fcf8
2024-05-11 13:11:00 +08:00
kkkl
9b1008912c Update constants.py
Fix the download issue of the Phi3 model

Former-commit-id: 8978e80914ac6db1ed1b79641b20c84087dd4341
2024-05-11 00:22:40 +08:00
BUAADreamer
18241f4ed8 Merge branch 'hiyouga:main' into main
Former-commit-id: 0dd072703508f68fd4ee51b6648d0c7642a4cc93
2024-05-10 20:34:41 +08:00
hiyouga
223bbd9930 resolve python 3.8 package
Former-commit-id: 5eee4ec7016846356715a4fa1ad58e3cbb1cac6e
2024-05-09 16:52:27 +08:00
Tendo33
9dadff90bb 1. Rename the is_fastapi_available function
2. Log requests when deploying with vLLM


Former-commit-id: 530d4f5d51c13c71d99de5fe2d23805b0aa875a2
2024-05-09 14:28:01 +08:00
BUAADreamer
827a929f1d add push processor to hub
Former-commit-id: 7a05a965311edfdfafa57af8342875860d341f27
2024-05-09 14:05:19 +08:00
BUAADreamer
e508519e0a add mllm processor save and Chinese-LLaVA-Med show
Former-commit-id: 110c49fbf79fe0625f091e63746bfabde00add99
2024-05-09 13:53:39 +08:00
BUAADreamer
47892418ad Merge branch 'hiyouga:main' into main
Former-commit-id: 1f3163509ecd05902ea216a905b4ca15ddd3696f
2024-05-09 13:45:43 +08:00
cocktailpeanut
2aeae4b88b yet another removal of unnecessary environment variables
Former-commit-id: a07726028f0287de28e4751672b27efe0efc6477
2024-05-09 01:33:20 -04:00
cocktailpeanut
c213f2a9a9 more removal of unnecessary environment variables
Former-commit-id: 59ef1a6e0d81585a6c010143d05fcfae26d40c00
2024-05-09 01:32:00 -04:00
cocktailpeanut
333f4a69bb remove unnecessary environment variable usage
Former-commit-id: 4be1d832cb269a07987f5cab5d5f949e269087da
2024-05-09 01:26:15 -04:00
BUAADreamer
172600d432 add mllm export
Former-commit-id: ce4770d33f6761d3b1d60661efcb0be34a036154
2024-05-08 22:50:42 +08:00
hiyouga
4ce4172c87 fix #3625
Former-commit-id: 8c0f5d1db29862277d84aa128b424b7d0f2b187f
2024-05-08 17:12:56 +08:00
hiyouga
400ae144a4 add llama3 chinese chat
Former-commit-id: ee3e5920f2f28567259693cb106e884a90cb02a2
2024-05-08 17:10:03 +08:00
hiyouga
0a1b6ca5a7 add deepseek moe 236B
Former-commit-id: 30c10e2dc41b5d64191a91ad2d61f3b5c440b1d5
2024-05-08 16:37:54 +08:00
BUAADreamer
05ef89cfcc modify export model
Former-commit-id: c7051edae4ce23f85daf204a2aaac134b1f29c3d
2024-05-08 10:36:36 +08:00
hiyouga
6d9d8b92ca update readme
Former-commit-id: bcc3d3b95609555e5e9a4deb68e65391c5b465bd
2024-05-07 22:17:04 +08:00
hiyouga
3f7f1daa33 remove big file
Former-commit-id: 8a05242787f810ec25d1b33358257d2867c45497
2024-05-07 22:14:06 +08:00
hiyouga
8061e92d07 update readme
Former-commit-id: ecefcb2e891e75d37df5ebfc616cfdb2106bcfd6
2024-05-07 21:17:31 +08:00
hiyouga
0c811a7653 update readme
Former-commit-id: 730ea71584debc5784d68eeadceb42f7e827447f
2024-05-07 19:03:47 +08:00
hiyouga
f6ac3796ca fix #3560
Former-commit-id: ea69cbe903a301df1bcc4b63cdc5bd4c6e3a8255
2024-05-07 19:03:35 +08:00
hoshi-hiyouga
c1394e7dfc Merge pull request #3601 from Katehuuh/main
Add contribution Luminia

Former-commit-id: 53bef571c445111f49bcc8a5d49afc2872f754ae
2024-05-07 18:01:48 +08:00
hiyouga
ebab655683 fix #3602
Former-commit-id: 1518b45490606ea200482da4737113c46985e8c5
2024-05-07 17:50:27 +08:00
hoshi-hiyouga
3d74f21738 Merge pull request #3604 from gaussian8/main
fix: split Dockerfile's CMD
Former-commit-id: 1d6e6956ca45d3cb7de213c4a641b98a35af5896
2024-05-07 16:53:23 +08:00
junwooo.lee
8493753fab fix: split Dockerfile's CMD
Former-commit-id: d8032550c7e084648fbf24da5abbac6432b54f26
2024-05-07 15:09:48 +09:00
Katehuuh
0f626a2145 Update README_zh.md
Add Projects Nekochu/Luminia-13B-v3

Former-commit-id: 88d01e831bd511daec30a94817f06e07b8406b18
2024-05-07 06:28:48 +02:00
Katehuuh
5100c290c4 Update README.md
Add Projects Nekochu/Luminia-13B-v3

Former-commit-id: 3d2cd743c2c8830e8b131d1192f1549fa557762d
2024-05-07 06:23:36 +02:00
hiyouga
4bde37e7c8 update readme
Former-commit-id: 3fdc72b9aad9e129f74417cbbf25e841d28e3737
2024-05-07 06:19:29 +08:00
hiyouga
e3b3a722de fix stop param
Former-commit-id: f0a850c25211b72eddbb357c81679db9b0930d44
2024-05-07 00:41:04 +08:00
hoshi-hiyouga
b9e167e6ca Merge pull request #3527 from zhaonx/dev
"add support for vllm api stop parameter"

Former-commit-id: e7d436403af6ac4c6a33cf36411098a0b0fefce2
2024-05-07 00:37:49 +08:00
hoshi-hiyouga
1ebd1e50e7 Update vllm_engine.py
Former-commit-id: fa2410de07150a82082ab5b88baf56aa891db870
2024-05-07 00:37:05 +08:00
hoshi-hiyouga
14316f6583 Update generating_args.py
Former-commit-id: 714957ba0159919a89fc1659a7a7b4b6bd82eead
2024-05-07 00:28:16 +08:00
hoshi-hiyouga
8e4ab2f7d0 Update generating_args.py
Former-commit-id: 7a9fb56786f4c40856211009656a983be1e42cb7
2024-05-07 00:27:56 +08:00
hiyouga
196068fa19 update readme
Former-commit-id: 1c67708291195825e8356d5862d22cbee9566233
2024-05-06 23:34:59 +08:00
hiyouga
da2295f8c8 fix gradio args
Former-commit-id: 7767c1ad4b2b638b558f941ba1f0d05d4a049507
2024-05-06 23:33:06 +08:00
hoshi-hiyouga
ab0741b5a6 Merge pull request #3596 from hiyouga/dev_doc
Add CLI document

Former-commit-id: 2b08c51500592f092b9596517e787081453ecbb5
2024-05-06 23:10:38 +08:00
hiyouga
6aec446940 update examples
Former-commit-id: cca50b627c85e0a777717d609377406cc7fd579f
2024-05-06 23:07:55 +08:00
hiyouga
50c71dd29f update example docs
Former-commit-id: 102cd42768d9eb2cf1219309a25b41e26149067e
2024-05-06 22:51:02 +08:00
hiyouga
5c9da798b5 update docs
Former-commit-id: a4a2e94241bea6f96590f6cb8ca8b5cddee1917e
2024-05-06 21:47:00 +08:00
zhouwei
3d1b0e1864 Significantly improve Ascend 910A training efficiency by fully leveraging the NPU through torch_npu, the PyTorch library optimized for NPUs, yielding a roughly tenfold speedup.
Former-commit-id: 90980b626d3408b3e2ee32a02456c20881318be7
2024-05-06 13:29:59 +08:00
zhaonx96
45becd2a45 "add stop parameter in chat.py"
Former-commit-id: e529bf5bc14c72558d26f73c42076eaa9684205c
2024-05-06 10:10:00 +08:00
zhaonx96
8f1197de7e Merge branch 'main' of https://github.com/zhaonx/LLaMA-Factory into dev
Former-commit-id: ec1f834905e241277fdd3f764c70eede97e9ff40
2024-05-06 10:09:00 +08:00
hoshi-hiyouga
25de4ce56a Merge pull request #3578 from pha123661/main
Fix badam example argument

Former-commit-id: d6edf3d91e5d20f48938e02d96d2193ed3d50181
2024-05-05 23:41:58 +08:00
Oscar
d0597897bf Fix badam example outdated argument
Former-commit-id: 29aa188cc774cb72367f706f1cd4c07bc5a9f241
2024-05-05 23:35:19 +08:00
hiyouga
4674f3baa7 add version and help to cli
Former-commit-id: f762f2215169b9fe55564d5600b758ddc66f9c9c
2024-05-05 02:44:35 +08:00
hiyouga
2f5f6722cf fix eval scripts
Former-commit-id: fc3743d0b82c28fbff1170761139e4fa5d2a8939
2024-05-05 00:53:07 +08:00
hiyouga
7ef3788ff4 update webui
Former-commit-id: 17a53d25cdadd2df70a8afa0488f75bbf1918b89
2024-05-05 00:17:54 +08:00
hiyouga
f9aa74715a update scripts
Former-commit-id: 1c07648c4bb4bb0c46bc0240547b46bd2835dce1
2024-05-04 23:05:17 +08:00
hiyouga
9b187b274c add avg ppl
Former-commit-id: 40caeb6f0fdf76a1e2c9ca3761299d087fc643e0
2024-05-04 22:35:31 +08:00
hiyouga
68ed89f351 update ppl script
Former-commit-id: 07606fa4ab303f088170a569c1f86141a1b496c5
2024-05-04 22:13:14 +08:00
hiyouga
342d7da8d7 add cal_ppl script
Former-commit-id: 947068c11c0be00db2cecddb2c5842a0d6e2c321
2024-05-04 22:02:25 +08:00
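Note: the quantity these scripts deal with, sketched — perplexity is the exponential of the mean token-level negative log-likelihood, and an average perplexity can then be taken over per-sample values (illustrative code, assuming a Hugging Face causal LM):

    import math

    import torch

    def sample_perplexity(model, tokenizer, text: str) -> float:
        inputs = tokenizer(text, return_tensors="pt")
        with torch.no_grad():
            # HF shifts the labels internally; .loss is the mean token NLL.
            loss = model(**inputs, labels=inputs["input_ids"]).loss
        return math.exp(loss.item())

    # avg_ppl = sum(sample_perplexity(m, t, s) for s in texts) / len(texts)
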
hiyouga
6eda42eb7c update readme
Former-commit-id: eaf83847ef6d89d8b70429138e73b04fd2aa3ef8
2024-05-04 17:01:21 +08:00
hiyouga
e9fe8815be remove empty stream response
Former-commit-id: 070d0da928b1e974a094279a2782201016d2a3ab
2024-05-04 16:13:52 +08:00
hiyouga
9381fecca7 fix async stream api response
Former-commit-id: d70bbcae6513e50aa6094f2d98c4aa5c6641ea02
2024-05-04 16:11:18 +08:00
hiyouga
efa9140577 update api and support abort eval in webui
Former-commit-id: 8661bed68812e9ded9439e8a821b1d7716bc797b
2024-05-04 15:59:15 +08:00
hiyouga
b1b18b2c5a update readme
Former-commit-id: 5061f7196a3278af5ebce77249d9c3c0f8a55b34
2024-05-04 00:43:53 +08:00
hiyouga
37bcbf72b4 update readme and webui launch
Former-commit-id: c66ffa57323ef6ea78a9b75ec5122d9ea25fd420
2024-05-04 00:43:02 +08:00
hiyouga
99125c8825 update readme
Former-commit-id: 012e5b9625682a628a0b7fb5879097be7166c7be
2024-05-04 00:31:02 +08:00
hiyouga
182b974786 fix eval in webui
Former-commit-id: 774ef2bf5823d68b9cc254a676f5adb4af533d75
2024-05-04 00:19:19 +08:00
hiyouga
7a4a6a5522 fix webui resume
Former-commit-id: c2f6582ddd365bb64b72e8057cc4ecd7884d2480
2024-05-03 23:15:19 +08:00
hiyouga
2383e5440c fix slow op in dpo/orpo trainer
Former-commit-id: 38cad0896ea0516de6d4b2759ec9d45ee67d339b
2024-05-03 23:06:52 +08:00
hiyouga
1fea91736a fix callback log multigpu #3559
Former-commit-id: 1f105f1551b12675ca7d339ef5f91333f0371987
2024-05-03 21:24:27 +08:00
hiyouga
09d9fb28f9 enable tqdm in webui
Former-commit-id: 1737bff64799047a5b715fd979b4c038ae213bb3
2024-05-03 04:42:50 +08:00
hiyouga
57c6eabf83 fix gen_args
Former-commit-id: c3e2f4f07b7fb3b1d7d2b44451660f082a467aed
2024-05-03 04:24:50 +08:00
hiyouga
33d440b577 fix colab gradio
Former-commit-id: 26179a29d3400d1fea155e325a79473a8bc12f04
2024-05-03 03:54:46 +08:00
hiyouga
ce8200ad98 update webui and add CLIs
Former-commit-id: 1368dda22ab875914c9dd86ee5146a4f6a4736ad
2024-05-03 02:58:23 +08:00
hiyouga
2cedb59bee Update prepare.sh
Former-commit-id: 5928b869251a984a085289ca6861a9731dc5b910
2024-05-02 17:16:02 +08:00
hiyouga
dd0b85580e fix badam configs
Former-commit-id: 8a4e6a4c65a9a42e6501b0d3ce81d6220c287454
2024-05-02 02:47:04 +08:00
hoshi-hiyouga
cd4dad846b Merge pull request #3487 from codemayq/main
support BAdam in WebUI

Former-commit-id: 6eada1a2844a2b2c8aad599ebfcc35b376c938ea
2024-05-02 02:38:01 +08:00
hoshi-hiyouga
a11a04a24f Update train.py
Former-commit-id: 16f0d0056967872e02969fdd842a381f9484af8a
2024-05-02 02:21:27 +08:00
hoshi-hiyouga
eb99999ca8 Update README_zh.md
Former-commit-id: 1c673d89faca3160627009fcd0a4aa39138570c0
2024-05-02 02:14:55 +08:00
hoshi-hiyouga
ea58cf111e Update README.md
Former-commit-id: 4fb43b0c9aa48242126252ad755a2a1683b38d6a
2024-05-02 02:13:46 +08:00
zhaonx
2d95127c33 "add support for vllm api stop parameter"
Former-commit-id: b9f21fa639b66db09c79404d885661c96bdf9395
2024-04-30 17:17:09 +08:00
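Note: what the stop parameter looks like on the wire in an OpenAI-style chat completion request — generation halts as soon as any stop string is produced (endpoint and model name are placeholders):

    import requests

    payload = {
        "model": "test",                                             # placeholder
        "messages": [{"role": "user", "content": "Count to ten."}],
        "stop": ["\n"],                                              # halt on newline
    }
    response = requests.post("http://localhost:8000/v1/chat/completions", json=payload)
    print(response.json()["choices"][0]["message"]["content"])
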
Lao
57fcdca336 Update README_zh.md
Former-commit-id: bacc8588dc7b0b43c240189ecf4336bedc299357
2024-04-28 23:31:37 +08:00
khazic
3d88589c0f Upgrade the second sharegpt format
Former-commit-id: 057f992a666b029d207a3dc7dfc353f9abcf8316
2024-04-28 14:30:05 +08:00
khazic
dfd153cc81 added the second sharegpt format
Former-commit-id: 6d140ac98a78ecc0a713842bb917dc8eb14450cb
2024-04-28 14:27:45 +08:00
codingma
7641a214d8 support BAdam in WebUI
Former-commit-id: 1247154dd7d5eba5d11c4bb8504bf551ab49eb72
2024-04-28 11:31:34 +08:00
hiyouga
3cef844079 fix setup
Former-commit-id: 7d3e7db46a5f8672dd57fa5fcc03822e175047f9
2024-04-28 03:49:13 +08:00
hiyouga
4dcd47100d fix llava rlhf
Former-commit-id: f6863cbbcbf960d6481296c6cae3e40fd70e4e14
2024-04-28 03:01:49 +08:00
hiyouga
a412b4ed4a add models to 0.7.0
Former-commit-id: 436d3754452f839c617839ab3bbaacc4a8908e19
2024-04-28 01:50:30 +08:00
hiyouga
544a6259b6 update readme
Former-commit-id: c9190fe36f511c3a5149d45c85a10b02a57fa88a
2024-04-26 23:39:19 +08:00
hiyouga
c501f377dd release v0.7.0
Former-commit-id: 45bb89cb4d26a6b3fb5360bc90ab950738fe4920
2024-04-26 23:18:00 +08:00
hiyouga
cb8b8f40cd update readme
Former-commit-id: f3d4b46338d4d484b205d0651a1fa7b2e77a1654
2024-04-26 20:09:14 +08:00
hiyouga
70bed8ad8f support Qwen1.5 110B
Former-commit-id: d6e5ecaf4109127bab24e39a0696076bceb0b37c
2024-04-26 19:59:22 +08:00
hiyouga
51f776ae2a fix llava qlora
Former-commit-id: 01c5a669f6fe598aac1758a700a7607da37db1bc
2024-04-26 18:00:23 +08:00
hiyouga
697bc20941 add llava to llamaboard
Former-commit-id: deaaff0a9de0eef9691991c99cd797461b1165cc
2024-04-26 06:41:35 +08:00
hiyouga
1480e3a88f update readme
Former-commit-id: df1155245d3f71ba4f3361d43aa662ab3b024de8
2024-04-26 05:49:26 +08:00
hoshi-hiyouga
19029d5b0f Merge pull request #3454 from hiyouga/mllm
Support fine-tuning LLaVA-1.5 MLLM @BUAADreamer 

Former-commit-id: c4195d1e26349795f7aad5c10a8a9e2abb7b64a3
2024-04-26 05:46:29 +08:00
hiyouga
7773ac0ead update readme
Former-commit-id: 41728fd74de7bec0cc6135aef9dfa3ae9fe7af73
2024-04-26 05:44:30 +08:00
hiyouga
23b881bff1 support mllm hf inference
Former-commit-id: 2c7c01282acd7ddabbb17ce3246b8dae4bc4b8cf
2024-04-26 05:34:58 +08:00
hoshi-hiyouga
10a6c395bb Merge pull request #3450 from BUAADreamer/mllm
Add Multimodal LLM Finetuning

Former-commit-id: 7cacbcfdf7391080ef43eb2b2c79a5237e6120e8
2024-04-26 05:30:30 +08:00
hoshi-hiyouga
f9a7732a1f Update preprocess.py
Former-commit-id: 0e376eab23d38b8fca05f054f3cde308756ee3b1
2024-04-26 04:10:28 +08:00
hoshi-hiyouga
c37582af02 Update aligner.py
Former-commit-id: 855489074c469f47572153df0fa1e251b187b232
2024-04-26 03:48:34 +08:00
hoshi-hiyouga
ece67f8c7f Update parser.py
Former-commit-id: 4df75e8a9a391565cc3eec69bc0ebf5d5192de61
2024-04-26 03:35:39 +08:00
hoshi-hiyouga
e1838e76fe Update loader.py
Former-commit-id: 6a5f2e2ab7304113ff71cb77aafff6a1f74831f8
2024-04-26 03:33:07 +08:00
hoshi-hiyouga
2eede9ffd6 Update workflow.py
Former-commit-id: 5b8b5b975716d539ae2fae8536f79e106aa0b566
2024-04-26 03:29:12 +08:00
hoshi-hiyouga
a6f6b406b3 Update loader.py
Former-commit-id: 72d4817a15f6916706828ea2a61d808183c23773
2024-04-26 03:22:40 +08:00
hoshi-hiyouga
279439abbe update hparam name
Former-commit-id: 9941adfbf06db37f8ba32c4555f6e58e27188aaf
2024-04-26 02:49:39 +08:00
hoshi-hiyouga
13117b69d7 delete llava template (use vicuna)
Former-commit-id: 420e64970e5a0e45453041927e0366ee8beb73d5
2024-04-26 02:20:47 +08:00
BUAADreamer
5d03ac642d fix some bugs
Former-commit-id: 593b7b004df74bd24361c9883401a656c08fb589
2024-04-25 22:59:46 +08:00
BUAADreamer
5062ee547e modify some style
Former-commit-id: 1291c7ee39361dd75247c67f04dcf20b472faf83
2024-04-25 22:40:53 +08:00
BUAADreamer
59817c27e3 modify some style
Former-commit-id: d578a90cefa7ec813355795bdd6ead5ee558ce26
2024-04-25 22:40:25 +08:00
BUAADreamer
759bee48d2 merge some func
Former-commit-id: 3085107c44715e4b2ca96d73b20d90c172b95219
2024-04-25 22:35:17 +08:00
BUAADreamer
514ffafc12 modify some style
Former-commit-id: 053062abc007014a7fde95c5ae9f4d859893d8ad
2024-04-25 22:04:09 +08:00
BUAADreamer
8b2a735c14 modify some style
Former-commit-id: b016e6a671a2f228f0bdd9b8d5995b4669609655
2024-04-25 21:58:18 +08:00
BUAADreamer
10d59e9e4a make dataset script
Former-commit-id: 25892f958da14976025a775febf628cd0e0a3d85
2024-04-25 21:32:01 +08:00
BUAADreamer
058ed5e607 modify style
Former-commit-id: c1f1df99e4dc3d0aadf1207b4e9a16218187fd5a
2024-04-25 21:29:50 +08:00
BUAADreamer
110c2ce2a5 modify style
Former-commit-id: 3bffc1e1b8bcc4582cebea06d35e5146163c7bec
2024-04-25 21:27:48 +08:00
BUAADreamer
c425436676 modify style
Former-commit-id: 54b713d0c4ffdfc6a7faeb14471b58bb1cd8acf5
2024-04-25 21:15:16 +08:00
BUAADreamer
266fe908e3 Merge branch 'main' of https://github.com/BUAADreamer/LLaMA-Factory
Former-commit-id: c4bb5af69c5bbf0b1ea044cbb2b18acddc6733ac
2024-04-25 21:08:40 +08:00
BUAADreamer
dbd905438b add some
Former-commit-id: 8d035a849c4a441d457791aab073861adf69a09f
2024-04-25 21:08:32 +08:00
hoshi-hiyouga
d64c87f928 Merge pull request #3449 from hiyouga/mllm
add webui backend option

Former-commit-id: 372fcedef40b79fe8bd3932c06c720f2a03db6e6
2024-04-25 20:58:16 +08:00
hiyouga
29eebef696 add webui backend option
Former-commit-id: 3764586cb3ed64fe376d0ae420ff5690c28459e2
2024-04-25 20:49:23 +08:00
hiyouga
7bfbcb1fe3 vllm + lora support
Former-commit-id: 8cb86ba355195f5d6dcb95ee6b6b7203463a34db
2024-04-25 20:24:31 +08:00
BUAADreamer
9b210cf4b3 rm some
Former-commit-id: 2c85b4fabbebd8b51eee53f5d29184d4a6e97569
2024-04-25 20:09:43 +08:00
BUAADreamer
f74e640565 Merge branch 'hiyouga:main' into main
Former-commit-id: 131d0bcd554dedd794add7eb3d7b1201cac80e7c
2024-04-25 20:02:50 +08:00
BUAADreamer
d1d08d066a merge data part to the text stream
Former-commit-id: 80537d580119d9d5a06ab236a5284aaae2f83b5b
2024-04-25 19:58:47 +08:00
hiyouga
6be321b5da fix #3374
Former-commit-id: 0097d7968b3b570e1705caff26f42d9ed71ad974
2024-04-25 19:56:49 +08:00
BUAADreamer
3c792174db merge data part to the text stream
Former-commit-id: 7ee20286d9bcc2d5378bfd6bb02cd3648396d873
2024-04-25 19:19:59 +08:00
hiyouga
9aeb88c426 add export_device in webui #3333
Former-commit-id: 30ebd3652809d73941e0a5e4a8be11d989faf98d
2024-04-25 19:02:32 +08:00
BUAADreamer
00e2a272ef merge model part to the text stream
Former-commit-id: b6fcb832ddaed4647d6f2b926f3dfccd47f3ea84
2024-04-25 08:20:41 +08:00
BUAADreamer
5142349661 remove error
Former-commit-id: 2bcd1c7dc3595f17ae4e2c4475196cc2d03d0e75
2024-04-25 01:01:59 +08:00
BUAADreamer
0e3cc52327 remove conflicts
Former-commit-id: e5750ee202eb67cf5fc54f464548e2eb43d00900
2024-04-25 00:56:06 +08:00
BUAADreamer
6c1db2d012 remove conflicts
Former-commit-id: f8b637eb76cba7ec229e2978068805ad1cca8adb
2024-04-25 00:34:22 +08:00
BUAADreamer
12c51655ce add llava and instructblip
Former-commit-id: 142fb6f4541a1acfefe66ff2574dabde53b00c06
2024-04-25 00:22:43 +08:00
hiyouga
36be12a3b7 update tool template
Former-commit-id: c72a1981859818c257c5271d32e03c9d3c344206
2024-04-25 00:21:34 +08:00
hiyouga
21fac4c98c fix log level
Former-commit-id: 8d21302f6201b3f33c10f61f3559bd95be3363c2
2024-04-24 23:42:59 +08:00
hiyouga
83404c4fa9 support new special token #3420
Former-commit-id: f5c6a47f5193ab3a6c137580992bdcce0b31fdd5
2024-04-24 23:39:31 +08:00
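Note: the standard transformers recipe behind this feature, for reference (the repository's implementation likely differs in detail) — register the token with the tokenizer, then resize the embedding matrix so the model gains a row for the new id:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    num_added = tokenizer.add_special_tokens(
        {"additional_special_tokens": ["<|reserved_1|>"]}  # hypothetical token
    )
    if num_added > 0:
        model.resize_token_embeddings(len(tokenizer))  # grow embeddings for the new id
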
hoshi-hiyouga
12f852b8d4 fix phi template
Former-commit-id: 14a1ff665eaebfc618229efbe96f09848d52faec
2024-04-24 13:55:14 +08:00
hoshi-hiyouga
a88873116a fix webchatmodel
Former-commit-id: dc6d8b5dc42c363dd180aaf90c9a2f2d0cce6725
2024-04-24 13:54:21 +08:00
hoshi-hiyouga
7cfcd69c64 fix inference in llamaboard
Former-commit-id: 5e631915157083b61e2d5a183e0c91f2d11f416e
2024-04-24 13:53:39 +08:00
hiyouga
a5eabbe933 add olmo 1.7
Former-commit-id: 86a3fb3a141d2702b15af08df36ffcf9b3d6de14
2024-04-24 05:50:50 +08:00
hiyouga
aa25716a5d add dbrx and jamba models
Former-commit-id: ce35c80b4b00152185285d6064939803d14487f0
2024-04-24 05:39:52 +08:00
hiyouga
94c8219575 fix bug
Former-commit-id: 38e164fe4aaea6f0baf121a720291ca42643ba8c
2024-04-24 05:21:18 +08:00
hiyouga
ad24a2a0c9 fix bug
Former-commit-id: 271c24d2c82d645fa9072e6de94ca38f20411537
2024-04-24 05:10:07 +08:00
hiyouga
c05027d14a remove redundant code
Former-commit-id: 4a7a7ad2bcdc493458084f5f3d384239228b7d5a
2024-04-24 05:02:18 +08:00
hiyouga
5420905a2e support unsloth generate
Former-commit-id: 0ef1ad9f505dba71db9342f524cc3a7565e5e09e
2024-04-24 04:46:53 +08:00
hiyouga
03f2e3284a refactor patcher
Former-commit-id: 263cfe1294f5c3188f5e8d65791f35ee0d87315a
2024-04-24 03:02:23 +08:00
hiyouga
d2bb1b3a6b reenable sdpa and fast tok by default
Former-commit-id: 9e00902dbedc71d55743d1bf237843506a557891
2024-04-24 02:18:44 +08:00
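Note: what these defaults map to in transformers, as a sketch — SDPA selects PyTorch's scaled-dot-product attention kernels and use_fast selects the Rust tokenizer (the model path is a placeholder and must point to an architecture with SDPA support):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/model", use_fast=True)
    model = AutoModelForCausalLM.from_pretrained(
        "path/to/model",
        attn_implementation="sdpa",  # PyTorch scaled-dot-product attention
    )
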
hiyouga
35c4a2c212 fix #3347 #3387
Former-commit-id: c253c18185a29b59190f3e0ed236c2bb4c788085
2024-04-24 01:30:16 +08:00
hiyouga
1e4010a1fb support phi-3
Former-commit-id: 7e8ffa9beee3893e051ceeade443bd56c4a07b1c
2024-04-24 00:28:53 +08:00
BUAADreamer
1451297c78 add multimodal LLM BLIP-2 and InstructBLIP
Former-commit-id: 67800c565b086f362b8cf131b0c9babaa7a7ebc7
2024-04-23 19:22:42 +08:00
BUAADreamer
0b99b13786 add multimodal LLM BLIP-2 and InstructBLIP
Former-commit-id: b78b5f290aa38a7454e101ee9703fb6fac5064ac
2024-04-23 18:47:03 +08:00
BUAADreamer
f5edbf2b49 Merge branch 'hiyouga:main' into main
Former-commit-id: 6287d1b789c631205c1033adf036e28deaef4167
2024-04-23 18:46:12 +08:00
BUAADreamer
ab6dc0ea30 add multimodal LLM BLIP-2 and InstructBLIP
Former-commit-id: a730f89a972f1a9d37c718c716f199cb8d4903b2
2024-04-23 18:45:43 +08:00
hiyouga
79d34ce0f3 update examples
Former-commit-id: 8bf55682cdfbbdca0f01073eac0084c20a6a09d1
2024-04-23 18:29:46 +08:00
hiyouga
1d2e372a8e update readme
Former-commit-id: d4eaee262a64e716ce475dc4eb18d8d9697d8dd8
2024-04-22 17:09:17 +08:00
hiyouga
f6a53d83c8 update readme
Former-commit-id: 3eab580703ee01a0d2d75e7f01df5165af551386
2024-04-22 00:51:35 +08:00
hiyouga
4ec56dd958 update readme
Former-commit-id: fdca136309709e43d75a831252b9375a5a99635a
2024-04-22 00:42:25 +08:00
hiyouga
ba06eb65ca update readme and examples
Former-commit-id: 27dd9bf201c24f7804811398bc2758966ec78432
2024-04-22 00:37:32 +08:00
hiyouga
be716972fe remove extras
Former-commit-id: d67e972f8c3d5273e589c8c85c0a1620f59785c5
2024-04-22 00:35:41 +08:00
hiyouga
719585a128 update readme
Former-commit-id: 3a8c17907c71f46b1b37501e2afdc99ad89fb4bc
2024-04-22 00:21:01 +08:00
hiyouga
348f29aa50 set dev version
Former-commit-id: b9557887d7506ff57b2b2bf490092aac4e4becf0
2024-04-21 23:14:30 +08:00
hiyouga
c8fe3f544b release v0.6.3
Former-commit-id: 947572af8de201669598f54735f35b50bb719d71
2024-04-21 23:13:23 +08:00
hiyouga
0f1ad7140f fix #3366
Former-commit-id: dc20237455c36de44f8922539d7dfadd8bedb12f
2024-04-21 21:34:25 +08:00
hiyouga
233e167f68 fix optimizers
Former-commit-id: f811eee2fa12a89a55a9c5d3a05a1521b4347727
2024-04-21 20:40:54 +08:00
hiyouga
1d341dcd83 fix #3365
Former-commit-id: 415ce41e8fa887e980e5bd575c8e95bd4076b90b
2024-04-21 19:20:18 +08:00
hiyouga
d16561e7a4 fix bug in galore optimizer
Former-commit-id: c05ac23261a5a8ba893c2918a43dc7777307407b
2024-04-21 18:53:22 +08:00
hiyouga
f8e219dc81 fix mod stuff
Former-commit-id: cf3988226e6398c67bb2955578e436fc505aa5c5
2024-04-21 18:11:10 +08:00
hoshi-hiyouga
3365cc8cf0 Merge pull request #3338 from astramind-ai/main
Adding Mixture of Depth

Former-commit-id: 4da2ece53353b63e672ff529d6beba41ff710c14
2024-04-21 18:05:52 +08:00
hoshi-hiyouga
3a5e68b7d9 fix #3348
Former-commit-id: aa5e921c00f60074eceb2f9d4d8837cc713edba6
2024-04-20 10:34:09 +08:00
hiyouga
0cb596fee1 add dpo mix dataset
Former-commit-id: 6def3f8bfa51b2d9d73af112352ce07db972e4c9
2024-04-20 01:31:38 +08:00
hiyouga
b3b5b530d1 fix #3352
Former-commit-id: f315f8e8ec916b82bac94a159e55839ff155c6b5
2024-04-19 22:40:01 +08:00
hiyouga
9225c15c88 fix llama3 template
Former-commit-id: 20e95250168fbe081c779b2e1ff23f5df3ce02f7
2024-04-19 15:46:51 +08:00
Marco
abd9fed445 fix small typo
Former-commit-id: 5638a03cd0cf8119ff366b3b3e303b5a2351b065
2024-04-18 20:33:29 +02:00
Marco
44cda2eece Added Mixture of Depths
Former-commit-id: 75dd98b9abc847e22cb263c17ebcd2ca5dd98345
2024-04-18 20:31:24 +02:00
hoshi-hiyouga
8397808d1d support llama3
Former-commit-id: c1eabb751a5fd73b710714451b146732e0ed4558
2024-04-19 01:13:50 +08:00
hiyouga
9e1bd6420d fix #3324
Former-commit-id: 5e710c4ac331f3400534d33b2646c4108c898d98
2024-04-18 15:34:45 +08:00
hiyouga
619264c854 tiny fix
Former-commit-id: 86399ca8c06273c42c2b184664ae25d3405b3bf6
2024-04-18 00:22:17 +08:00
hiyouga
1ebac62e3d update readme
Former-commit-id: a49112a74339ba77bfec53f7870e821fe148db2c
2024-04-17 23:40:49 +08:00
hiyouga
ce9bdb3509 add mixtral 8x22B models
Former-commit-id: eccbeecff0909e1fa124b5439ffbbfbc5607e1d6
2024-04-17 23:35:59 +08:00
hiyouga
0c8d6369ac add CodeQwen models
Former-commit-id: 9f6094241391f8f717818c8ba94e11d1791b4a5c
2024-04-17 23:27:22 +08:00
hiyouga
bee796f6b5 fix #3316
Former-commit-id: 7395e9e90a209228ff563ab54319955608850fc3
2024-04-17 22:54:34 +08:00
hiyouga
9f6349a333 fix #3317
Former-commit-id: 7dce1763be4374cf616d96db95ae964ff510a9d6
2024-04-17 22:17:19 +08:00
hiyouga
171a029c5e lint
Former-commit-id: 917d65ce65024d17a5030bc57083a427cfae16d7
2024-04-16 18:21:09 +08:00
hoshi-hiyouga
eaefaa0fe0 Merge pull request #3291 from codemayq/main
support for previewing custom datasets in directory format

Former-commit-id: 40d89152282101a7c08f53e72c2ad7124a0595f3
2024-04-16 18:12:09 +08:00
hiyouga
d301f0a64b Update parser.py
Former-commit-id: 92c2133896c20054db86dd53508c982e39bd5ca0
2024-04-16 18:09:31 +08:00
hiyouga
0a1578e4e3 update readme and gradio version
Former-commit-id: 4029b60ddcbd15b5354503c51178f0f5e7e9aedf
2024-04-16 18:09:16 +08:00
hiyouga
a4167fd925 support badam for all stages
Former-commit-id: 7a1380646119bfe6855f73dd90570defcea05281
2024-04-16 17:44:48 +08:00
hoshi-hiyouga
42084e08ae Merge pull request #3287 from Ledzy/badam
[Feature] Add BAdam algorithm

Former-commit-id: 10a5e1e65b34b03e5ca2a41bf6ded09a3fb25f0c
2024-04-16 17:32:16 +08:00
hoshi-hiyouga
9d23f5dc89 Update utils.py
Former-commit-id: 01147536b2bb507e87e033fa696e9eb39fe96bbe
2024-04-16 17:30:12 +08:00
hoshi-hiyouga
5978427ae0 Update trainer.py
Former-commit-id: c6163be1444c00dd000f288e2f834968bd932981
2024-04-16 17:29:52 +08:00
hoshi-hiyouga
c7c216069c Update utils.py
Former-commit-id: 7edf4dbed88b8034282f14fd6e0cb6f7f9e5f805
2024-04-16 17:29:30 +08:00
hoshi-hiyouga
cde9d1b917 Update patcher.py
Former-commit-id: 494e6a1e05b38f5ff61d83327303614f53c92e64
2024-04-16 17:29:19 +08:00
hoshi-hiyouga
96213f04b0 Update adapter.py
Former-commit-id: 8f7b75b26f020d8ae85baab7b082475c3bfeb512
2024-04-16 17:28:12 +08:00
hoshi-hiyouga
7ecea08b9b Update parser.py
Former-commit-id: 898239883afc79f03abd0dc276eef901662a9591
2024-04-16 17:27:25 +08:00
hoshi-hiyouga
191971865d Update parser.py
Former-commit-id: 2f3da8169d18b026760cc0ac7dd6141bdd08c932
2024-04-16 17:27:02 +08:00
hoshi-hiyouga
ff4f587dd9 Update finetuning_args.py
Former-commit-id: 3a23d900aea74078f0bc8cf73fac860a4ce3df67
2024-04-16 17:26:30 +08:00
hoshi-hiyouga
de728d0371 Update sft.sh
Former-commit-id: 2b4b1562e91bbb02e345e71b7721da9333c0791b
2024-04-16 17:25:40 +08:00
hoshi-hiyouga
d08e09642d Update requirements.txt
Former-commit-id: 1e45537ca0bb4d49b4147df01122e365b3d617e4
2024-04-16 17:10:17 +08:00
hoshi-hiyouga
351493b183 Update setup.py
Former-commit-id: 5df30ea166aff29d48ff83a22ac6ef1611ce3e35
2024-04-16 17:10:02 +08:00
Jonery
86ab47e121 remove badam from core requirements
Former-commit-id: fa5898944a3867ac5108dd0d579ca0677c87d3d6
2024-04-16 12:25:50 +08:00
Jonery
6dd6b3e396 resolve gradient checkpointing issue.
Former-commit-id: 6df9135d063bb6102f0cbcdf0d702076f5febbae
2024-04-16 12:05:27 +08:00
codingma
5f1418a68b add check
Former-commit-id: 008f6498977c243c80e87242f05c9cf9573541ac
2024-04-16 10:56:39 +08:00
codingma
7b97a79efc support for previewing custom datasets in directory format
Former-commit-id: 501cff38c819f06f15194907ce7e052d5f28025a
2024-04-16 10:43:14 +08:00
hiyouga
ce4f653121 add empty template
Former-commit-id: a325ffa8a668bec354d2636683806acef105e196
2024-04-16 03:10:02 +08:00
hiyouga
b053c6454e update readme
Former-commit-id: 8f233745c3aa7a6ef57f275bec80ee731ff76de3
2024-04-16 02:36:54 +08:00
hiyouga
ebf0f4a77c update readme
Former-commit-id: f9a246572c1ec0e4b36bff237c6523ce629b7000
2024-04-16 02:35:36 +08:00
hiyouga
efa808069a support unsloth 2024.4
Former-commit-id: 14a83f8bc4fe44783252378fce59198194a96bb8
2024-04-16 00:25:03 +08:00
hiyouga
b5c5283dd6 add codegemma
Former-commit-id: 9324176525c2eda22962b0ca1895009b6237e6e3
2024-04-16 00:11:15 +08:00
hiyouga
b638c65519 support cohere commandR #3184
Former-commit-id: e077c36872740f6b2ac255aee9da6c4c70f28977
2024-04-15 23:26:42 +08:00
Jonery
d4d471450f Feature BAdam
Former-commit-id: d8d2807fbcf587c37f7fd34a23e9397d2775ceed
2024-04-15 23:15:27 +08:00
hoshi-hiyouga
3144bdec2c Merge pull request #3254 from marko1616/feature/Add-support-for-CohereForAI/c4ai-command-r-plus
Add template & support for c4ai-command-r/plus (tested)

Former-commit-id: 41d39ec4889abad050820bf153133ac3a11228a3
2024-04-15 22:59:35 +08:00
hoshi-hiyouga
c6d6c4c209 Update template.py
Former-commit-id: 00b8be7dafa65e13b344724a8d3855919ee4f631
2024-04-15 22:58:01 +08:00
hoshi-hiyouga
f5f1589662 Update constants.py
Former-commit-id: 39199f712aa7b7a1c66080d9c84651fd2eb0b425
2024-04-15 22:56:55 +08:00
hiyouga
276f2cb24e update examples
Former-commit-id: 369294b31c8a03a1cafcee83eb31a817007d3c49
2024-04-15 22:14:34 +08:00
marko1616
952b785bb3 change default_system according to the official template
Former-commit-id: 7ad9029c5e77a87a7c324b8f90b4f80a31a5c78b
2024-04-15 20:45:46 +08:00
marko1616
72dd676208 Revert "Add support for function call (not strictly following the original)"
This reverts commit dfaa31e991 [formerly 44f3ada4e394c06b0d972329ed2a62d2be2ea0c6].


Former-commit-id: fac9cc6e01dd8f3bc449b656804476e1871326f0
2024-04-15 20:27:09 +08:00
marko1616
dfaa31e991 Add support for function call (not strictly following the original)
Former-commit-id: 44f3ada4e394c06b0d972329ed2a62d2be2ea0c6
2024-04-15 20:16:52 +08:00
hoshi-hiyouga
86556b1c74 Merge pull request #3261 from khazic/main
Added examples for single-card full-parameter prediction

Former-commit-id: 60df2a9519fbd8215c3afacc831b0cc89006457a
2024-04-15 16:30:57 +08:00
hoshi-hiyouga
0c80751e87 Merge pull request #3276 from liu-zichen/fix_mixtral
fix: turn on output_router_logits of mixtral
Former-commit-id: 07bbaf5c67d00a152e5304e81b15fd9189e7bb99
2024-04-15 15:38:16 +08:00
hiyouga
9338f878a3 fix #3273
Former-commit-id: 3b20c89b342a068356ffc29c3724b645775c65db
2024-04-15 15:32:58 +08:00
liuzc
fde3d91242 fix: mixtral output_router_logits
Former-commit-id: ab3171ea97ec968b972287287ef9ee2502c6d37c
2024-04-15 12:11:49 +08:00
khazic
19adfb88a9 Upgrade README.md
Former-commit-id: 697f768d7185789ee054c94f4f161a65b8a505bc
2024-04-13 20:50:49 +08:00
khazic
daaafa900a Added examples for single-card full-parameter prediction
Former-commit-id: d8d4fb9fa4b0e1950a453682e5e186f34f085dee
2024-04-13 20:45:19 +08:00
marko1616
0dcc9e0bca Typo fix
Former-commit-id: 607625497738b2c8be736be7b0bd5c6f4cbaad5e
2024-04-13 17:30:21 +08:00
marko1616
aeec78b35c Typo fix
Former-commit-id: 51b1e49e288e66c1b0c24ac070201c988fb2a389
2024-04-13 07:52:11 +08:00
marko1616
c991654cb4 Add c4ai-command-r-plus link
Former-commit-id: acaf953ca46eca8fb378067f4ada133654e4f088
2024-04-13 07:32:40 +08:00
marko1616
f328413646 Add template & support (not tested)
Former-commit-id: 60bb60c4dc30a9641ddb57a44ef126f0768566c4
2024-04-13 04:31:33 +08:00
hiyouga
106a0104da fix #3247
Former-commit-id: bb67c66f80627805b585d157ba807c0ce378d3f2
2024-04-12 17:41:33 +08:00
hiyouga
5486ea09e3 fix model card
Former-commit-id: 920e7149bf2b559c9829aa4b11cfb6d00bbb2f9e
2024-04-12 17:11:59 +08:00
hiyouga
31bbbb6d13 fix #3238
Former-commit-id: 4d7e81ab4722d13bec6ca1af141f94bdc74d0883
2024-04-12 14:28:11 +08:00
hiyouga
1a77de82fa set dev version
Former-commit-id: f6cc76571d2c789675883a18e0db3d0c61f33808
2024-04-11 20:27:34 +08:00
hiyouga
7468f2535c release v0.6.2
Former-commit-id: f92ad0a62d957b595f6a76a5403216b163eb3d17
2024-04-11 20:08:51 +08:00
hiyouga
38e4f22605 Merge branch 'main' of https://github.com/hiyouga/LLaMA-Factory
Former-commit-id: 23ff02c1fd3787daf0bc6ac237c8897d02f726e4
2024-04-10 23:58:18 +08:00
hiyouga
2bc2fe7b5e fix #3225
Former-commit-id: 94110ecf27c32e263f1f2ee61842a3a301b9e089
2024-04-10 23:57:59 +08:00
hoshi-hiyouga
6d0140d8a0 Merge pull request #3201 from kno10/patch-1 and fix #3200
Pass additional_target to unsloth

Former-commit-id: 080a96c52f489fda0d315a77e26c4f6f5d69784a
2024-04-10 00:58:48 +08:00
hoshi-hiyouga
7856f98965 Update adapter.py
Former-commit-id: 720fde3683529ed7e08ac27c7c4598c6bdc30d44
2024-04-10 00:57:51 +08:00
hoshi-hiyouga
e25ddef08c Update adapter.py
Former-commit-id: a84b8d17dbf221259212e81931d80bcdd6284ad7
2024-04-10 00:57:30 +08:00
Erich Schubert
95a4589bbf Pass additional_target to unsloth
Fixes #3200

Former-commit-id: f8f87f5b0549cba6a011749c42064047f82ba577
2024-04-09 17:53:40 +02:00
hiyouga
566d71b7a9 fix quant infer and qwen2moe
Former-commit-id: b75d16767f35c36e2cf2aaab8a3844135085bccf
2024-04-09 17:12:59 +08:00
hiyouga
6030a4a720 tiny fix
Former-commit-id: d8f1ff51d4c920d4d0aeb9d53db29d1efb733c85
2024-04-08 21:28:39 +08:00
hoshi-hiyouga
5dc0cb94d4 Merge pull request #3161 from hiyouga/feature/add-mediatek-model
support Breeze-7B

Former-commit-id: af92ac8b62b919a75673011a1c56832e67882ee8
2024-04-08 20:56:51 +08:00
codingma
325dafcbb0 add empty line
Former-commit-id: 1c6c2e611d10e9fa662e3f4e1e7d23b80ae496cb
2024-04-07 18:28:08 +08:00
codingma
1a8a8b8651 rename template to breeze
Former-commit-id: 1223e6358dab52b4e1505057f1b16fd9d527c79e
2024-04-07 18:27:20 +08:00
hoshi-hiyouga
61a495cb1e Merge pull request #3160 from sliderSun/main
support Qwen1.5-32B

Former-commit-id: 1e5a5882dd494c3e9cf5eae2e0a485ce49d1863c
2024-04-07 18:00:40 +08:00
codingma
75866aa020 rename template to breeze
Former-commit-id: 1d894e7cfb73b8a29dababb554d051bd50e4f01d
2024-04-07 11:39:54 +08:00
codingma
9e4fda326d support https://github.com/hiyouga/LLaMA-Factory/issues/3152
Former-commit-id: 708f0ab4b0aa72e2c73ca36eb9ed058910e43092
2024-04-07 11:34:01 +08:00
sliderSun
1131ddfaff fix spelling error
Former-commit-id: e6d36a2e593ebc1193b1735075c4ddb5d9f54990
2024-04-07 10:59:15 +08:00
sliderSun
9f437b5c43 support Qwen1.5-32B
Former-commit-id: c419adf1697b92520342f4ffa697c84bf19ca37d
2024-04-07 10:56:03 +08:00
sliderSun
0cc03d3f05 support Qwen1.5-32B
Former-commit-id: 8f2c67b95a8e177eb4096382417a70cacba38e90
2024-04-07 10:26:13 +08:00
hiyouga
04fc2f78bf update readme
Former-commit-id: 1cf15547e2420a3e5f7a969c21c10c7fbdfc71fe
2024-04-07 00:48:24 +08:00
hiyouga
3ac333fc6a update examples
Former-commit-id: de40ad62ba3d4c74c69de97b39cc79786ac28f0f
2024-04-04 14:48:21 +08:00
hiyouga
a246ac1914 tiny fix
Former-commit-id: 70aceecb27e72095c05462d01f956061669b267e
2024-04-04 02:19:03 +08:00
hiyouga
48ceac845c back to gradio 4.21 and fix chat
Former-commit-id: 695734a40a702ea059d855da54080cc8d161e41a
2024-04-04 02:07:20 +08:00
hiyouga
b1986a06b9 fix bug in latest gradio
Former-commit-id: 44a962862b4a74e50ef5786c8d5719faaa65f63f
2024-04-04 00:55:31 +08:00
hiyouga
43d134ba29 fix requires for windows
Former-commit-id: 5e25fae40b7ea9cfa72717efbe3677199ca9608f
2024-04-03 21:56:43 +08:00
hiyouga
1348f7d860 fix resize vocab at inference #3022
Former-commit-id: c243720b89eec0af2872fa3c7980a0026d893f4d
2024-04-03 18:14:24 +08:00
hiyouga
f6530222f7 fix #3116
Former-commit-id: b7256aa33d761280751518c20f29f9b8ea3fb025
2024-04-03 14:47:59 +08:00
hiyouga
a74a7585e0 update vllm example
Former-commit-id: 2df6d2eacfa27ebc69455696b93649624c1facbe
2024-04-02 22:45:20 +08:00
hiyouga
5bf0cca2b8 update readme
Former-commit-id: 7ea7333b51be6b1120fc0b13675f5a0ac3c5a12b
2024-04-02 22:17:48 +08:00
hiyouga
755b6511ff update examples
Former-commit-id: 2715cfe20f6f4532bebaa47b80ccd5df43d6a490
2024-04-02 21:09:25 +08:00
hiyouga
35621c6089 add zh readme
Former-commit-id: 389a170a4d42c56c71c0e17bbe018c4cb1983b5a
2024-04-02 20:58:45 +08:00
hiyouga
38b59664e6 update examples
Former-commit-id: c078582a759f6bce6e760cd39a05883f7eb194fe
2024-04-02 20:51:21 +08:00
hiyouga
933a084999 update examples
Former-commit-id: bf36b16e48d6438de6d0b2f2bfe33f7895699b9d
2024-04-02 20:41:49 +08:00
hiyouga
c1510d19c7 update readme
Former-commit-id: 9b8e7ccdab167f53fb897e1940562682324e8ff0
2024-04-02 20:37:37 +08:00
hiyouga
2074cf99fb update readme
Former-commit-id: 0c73d3c8a5762a8f119b27322ffd52a61de6fe38
2024-04-02 20:22:11 +08:00
hiyouga
b12176d818 simplify readme
Former-commit-id: 0da6ec2d516326fe9c7583ba71cd1778eb838178
2024-04-02 20:07:43 +08:00
hiyouga
117b67ea30 add moe aux loss control #3085
Former-commit-id: c9187ebc944e2de454ace3304b7d28eabb1b1a81
2024-04-02 14:26:31 +08:00
hiyouga
03e20bb5c6 fix #3022
Former-commit-id: dac2f617bda9470ac8d85c7e9def09cc04970506
2024-04-02 13:58:39 +08:00
hiyouga
0c4a1381a4 Update SECURITY.md
Former-commit-id: e22217c75421a89fd7e2ada62ce0e08245dd05e7
2024-04-01 23:30:03 +08:00
hiyouga
9e14501edb set dev version
Former-commit-id: 922ecae89210e5b8d62d78774f123a6d75c525ba
2024-04-01 23:24:08 +08:00
hiyouga
1dc963caa6 fix #3083
Former-commit-id: ff9a3f73961a362d0ddc22079f80a85465fffda8
2024-04-01 22:53:52 +08:00
hiyouga
85726c91ce add qwen1.5 moe
Former-commit-id: 3ea94f0d12cec25ac694a2c4ae8971c356990b61
2024-04-01 21:49:40 +08:00
hiyouga
40211db275 fix #3077
Former-commit-id: d0340391e8075cff0d84b3ef879c2101b66ca1dc
2024-04-01 21:35:18 +08:00
hiyouga
e7f13098c6 support infer 4bit model on GPUs #3023
Former-commit-id: 950a9dab9055839990656b2b40956792b253573d
2024-04-01 17:34:04 +08:00
hiyouga
61eb3a3d46 update webui
Former-commit-id: e96d260917a35ad2068f7b28b4f0b334b808ccc2
2024-04-01 16:23:28 +08:00
hiyouga
be0a807e8c fix ORPO loss
Former-commit-id: 5544ddde9087f00f9e20b78d0079f20c2f5d1604
2024-04-01 14:42:41 +08:00
hiyouga
52d402e2a9 fix IPO and ORPO loss
Former-commit-id: fc27955732aedbb12003faf19b760e2768b228f2
2024-04-01 14:37:53 +08:00
hiyouga
c5a46f9113 fix plots
Former-commit-id: 81355671296b84d438967463bb2a92934ff31aae
2024-03-31 19:43:48 +08:00
hiyouga
00e17a377c use log1p in orpo loss
https://github.com/huggingface/trl/pull/1491

Former-commit-id: 3b15d495264b00a4f8716bafea334778874963d7
2024-03-31 19:27:08 +08:00
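The change above follows huggingface/trl PR #1491, which computes log(1 − exp(logp)) via log1p for numerical stability. A minimal sketch of the odds-ratio term in that style (hypothetical function and argument names, not the repository's actual trainer code):

```python
import torch
import torch.nn.functional as F

def orpo_odds_ratio_loss(chosen_logps: torch.Tensor, rejected_logps: torch.Tensor) -> torch.Tensor:
    # chosen_logps / rejected_logps: average per-token log-probs, each < 0.
    # log(1 - exp(logp)) is written as log1p(-exp(logp)), which stays finite
    # as logp approaches 0, unlike torch.log(1 - torch.exp(logp)).
    log_odds = (chosen_logps - rejected_logps) - (
        torch.log1p(-torch.exp(chosen_logps)) - torch.log1p(-torch.exp(rejected_logps))
    )
    # -logsigmoid(x) is the numerically stable form of -log(sigmoid(x)).
    return -F.logsigmoid(log_odds)
```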
hiyouga
9abd83adb1 update readme
Former-commit-id: 297b01f16ac78cde15a5d85a9a5b82ea20bfaf23
2024-03-31 18:46:34 +08:00
hoshi-hiyouga
f0d2afcf90 Merge pull request #3066 from hiyouga/orpo
support ORPO

Former-commit-id: fd4d3d29a9fae289f3bd0c4ce00656e4ccbec2e1
2024-03-31 18:42:48 +08:00
hiyouga
1aba442bcd support orpo in webui
Former-commit-id: dd5cc78d4fb18dd0a2e9d57f0f046cfe9f0dc2c9
2024-03-31 18:34:59 +08:00
hiyouga
d764cd8736 support ORPO
Former-commit-id: f44a4c27e2461cdaa1b16865f597a31033c0e6d9
2024-03-31 18:29:50 +08:00
hiyouga
526111a303 tiny fix
Former-commit-id: ba4a9b3c01e2f7467fbc5be268f47c0d003caa65
2024-03-31 00:10:29 +08:00
hoshi-hiyouga
b8364046df Merge pull request #3057 from marko1616/bugfix/lora-model-merge
Fix Llama model save for full-parameter training

Former-commit-id: 18303e34d07dbf4c2dd9bac03243a3ed38582515
2024-03-31 00:07:20 +08:00
marko1616
1f617c6e08 fix blank line contains whitespace
Former-commit-id: 7bc3bcc64353d5a1d4870c6a9509b64cff710492
2024-03-30 23:46:55 +08:00
marko1616
a6858a36c0 Fix Llama model save for full-parameter training
Former-commit-id: ca17b5db4f97c3ec9fe2004877f150e8f51ab4b5
2024-03-30 23:45:04 +08:00
hiyouga
6198121923 support save args in webui #2807 #3046
some ideas are borrowed from @marko1616


Former-commit-id: b5a062aa2d4a37670007e8b3dae5b6f5b7ffb15c
2024-03-30 23:09:12 +08:00
hiyouga
b0efebf853 upgrade gradio to 4.21.0
Former-commit-id: 63eecbeb967d849e1d03d8d03fb6421c0ee89257
2024-03-30 20:37:08 +08:00
hiyouga
fbd0584391 release v0.6.1
Former-commit-id: a59d823f554505b2e649e6e111b9dee8306d3ad8
2024-03-29 11:36:08 +08:00
hiyouga
50224b09cc update readme
Former-commit-id: 312d4f90784800dc8db4eaa7d908e6761115bc51
2024-03-28 22:02:32 +08:00
hiyouga
32dcc5a491 add project
Former-commit-id: 0418e9fecb2337b5d1b72e8358adb8aa10803c4b
2024-03-28 20:24:27 +08:00
hiyouga
9408366a36 fix #2982
Former-commit-id: e5e6a0c50c7a1c0052ed6b459450b9735ff2c9a1
2024-03-28 20:22:31 +08:00
hiyouga
f0e564beaa update readme
Former-commit-id: 6b634b5c2dbad827e8cc9850b8d7697c2056532a
2024-03-28 18:35:11 +08:00
hiyouga
14b75a0b93 fix #3010
Former-commit-id: a5e823ae75556eaa3b52ce7a887a6e7838a1eb5f
2024-03-28 18:31:17 +08:00
hiyouga
59e6ebf039 update trainers
Former-commit-id: d0dd6eefed0b86895ed00a7cafb331e5193db645
2024-03-28 18:16:27 +08:00
zhangzc
7cdc16abdf Support custom sample counts for datasets
Former-commit-id: fa8325401df27595de4611a89dfcc14644956abd
2024-03-27 14:22:50 +08:00
hoshi-hiyouga
dc540dfaa8 fix ds optimizer
Former-commit-id: 2675127070a1e7584e71039a11c1ebac54ddd1db
2024-03-26 23:39:56 +08:00
hiyouga
587e65e442 fix #2981
Former-commit-id: ede2a913856e52c0a96155705116528d3af15998
2024-03-26 17:53:04 +08:00
hiyouga
a916688723 fix bug
Former-commit-id: f513e1415cc3fe87f600318fba855d1286b6d007
2024-03-26 17:30:12 +08:00
hiyouga
3336422760 fix #2961
Former-commit-id: 616917bb3be7f71073b56ad8c7bc4e164b08b9b5
2024-03-26 17:26:14 +08:00
hiyouga
04423b916f release v0.6.0 (real)
Former-commit-id: 34e06bf408ccd21e674f896703f1c7b62e97e1ca
2024-03-25 23:37:48 +08:00
hiyouga
bf8d2f8eda tiny fix
Former-commit-id: bf2455e420cf35c6596528f319c1b18408b5519a
2024-03-25 23:28:52 +08:00
hiyouga
2a5d02fd0f update readme
Former-commit-id: 32e6a7f10fdc28106e3b086eb79304943c6e8fab
2024-03-25 23:06:13 +08:00
hoshi-hiyouga
ea550ed9e0 Merge pull request #2967 from Tsumugii24/main
Update README_zh.md

Former-commit-id: 4c3b8da2caf74e9d6819bdb1a4e30ca3c549a2d8
2024-03-25 23:02:22 +08:00
Tsumugii24
02665cd42b Update README.md
Former-commit-id: fd28fff2b9dfdb3e59b160c5fcee9cdc69e53564
2024-03-25 22:54:38 +08:00
Tsumugii24
0c6a94e66d Update README_zh.md
Former-commit-id: 34141ee0515c3e765ca0cb82a0625fb0abfba6f9
2024-03-25 22:54:26 +08:00
hiyouga
ebd6bc2604 add arg check
Former-commit-id: 86e0d5a5a50ae34307f5176c7c4a6ab9d0c224b9
2024-03-25 22:42:58 +08:00
hiyouga
daab85e3e6 release v0.6.0
Former-commit-id: 51910d5803eb718e4976da0b3bfcdc5eeeea48eb
2024-03-25 22:38:56 +08:00
Tsumugii24
769d81a83d Update README_zh.md
Former-commit-id: deec57ec009ef6c08a90ad8e5800d6d5a936b337
2024-03-25 22:31:03 +08:00
hoshi-hiyouga
ac2a401b1d Merge pull request #2963 from rkinas/patch-1
Update requirements.txt

Former-commit-id: 0d56337adabd84aded31dd19f42d8d06ab2d8666
2024-03-25 21:49:34 +08:00
Remek Kinas
bb53c18153 Update requirements.txt
Former-commit-id: a640f245ef9cee706c2f982d578f520e6b1eb70b
2024-03-25 14:30:58 +01:00
hiyouga
04e0fe9147 tiny fix
Former-commit-id: c39cf3439a3025f703d50ac414c10ef3c8486a1f
2024-03-25 21:18:08 +08:00
hoshi-hiyouga
39f75c7001 Merge pull request #2945 from marko1616/bugfix/lora-model-merge
Fix the crash caused by generation config validation when merging LoRA models into some models on transformers > 4.36.2

Former-commit-id: 95afea730e80f58cc2984592fc07e265504c9491
2024-03-25 13:36:08 +08:00
marko1616
7f99cb1817 pass ruff check
Former-commit-id: 8534b069a05121eb041371a6becccf0a1a23f268
2024-03-24 16:12:10 +08:00
marko1616
c555b2cce3 fix Llama lora merge crash
Former-commit-id: 46f7d8e6b85f73fb0c51c8b08bd9955c3b171d93
2024-03-24 03:06:11 +08:00
marko1616
2eba1c6851 fix Llama lora merge crash
Former-commit-id: a8bd8e9149ff79a2707fec9c6d006761cfdd0dee
2024-03-24 02:55:23 +08:00
marko1616
edeed55664 fix Llama lora merge crash
Former-commit-id: c29a2893f58cf7a916ff05b2725fadf1ad2c4c9a
2024-03-24 02:44:35 +08:00
hiyouga
92248f9cb2 fix #2936
Former-commit-id: 9ae646fbbd809057a9c54fe41e1ae5a07a674556
2024-03-24 00:43:21 +08:00
hiyouga
c548ad5e69 fix #2928
Former-commit-id: 9558ee87bc7260a6596385aaa375df544862bfa9
2024-03-24 00:34:54 +08:00
hiyouga
a57d839e1d fix #2941
Former-commit-id: 3775ab52017f0b610ddd8199cccfb8c001eda507
2024-03-24 00:28:44 +08:00
hoshi-hiyouga
d88a34bc79 Merge pull request #2919 from 0xez/main
Update README.md, fix the release date of the paper

Former-commit-id: e7157cee78688fdd572a873b1e46accc1a32717e
2024-03-22 12:12:24 +08:00
0xez
60cbc9d0e5 Update README_zh.md, fix the release date of the paper
Former-commit-id: 6ea16156b6456216cefab59265dae1edc9dc938f
2024-03-22 10:41:17 +08:00
0xez
d5005e766f Update README.md, fix the release date of the paper
Former-commit-id: 4bf9ef3095376f0208f783f180c13bef88581824
2024-03-21 22:14:48 +08:00
hiyouga
4d0753cffe move file
Former-commit-id: f9017af7fe1dfbe5b799904ca1d900b3051fb719
2024-03-21 17:05:17 +08:00
hiyouga
1cf0f11840 add citation
Former-commit-id: 54199205f2000c0500d29822387646133e06e8b2
2024-03-21 17:04:10 +08:00
hiyouga
052e8b2cc6 paper release
Former-commit-id: 7bd384655244ce6a8c1f34aa6fed54122d0e9da5
2024-03-21 13:49:17 +08:00
hiyouga
8963e89633 update readme
Former-commit-id: ab98d4d617b7193c474f58a29ca9475fea7564aa
2024-03-21 00:48:42 +08:00
hiyouga
935ee0a023 support fsdp + qlora
Former-commit-id: b894bf8e84be689db258021f0638e9ac939abcbc
2024-03-21 00:36:06 +08:00
hiyouga
5ed234ca63 add orca_dpo_pairs dataset
Former-commit-id: af683aacbae462a2a37d76d37df583e217664bd5
2024-03-20 20:09:06 +08:00
hoshi-hiyouga
04884a0911 Merge pull request #2905 from SirlyDreamer/main
Follow HF_ENDPOINT environment variable

Former-commit-id: fa801ff118433b622f6aa47920c5c93ec9b68414
2024-03-20 18:09:54 +08:00
hiyouga
c7af26a9e3 fix #2777 #2895
Former-commit-id: 54d5f62d29456a8d9d0c0dd3d0bbfffe48935803
2024-03-20 17:59:45 +08:00
hiyouga
d8073488be fix #2346
Former-commit-id: c8888c499b0ac51e2fc86c16e8e91c79400a5993
2024-03-20 17:56:33 +08:00
SirlyDreamer
6fc2d7e063 Follow HF_ENDPOINT environment variable
Former-commit-id: 22b36a3cfd2909cb624b1bb7385558eda504defe
2024-03-20 08:31:30 +00:00
khazic
e93c7cdb80 Updated README with new information
Former-commit-id: b12f12039ce221decf09a25ec9d64e385d9497c7
2024-03-20 14:38:08 +08:00
khazic
c32d6c8250 Updated README with new information
Former-commit-id: 90a81c2e52bd44beb3b7feb5d2517b073f7f6ef9
2024-03-20 14:21:16 +08:00
刘一博
757158da63 Updated README with new information
Former-commit-id: fddbc29ca1bd9b13372087e6a349f21240abc013
2024-03-20 14:11:28 +08:00
hiyouga
ffdacaa618 fix packages
Former-commit-id: 2f9f334a123d43267bfb3dd26aaa1ad285ffe7a5
2024-03-17 22:32:03 +08:00
hiyouga
e194efab10 fix patcher
Former-commit-id: 6a5ad99c8cbf6b7def0a130306d49e7d1eb4e5a5
2024-03-15 19:18:42 +08:00
hoshi-hiyouga
772fc2eac7 Merge pull request #2849 from S3Studio/DockerizeSupport
Improve Dockerize support

Former-commit-id: b63cba317266f5ba217de54fda77ec26a4df344d
2024-03-15 19:16:02 +08:00
hiyouga
ed020579dc fix export
Former-commit-id: 4e996f194406d7eb27b2bde290a12c82c41219d0
2024-03-15 15:06:30 +08:00
S3Studio
096869c7b6 Use official Nvidia base image
Note that the flash-attn library is installed in this image and the qwen model will use it automatically.
However, if the host machine's GPU is not compatible with the library, an exception will be raised during the training process as follows:
FlashAttention only supports Ampere GPUs or newer.
So if the --flash_attn flag is not set, an additional patch for the qwen model's config is necessary to set the default value of use_flash_attn from "auto" to False.


Former-commit-id: cd2f5717d676e1a5afd2f4e7a38402d2e55e7479
2024-03-15 08:59:13 +08:00
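As the commit message above describes, when the --flash_attn flag is off, the "auto" default of use_flash_attn has to be overridden on pre-Ampere GPUs. A minimal sketch of such a patch (the checkpoint name is illustrative, and use_flash_attn is an attribute of Qwen's remote-code config, not of transformers itself):

```python
from transformers import AutoConfig

# Illustrative checkpoint; any Qwen(1) checkpoint with remote code applies.
config = AutoConfig.from_pretrained("Qwen/Qwen-1_8B", trust_remote_code=True)

# Qwen's remote-code config defaults use_flash_attn to "auto", which enables
# flash-attn whenever the library is installed -- and crashes on pre-Ampere
# GPUs with "FlashAttention only supports Ampere GPUs or newer."
if getattr(config, "use_flash_attn", None) == "auto":
    config.use_flash_attn = False  # fall back to the eager attention path
```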
S3Studio
c6873211e9 improve Docker build and runtime parameters
Modify the installation method of extra Python libraries.
Utilize shared memory of the host machine to increase training performance.


Former-commit-id: 97f9901c2f5c29a6ab517a1f8fa028b8e89edf4e
2024-03-15 08:57:46 +08:00
hiyouga
623ee1bd88 tiny fix
Former-commit-id: bf8123669be334338b4268d0a8f7703ff2cf6255
2024-03-14 21:19:06 +08:00
hiyouga
aabe90343e fix export
Former-commit-id: c9b968b84c97c9a00fbb43194c3adc9354d74f3b
2024-03-14 18:17:01 +08:00
hiyouga
764cfb506d fix bug
Former-commit-id: 38c618b797ec219c2c45de960c9cbe50ec524c94
2024-03-13 23:55:31 +08:00
hiyouga
249ad56075 fix bug
Former-commit-id: 47ee0276830adbed65bc111d5a83049e77ad360a
2024-03-13 23:43:42 +08:00
hiyouga
46f99ff277 improve lora+ impl.
Former-commit-id: 332bad25455a70ad9204e7dd384bb086d789aa39
2024-03-13 23:32:51 +08:00
hoshi-hiyouga
73f4513c84 Merge pull request #2830 from qibaoyuan/lora_plus
[FEATURE]: ADD LORA+ ALGORITHM

Former-commit-id: 456f2aed5811b9f296acd371a1f706daeb37e12a
2024-03-13 20:15:46 +08:00
齐保元
3c91e86268 [FEATURE]: ADD LORA+ ALGORITHM
Former-commit-id: c35b3c3b1e27171f8a703f88ede1dc8a84c80a56
2024-03-13 19:43:27 +08:00
hiyouga
42473ec150 fix #2817
Former-commit-id: f1c8b8127b3c1ac095176015af5ec92d37a11efe
2024-03-13 12:42:03 +08:00
hiyouga
6a4e4b9c5b fix #2802
Former-commit-id: f4c56ccd785790c02f0d1275cd75958677a18690
2024-03-13 12:33:45 +08:00
hiyouga
9a784fb4f3 fix kv cache
Former-commit-id: a9588e36e95bed896eea8d79ba7108447ff08f4b
2024-03-13 01:21:50 +08:00
hiyouga
43fd80a1aa support QDoRA
Former-commit-id: d8ad1c5ef08e733e52084de271aad762b1613129
2024-03-12 22:12:42 +08:00
hiyouga
e6ab1a57ea patch for gemma cpt
Former-commit-id: fc0b19c62f52a90d78b63761dda3d8970a42f2da
2024-03-12 21:21:54 +08:00
hiyouga
282edb9161 fix plot issues
Former-commit-id: 01ae196b4916433da9aeec9c0b5c660c6b34464c
2024-03-12 18:41:35 +08:00
hiyouga
dff77004f2 support olmo
Former-commit-id: 2719510e8c6baa591c74458b773e4e47215e6052
2024-03-12 18:30:38 +08:00
hiyouga
6c1b4aec75 fix #2802
Former-commit-id: 1370db270d7ba1a20468abdb29193ce7534d1b4f
2024-03-12 17:08:34 +08:00
hiyouga
7814db1b42 fix #2803
Former-commit-id: d60498cba1ed124e8a678ce7775d55a018f99537
2024-03-12 16:57:39 +08:00
hiyouga
c9ed3fc3a4 fix #2782 #2798
Former-commit-id: eb3ab610610a0964bc8a1c9fa015805353f04c31
2024-03-12 15:53:29 +08:00
hoshi-hiyouga
9ee416a8fc Merge pull request #2743 from S3Studio/DockerizeSupport
Add dockerize support

Former-commit-id: 30751a7b9218770cc2bc6cae857a28950bffbb6c
2024-03-12 00:05:49 +08:00
hiyouga
4f9a47c026 fix #2775
Former-commit-id: a5c7feb3e8089f4deff760b00a9f84425957c419
2024-03-11 00:42:54 +08:00
hiyouga
3fcb1c6d09 tiny fix
Former-commit-id: 1d22c87db2449c7d9915842b70fbd59ce9c2dd70
2024-03-11 00:17:18 +08:00
hiyouga
7c492864e9 update parser
Former-commit-id: d98258aa08d93494ad50d7786064e7fda15f6ca9
2024-03-10 13:35:20 +08:00
hiyouga
7ff8a064f3 support layerwise galore
Former-commit-id: d43a4da0947897d0be3f62fad3107754d4c89f2b
2024-03-10 00:24:11 +08:00
hiyouga
c635bbe465 fix #2732
Former-commit-id: bc39ad1d102b91d5417daa38b8a581e1e1ab2af9
2024-03-09 22:37:16 +08:00
hiyouga
4881f4e631 allow non-packing pretraining
Former-commit-id: 3fee5cc5a3db9ce874ad90f2500ec092d904bd4e
2024-03-09 22:21:46 +08:00
hiyouga
c631799f5d fix #2766
Former-commit-id: a8cd556230c1d0bc4e090acc2276c035910ce6f6
2024-03-09 21:35:24 +08:00
hiyouga
48846676d8 use default arg for freeze tuning
Former-commit-id: a38fd7c8b39cb59fb61c26fdf80aaa6f2d0623b9
2024-03-09 06:08:48 +08:00
hiyouga
f37d481c5d add GaLore results
Former-commit-id: ac05b9bba62924693bdede85917d21b844849b8c
2024-03-09 04:11:55 +08:00
hiyouga
5d7d8bd55c update hardware requirements
Former-commit-id: 604b3d10fc1448f702943114b66b97bded21e080
2024-03-09 03:58:18 +08:00
hiyouga
8ed1463236 update examples
Former-commit-id: 38592faa258f7331afb95bc5db4b9bf37f08105d
2024-03-09 02:30:37 +08:00
hiyouga
43b2ede0f8 fix #2756, patch #2746
Former-commit-id: 627d1c91e675f1d9ebf47bad123cbbf29821da4d
2024-03-09 02:01:26 +08:00
hoshi-hiyouga
2f095e2017 Merge pull request #2746 from stephen-nju/main
fix deepspeed ppo RuntimeError

Former-commit-id: 656c653f0c628f9494b4d7ae12e60c8eeec1ea7a
2024-03-09 01:37:00 +08:00
hiyouga
9b55bb964c Update setup.py
Former-commit-id: 543740fa00dda2c5d16822f7c9f4ef32d916426f
2024-03-09 00:14:48 +08:00
hiyouga
9b97b23ce7 fix aqlm version
Former-commit-id: 05673f81f0295c76957f3247c62f95fda322a63e
2024-03-09 00:09:09 +08:00
hiyouga
53ab28533e fix example params
Former-commit-id: 0280748528488d7bee6b9074025255453966124c
2024-03-08 20:41:43 +08:00
stephen_zhu
940c00e7ae update
Former-commit-id: 295f9ef2eff2e8b5d7a21d3da8dd3e6eb2a42006
2024-03-08 12:47:44 +08:00
stephen
18cfd5f349 fix ppo runtime error
Former-commit-id: 14e2f221e3e720075e59065a3dc42aa4d993a8b6
2024-03-08 11:48:26 +08:00
S3Studio
6169df1c52 Add dockerize support
Already tested with the Qwen:1.8B model and the alpaca_data_zh dataset. Some Python libraries were added to the Dockerfile in response to exception messages raised during testing.


Former-commit-id: 897e083bc28ccb15c46909b9d13fc03a674fb254
2024-03-08 10:47:28 +08:00
hiyouga
d46c2bbcba update readme
Former-commit-id: 353db1e28aa8888228a05813bb09c51e7d28728c
2024-03-08 03:06:21 +08:00
hiyouga
48d4364586 fix chat engine, update webui
Former-commit-id: 8b32dddd7d883bae07735796a517927c79d1c33b
2024-03-08 03:01:53 +08:00
hiyouga
8042c66a76 Update setup.py
Former-commit-id: 76c3ec05258a5f5d1f78430ef6258a5eda527d65
2024-03-08 01:23:00 +08:00
hiyouga
3879d79b89 update galore args
Former-commit-id: c7479a7976f773feb36aab4fdb0500be53d83b6a
2024-03-08 01:17:32 +08:00
hiyouga
e416cecf62 fix galore
Former-commit-id: 62a3ceeef8f60caef43ccc7f971a0c9184e21296
2024-03-08 00:44:51 +08:00
hiyouga
81fcb80466 add Yi-9B model
Former-commit-id: bfcb0245b832242eefb84de6f70bd75544f3ceb7
2024-03-07 23:11:57 +08:00
hiyouga
bf812fbe40 add galore examples
Former-commit-id: aabf1b99f39aae535401b2f65f0d629def6e39f5
2024-03-07 22:53:45 +08:00
hiyouga
1e6fb6c8aa support galore
Former-commit-id: b67a4a46a88d83bb2a3459b3317b66cda15e0171
2024-03-07 22:41:36 +08:00
hiyouga
5d0c95bd02 update readme
Former-commit-id: 649e3e8cb741b28552b351a3e2627345e292689d
2024-03-07 20:34:49 +08:00
hiyouga
7cd2417002 tiny fix
Former-commit-id: 731530212152476f76963bba121ce2fe1264432a
2024-03-07 20:29:34 +08:00
hoshi-hiyouga
16851d66e5 Merge pull request #2739 from hiyouga/dev-vllm
support vllm

Former-commit-id: 8cc876958a6c05e644e2f519282efb6f222a2277
2024-03-07 20:28:18 +08:00
hiyouga
056d2d956a support vllm
Former-commit-id: 889f6e910e654d8ec3922c2185042d737ffbf1c3
2024-03-07 20:26:31 +08:00
hiyouga
9a69cadab3 fix #2735
Former-commit-id: 416f6333f66b6afd70a3a936d82593efca583235
2024-03-07 16:15:53 +08:00
hoshi-hiyouga
3de642bffd Merge pull request #2730 from cx2333-gt/main
fix flash_attn in train_web

Former-commit-id: eff0b774fc8e1a5a07a2554d611cb85bef439dec
2024-03-07 14:37:18 +08:00
cx2333
286b9d9849 revert choice name
Former-commit-id: 7832e68072219c7d1f562aee868812a4d655f4e0
2024-03-07 14:28:55 +08:00
hiyouga
cef1ede826 fix chatglm3 template
Former-commit-id: 9be0aa70fdd2e9ec208aa1850ace5c287efc8c3a
2024-03-07 14:26:16 +08:00
cx2333
5007566588 fix flash_attn in train_web
Former-commit-id: 5f340e362b0e91fec76c19c77c5705bba1db481a
2024-03-07 10:13:55 +08:00
hiyouga
e93fb3cc6c tiny fix
Former-commit-id: c3145afa4164dd28888f17599a154f7dddbe9326
2024-03-06 17:25:08 +08:00
hiyouga
7578209735 use balanced GPU allocation for model export
Former-commit-id: 710487dc694489bf3dfe54f8d32df80ce46439e4
2024-03-06 16:33:14 +08:00
hiyouga
67f02f75d0 fix add tokens
Former-commit-id: ff5353681a87d033903bf8cf6133c6bdb3fa9e5a
2024-03-06 15:04:02 +08:00
hiyouga
73d9dfc7ab fix version checking
Former-commit-id: 5780da8d640609cca388f55983d0251e5547209a
2024-03-06 14:51:51 +08:00
hiyouga
6b407092d9 update examples
Former-commit-id: 194e25606515bfa42c3be27d68f68d604191514b
2024-03-06 13:14:57 +08:00
hiyouga
3168abc0a1 fix arg dtype
Former-commit-id: 999ae05655815ac04ababddae55d9343f5d39f84
2024-03-05 20:53:30 +08:00
hiyouga
46ee267cfc improve aqlm optim
Former-commit-id: 81be999b407e988c2f42764d827ac859d079ed3e
2024-03-05 20:49:50 +08:00
hiyouga
a10bead9b5 optimize aqlm training
Former-commit-id: 8b42660e4039b3d6475f502f397686ba6b140627
2024-03-05 18:35:41 +08:00
hiyouga
3553e301dd fix dora inference
Former-commit-id: 21b3597b0a05169afe51e1609b532787a65ca8ea
2024-03-05 11:51:41 +08:00
hiyouga
02b838b9b0 fix export model
Former-commit-id: 7ba2f7bf8da3c559e05d8dde20e93cd1d3d4e8ef
2024-03-05 11:05:41 +08:00
hiyouga
b1de6d1025 update readme
Former-commit-id: bd6fd8ad3a5ef8c49247dc1b1cd7584ef211489e
2024-03-05 03:20:23 +08:00
hiyouga
bc67872218 add examples
Former-commit-id: 2744dc9d2f9df4150a496b38e24ea96040a85bef
2024-03-05 03:16:35 +08:00
hiyouga
0229fffde5 auto set chat template
Former-commit-id: d8bf2f0efe6919990c7032aaa06010980cfde019
2024-03-05 02:41:20 +08:00
hiyouga
3555b87363 update readme
Former-commit-id: c95bc2774800ed2e6d54a6099a466bdacc0cfb78
2024-03-04 19:29:26 +08:00
hiyouga
2dca53962e fix export on cpu device
Former-commit-id: e4722a9a627ea4e9a1341cc00a3108dd06a6b550
2024-03-04 17:35:09 +08:00
hiyouga
f4f71f2797 fix sub-process error in thread
Former-commit-id: 3448ad43d05301b12a19a02c1cc23d7b0ee525c3
2024-03-03 15:04:35 +08:00
hiyouga
77ab9457ed update readme
Former-commit-id: 8f1bbd8f5954f64554b7dbe98073d19841e0cb74
2024-03-03 01:41:07 +08:00
hiyouga
4fa53b6282 update readme, add starcoder2, cosmopedia
Former-commit-id: 1ae7c183640146bb9b06c98942985a1721d2b9c9
2024-03-03 01:01:46 +08:00
hoshi-hiyouga
790b73586b Update README_zh.md
Former-commit-id: ccc0887e7e33901d27ee33e502304f0a7464bc8d
2024-03-03 00:49:08 +08:00
hoshi-hiyouga
9c29c2a172 Update README.md
Former-commit-id: 3198b66f6ac342a069c6775104e4000f4a1d8355
2024-03-03 00:48:47 +08:00
hoshi-hiyouga
863960d33e Update README.md
Former-commit-id: f2cd1349ba07b2043ff61d618d1f3207cfd7e36f
2024-03-03 00:48:06 +08:00
hiyouga
330e5381b4 add colab demo
Former-commit-id: 446946357710d8a27c21107f7bdef2cf1d0fa4c7
2024-03-02 19:58:21 +08:00
hiyouga
5bb411fdb8 move git files
Former-commit-id: da9551a802250860cc870c0375d73d667211b8fa
2024-03-02 18:30:11 +08:00
hiyouga
59a9a5994e fix #2649
Former-commit-id: 1c850de660c671d92f0bc63f230d338b60b7c0bd
2024-03-01 13:02:41 +08:00
hiyouga
5306a71b42 tiny fix
Former-commit-id: 59116aa07fa5fc608f8b801dd3c89e53b117033e
2024-02-29 21:03:48 +08:00
hiyouga
3eafa2dd9e fix webui
Former-commit-id: 730377a818a7ff5e45bf4ac9ee4364c4f123a598
2024-02-29 20:09:09 +08:00
hiyouga
88fddb879d fix #2642
Former-commit-id: d8435e7f1850532310e1bee069b45f38cd666e48
2024-02-29 18:32:54 +08:00
hiyouga
71491825bf add twitter
Former-commit-id: d36ace1ebb903362b003c5d6ebbcfb52e20d055d
2024-02-29 17:45:30 +08:00
hiyouga
30855b924a tiny fix
Former-commit-id: 3b6e1132c4d203e6d5376cf97e81cc160697c822
2024-02-29 17:28:50 +08:00
hiyouga
48d2e6d7fe tiny fix and release
Former-commit-id: 79ae5f2e06c151cd8f71a96a5ee099f034043ffd
2024-02-29 00:46:47 +08:00
hoshi-hiyouga
041c83ea03 Merge pull request #2575 from lungothrin/feature/chatter-with-role
support on-the-fly testing of tools

Former-commit-id: c49af47d97ef2bae2c57dd03333752321ad6d483
2024-02-29 00:39:47 +08:00
hiyouga
0e621c2dc9 fix #2629
Former-commit-id: c18822669568327d4fbf480a80c5fe5b8fc95e7a
2024-02-29 00:37:29 +08:00
hiyouga
544e7a491b release v0.5.3
Former-commit-id: f6bc89581b3cd129448da2defc23848de6f494ed
2024-02-29 00:34:19 +08:00
hiyouga
a2c881fa08 add examples
Former-commit-id: 8cdf64adc2c8e5f194a6df26cf749d7bc9bc039f
2024-02-28 23:19:25 +08:00
hiyouga
c53c7af168 update chatglm3 template
Former-commit-id: f55e75ef3b86ea7930bb9d84b46cfc953a74441d
2024-02-28 21:11:23 +08:00
hiyouga
a2d93e5269 update readme
Former-commit-id: 654f3e174a460c621c52724b69fc4aee93370970
2024-02-28 20:50:01 +08:00
hiyouga
b392e6cfb9 support DoRA, AWQ, AQLM #2512
Former-commit-id: 6614cc1f08aa944db083e27e451bbdd733f7dd97
2024-02-28 19:53:28 +08:00
Liang Ge
13aa2d389a support on-the-fly testing of tools
Former-commit-id: 95bb82fd89512ea13caf20850d1f46d8a62b4e2a
2024-02-28 01:17:49 +08:00
hoshi-hiyouga
1e7962dfc4 Merge pull request #2608 from Katehuuh/main
bump accelerate

Former-commit-id: 315662bac17c2e958d0e0b706c6e3443b8a11ec8
2024-02-27 16:49:34 +08:00
Katehuuh
1c9556c84c bump accelerate
Former-commit-id: 100deec5a8b025dbf60cf543775d2b136a75eef4
2024-02-27 08:56:45 +01:00
hiyouga
ca3ca7a5b5 add pr template
Former-commit-id: 3303855fb08316c78bf2959e3fdd6de389a1e486
2024-02-26 18:31:07 +08:00
hoshi-hiyouga
0500befdb4 Create CONTRIBUTING.md
Former-commit-id: 892ae9fd570c1c9e307ecb1fd861b8de59f2a835
2024-02-26 18:23:03 +08:00
hoshi-hiyouga
f618feab51 Create SECURITY.md
Former-commit-id: c7459a8eac77dbfbae910d468e4ac04acd9fd9de
2024-02-26 18:03:17 +08:00
hiyouga
4b06aa134f update readme
Former-commit-id: 1b1b427ea13d2a84683514d924555db974865d73
2024-02-26 17:25:47 +08:00
hoshi-hiyouga
9cde56d760 Merge pull request #2531 from Rayrtfr/main
Support Atom Model

Former-commit-id: 9868d3e85d70413e49e108297309fcc62a5c1567
2024-02-26 16:36:45 +08:00
Rayrtfr
d0ea203694 Support Atom Model
Former-commit-id: da3e76f22aca9acaf772ff821b7eb03c2a2ac869
2024-02-26 10:44:10 +08:00
hiyouga
c5eb3fba62 update webui
Former-commit-id: 298a5fc52610deb9f7d555e2fc699f10067d8af5
2024-02-25 20:23:41 +08:00
hiyouga
a8bc32553c update readme
Former-commit-id: 33c93b1e89f532073429156dac45b62542d34070
2024-02-25 16:26:08 +08:00
hoshi-hiyouga
88f3358320 Merge pull request #2525 from stephen-nju/main
update project_kwargs for ppo config

Former-commit-id: e7a6910141cc8d8dd966c1f54388d9ef764418d0
2024-02-25 15:54:00 +08:00
hiyouga
a85bdcf2f6 add papers
Former-commit-id: d1650cddf66b2d118d618eff2f6beb082000a0e4
2024-02-25 15:34:47 +08:00
hiyouga
caf56b313e add papers
Former-commit-id: edf0af7bfc4d621a59be782e57b55c0e878e5b4a
2024-02-25 15:18:58 +08:00
hiyouga
75603c45fc fix data entry
Former-commit-id: e5c116816f2d00e3bfe1a9be5886fe1e41d93212
2024-02-23 18:29:24 +08:00
hiyouga
89f86cc970 fix gemma template
Former-commit-id: 75950d115845e00318bd457e66440e2c2d98efbd
2024-02-23 13:49:53 +08:00
hiyouga
c09a0e4f08 fix template
Former-commit-id: 84673463221f2b359732de8a936a8e7ca1d003b6
2024-02-22 12:09:21 +08:00
hiyouga
7bac6c9460 fix template
Former-commit-id: 1737c7389264ef80bb8ba85c73ede0b0381e11f9
2024-02-22 12:06:48 +08:00
hiyouga
0c7d0bf172 support gemma
Former-commit-id: b9674aa2f6f1b6b09b2a37375313d8d5abfcd453
2024-02-21 23:27:36 +08:00
hiyouga
a274900188 fix #2532
Former-commit-id: 23a8e64f1c47cd473c627effbe271233c136369c
2024-02-21 21:55:14 +08:00
hiyouga
67deefe527 tiny fix
Former-commit-id: acc99ef2fb62908288f88369354135d581588b63
2024-02-21 18:30:29 +08:00
stephen
823f618cba update project_kwargs for ppo config
Former-commit-id: 14f106962fc0a87802ae9ecffff00d52f7f5f046
2024-02-21 13:47:38 +08:00
hiyouga
bc16c9a54a support lora for llama pro
Former-commit-id: f74c78ba95f0545aae89e603e466f494705ad024
2024-02-21 02:17:22 +08:00
hiyouga
a3f30038a0 fix #2516
Former-commit-id: ce2340193e751c4212650b27f16c671261015047
2024-02-20 20:44:24 +08:00
hoshi-hiyouga
e237f618c2 Merge pull request #2514 from codemayq/main
add a pre-built version of flash-attn

Former-commit-id: 2521f1c7bd39dff17de90650ddb5167f66f27940
2024-02-20 16:09:25 +08:00
hoshi-hiyouga
688adad665 Update README.md
Former-commit-id: 8a7a02fcba077778a84164a16ff2cf33ec813dc4
2024-02-20 16:07:55 +08:00
hoshi-hiyouga
0158812afb Update README_zh.md
Former-commit-id: 4c3310651b67bbea8c893d503de2b5736184daaf
2024-02-20 16:06:59 +08:00
codemayq
e52e0d9b07 1. update the version of pre-built bitsandbytes library
2. add pre-built flash-attn library


Former-commit-id: 2b76a300995a74398ee11d9274e5c0eb6ef53403
2024-02-20 11:28:25 +08:00
codemayq
eb2aa2c073 1. update the version of pre-built bitsandbytes library
2. add pre-built flash-attn library


Former-commit-id: 9b40eddf7aeb6b3bcf58374d43cbe44eb24f3849
2024-02-20 11:26:22 +08:00
hiyouga
debfd46749 release v0.5.2
Former-commit-id: 0189867816b0eab92fb2a1b5f1b1da079bd161a7
2024-02-20 11:12:43 +08:00
hiyouga
5ccf8fcd6b update webui
Former-commit-id: 9e0f7c362d40b78d57e77d52eaa96e678cebadcd
2024-02-19 16:49:58 +08:00
hiyouga
7bd1991513 add test scripts
Former-commit-id: fdaa4843961257b48cc32d83d30f2efe18b9fd5a
2024-02-19 02:09:13 +08:00
hiyouga
456e4ca569 fix safetensors
Former-commit-id: 06478ae5302d5fc6eb7afedc69335ce2f32808c6
2024-02-18 18:12:16 +08:00
hiyouga
6bf0fe4913 fix #2481
Former-commit-id: 2a4e3e4a26a2fad77ccc476be7d45434b8af4a55
2024-02-15 19:07:47 +08:00
hiyouga
596b6828cb support llama pro #2338, add rslora
Former-commit-id: 40d659b7f30dd5a004703c176ec1f22dc864e505
2024-02-15 02:27:36 +08:00
hoshi-hiyouga
b403f8d8a8 Merge pull request #2474 from younesbelkada/add-hf-tags
FEAT: add HF tags for models that have been trained with llama-factory
Former-commit-id: f35d96817e61da9fa7789b93b0350c9f95afc40a
2024-02-14 10:26:03 +08:00
younesbelkada
590b6c2143 add v1 hf tags
Former-commit-id: a29cc9f4472c95cd6a43ea350ab728e0a8069c6e
2024-02-13 05:58:49 +00:00
hiyouga
5537ef1e7d fix #2471
Former-commit-id: a408be8be1cf99cd4468a9905c27ec454f312b9a
2024-02-12 21:07:46 +08:00
hiyouga
5f83860aa1 add option to disable version check
Former-commit-id: fd769cb2de696aee3c5e882237e16eace6a9d675
2024-02-10 22:31:23 +08:00
hiyouga
62b6a7971a update data/readme
Former-commit-id: aa566e3cea5bc75688b4399a9da07be0b35b921c
2024-02-10 21:04:29 +08:00
hiyouga
1d16e87c5f update default template
Former-commit-id: f32b55649a9f95109a6d180216eb67f959d060da
2024-02-10 16:44:47 +08:00
hiyouga
1955a8ea5a improve aligner
Former-commit-id: cc7296b92e10c24967fc753393275b71d300683f
2024-02-10 16:39:19 +08:00
hoshi-hiyouga
a41fa6e730 Merge pull request #2462 from mnmueller/main
Enable Parsing of SlimOrca

Former-commit-id: 99eed520b87152ca6b89c2a068b09200fd45f30d
2024-02-09 22:55:48 +08:00
hiyouga
b98a64448a improve fix tokenizer
Former-commit-id: 57b138abad6397596bc47be94e092e8fabedc06f
2024-02-09 14:53:14 +08:00
Mark Mueller
1ce82f391a Slim Orca data parsing
Former-commit-id: f2d8efede7e20edafed0d5446eb64f2d419949b1
2024-02-08 19:32:20 +01:00
Mark Mueller
4d473894fd Slim Orca data parsing
Former-commit-id: ca57d27c39d4e7bc3dd7c3207a23d23d2cbd446b
2024-02-08 17:56:18 +01:00
Mark Mueller
5788b7c7d0 Slim Orca data parsing
Former-commit-id: 3016427be4e63fd25f40bc5a0d1f8cedc0997334
2024-02-08 17:54:18 +01:00
Mark Mueller
04515f6b55 Slim Orca data parsing
Former-commit-id: 4dca3907964d27abc2b21eb55c75676901c98912
2024-02-08 17:52:36 +01:00
Mark Mueller
96f8ccf3d5 SlimOrca aligner
Former-commit-id: 928dda93867c2327a7957c04648592044ccf9daf
2024-02-08 08:28:32 -08:00
hoshi-hiyouga
2c3ef480a6 Merge pull request #2423 from mayflower/main
Support for german sft and dpo

Former-commit-id: 8e282e4e6bee6493b1bd38ba239ca49a6a840a92
2024-02-07 15:58:20 +08:00
hiyouga
fa6873122c Update tests.yml
Former-commit-id: c882b7cf339304ff16a36b1544a3b5f1194ef346
2024-02-07 01:18:22 +08:00
hiyouga
34bc0c22b1 lint
Former-commit-id: 6b1f89b6494e9b6b087fe90600617a3024e014e5
2024-02-07 01:10:04 +08:00
hiyouga
e5484b2729 Update pyproject.toml
Former-commit-id: 650251ea77fae2e2595ca804f49efdd230dbb5b1
2024-02-07 00:45:58 +08:00
hiyouga
f67f781fed update gc kwargs
Former-commit-id: 0cb81c156bc8c21a4bbdd3289a491f78dfcaf730
2024-02-07 00:38:24 +08:00
hiyouga
b564b97b7e fix #2438
Former-commit-id: 412d856eeada2abcea598fac0a8d35ae90cc9c01
2024-02-06 15:23:08 +08:00
hiyouga
0dd68d1e06 add models
Former-commit-id: 0fdf61b2f765c125acda4f406eb25b3e59e75db2
2024-02-06 14:57:23 +08:00
hiyouga
73f40f1ca4 support qwen1.5
Former-commit-id: 8a03a572b058c5cc4ff598670dc8595b2b97e374
2024-02-06 00:10:51 +08:00
hoshi-hiyouga
ea53bebac4 fix #2436
Update test_toolcall.py

Former-commit-id: 39c539b6470c532ac639efbd2a1c485d2f5d485f
2024-02-05 22:55:28 +08:00
hoshi-hiyouga
00418012bd Update test_toolcall.py
Former-commit-id: f50a684a9d6fc2351436d3d7020dc84bc1553a5d
2024-02-05 22:51:03 +08:00
hoshi-hiyouga
5f3d8c514b Update test_toolcall.py
Former-commit-id: 97bcae546ab80737a906e5e28953f41b657f6c99
2024-02-05 22:50:43 +08:00
tao.jun
cb39a3f1c4 Update test_toolcall.py
Add openai version notes

Former-commit-id: 9ea4ab214e64f73ec902e76b82fc42419571fd66
2024-02-05 20:49:23 +08:00
Johann-Peter Hartmann
4d78fe6ece Merge branch 'hiyouga:main' into main
Former-commit-id: efbb0153981d0650f3a581e324b83054ca8063c1
2024-02-04 13:55:00 +00:00
hiyouga
a3e3ea9846 fix #2421
Former-commit-id: 43918c12310f7560d3820e5c6d72964309afeb8b
2024-02-04 21:02:55 +08:00
Johann-Peter Hartmann
feba34e82d Merge branch 'hiyouga:main' into main
Former-commit-id: 0395d0aafb69e86645e6b0a36b8f8dadb82219e0
2024-02-04 12:51:25 +00:00
hiyouga
e134013e04 fix reserved label len
Former-commit-id: b06d6c05a1911f329252a7572240048e456affdc
2024-02-04 17:54:26 +08:00
hiyouga
5589d0296a fix #2420
Former-commit-id: 7a34087e4db62e603c9a9a26d8ff3910d7b10c40
2024-02-04 15:51:47 +08:00
hiyouga
de0ebab464 fix #2189
Former-commit-id: b3d81b229d376671e1c12229aeb487b0d84f2548
2024-02-04 00:47:37 +08:00
hiyouga
f2e7122a96 bump up transformers version
Former-commit-id: 82f4d4301ed9f31b160d6313a1d2d44a22865f4d
2024-02-04 00:01:16 +08:00
hiyouga
996cc5d900 fix #2397
Former-commit-id: 7404692808f2288d539668d364965ad104dacadb
2024-02-03 23:45:31 +08:00
hiyouga
a2ae5bd867 add hint for freeze #2412
Former-commit-id: 9600c93633629605573d908019563fa3870ad6f8
2024-02-03 23:38:56 +08:00
hiyouga
5fa52e87cb fix #2376
Former-commit-id: 8e2cfa7cca21b7fd4538d72114e36f704bcc82fe
2024-02-03 23:14:31 +08:00
hiyouga
bcd76d2c7a support minicpm #2404
Former-commit-id: 4449e91cbee8fd804cf8bf1ff6b9f5301fde94ed
2024-02-03 22:36:46 +08:00
Johann-Peter Hartmann
36fcbedc11 add simple german chatml template chatml_de
Former-commit-id: 9f1d67c09f1af2c7aa383adec66842cacde92e33
2024-02-03 09:01:15 +01:00
Johann-Peter Hartmann
1dad01cc53 Merge branch 'hiyouga:main' into main
Former-commit-id: c350237d891df7edd7e681f9da5ac1446fdeb568
2024-02-03 08:43:12 +01:00
hoshi-hiyouga
5fb21f6e54 Merge pull request #2411 from lxsyz/main
fix eos_token_id=0 bug

Former-commit-id: 019a353e74ec70a9a2d8987df1ed19483413211a
2024-02-02 17:38:16 +08:00
Fallen Angel
08dfac8352 fix eos_token_id=0 bug
when eos_token_id=0, the eos_token will never be added

Former-commit-id: 576b4881c386d897462a875b28066ce9d6e06dd5
2024-02-02 17:34:48 +08:00
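The root cause described above is the classic falsy-zero check: `if eos_token_id:` treats the valid token id 0 as missing. A minimal sketch of the fix (hypothetical helper, not the repository's code):

```python
from typing import List, Optional

def append_eos(input_ids: List[int], eos_token_id: Optional[int]) -> List[int]:
    # Wrong: `if eos_token_id:` is False when the id is 0, so EOS is never added.
    # Right: compare against None so the valid id 0 is still appended.
    if eos_token_id is not None:
        input_ids = input_ids + [eos_token_id]
    return input_ids

assert append_eos([5, 6], 0) == [5, 6, 0]   # id 0 is appended
assert append_eos([5, 6], None) == [5, 6]   # missing id is skipped
```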
Johann-Peter Hartmann
956751e419 Merge branch 'hiyouga:main' into main
Former-commit-id: 25b0a11c715f87812edba1ca14d3122a75f421de
2024-01-31 14:05:52 +01:00
hiyouga
fe2ae04c91 fix #2388
Former-commit-id: 203a36c9adfd9aa0f35fbf8089c9138534d68c53
2024-01-31 17:23:56 +08:00
hiyouga
5b8712d061 fix autoset attn impl, update data readme
Former-commit-id: 34a6e5f82baf45cc8dbb11f9f7ab4a480ab7ec5c
2024-01-31 11:58:07 +08:00
Johann-Peter Hartmann
dc7ff90c1e Add support for german datasets
Former-commit-id: bbc038aa236952597e97d1ccf1ae2d64a16339b5
2024-01-30 10:18:01 +01:00
hiyouga
1ace676170 fix #2320
Former-commit-id: e0b0c4415aaf80e75f6dd4f3777a0616b0e60f84
2024-01-24 16:19:18 +08:00
hoshi-hiyouga
8947a87b95 Merge pull request #2319 from ftgreat/main
Add patch_mixtral_replace_moe_impl for full training of Mixtral using DeepSpeed Zero3

Former-commit-id: 0fadcd5f9deb9f03d341b6611c15f337f07e32d1
2024-01-24 15:32:26 +08:00
ldwang
786a2f1103 Add patch_mixtral_replace_moe_impl for full training of Mixtral using DeepSpeed Zero3.
Signed-off-by: ldwang <ftgreat@gmail.com>

Former-commit-id: 5f50c02f0e425737cd80abdf8fde9e25abf13083
2024-01-24 15:25:31 +08:00
ldwang
36ac14a566 Add patch_mixtral_replace_moe_impl for full training of Mixtral using DeepSpeed Zero3.
Signed-off-by: ldwang <ftgreat@gmail.com>

Former-commit-id: d1413dcec8a3b1d671f240b82a689c72b54d7b93
2024-01-24 14:43:16 +08:00
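The patch named above addresses a known ZeRO-3 pitfall with sparse MoE layers: if routing leaves an expert unused on one rank while another rank uses it, the ranks issue mismatched parameter gathers and training hangs. A common workaround is to run the MoE block densely so every rank touches every expert on every step. A minimal sketch of that idea (attribute names follow Mixtral's MixtralSparseMoeBlock; this is not the actual patch_mixtral_replace_moe_impl):

```python
import torch

def dense_moe_forward(self, hidden_states: torch.Tensor):
    # Run every expert on every token so all ranks exercise all expert
    # parameters each step, keeping ZeRO-3's gathers in sync. This trades
    # extra compute for safety and skips Mixtral's top-2 renormalization.
    router_logits = self.gate(hidden_states)              # (..., num_experts)
    routing_weights = torch.softmax(router_logits, dim=-1, dtype=torch.float)
    routing_weights = routing_weights.to(hidden_states.dtype)
    final = torch.zeros_like(hidden_states)
    for idx, expert in enumerate(self.experts):
        final = final + routing_weights[..., idx : idx + 1] * expert(hidden_states)
    return final, router_logits
```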
hiyouga
7a048fc91d add hint
Former-commit-id: c540ef41bda61993b83ef8cfe3c84b1d169e984c
2024-01-22 23:32:01 +08:00
hoshi-hiyouga
3f3756b113 Merge pull request #2283 from A-Cepheus/main
fix: ZeRO3 does not work with MoE models
Former-commit-id: f5ea760abec2aac8d29ce5c945647be05648e676
2024-01-22 23:28:45 +08:00
hoshi-hiyouga
b36c4b99cc Update patcher.py
Former-commit-id: 33556cc6b0b65cc6db02e66f4f6e75112c33d966
2024-01-22 23:27:39 +08:00
hoshi-hiyouga
9856a2276e Update tests.yml
Former-commit-id: 34151675388701afa40220729a63b0d7b2fa2a7c
2024-01-22 23:22:15 +08:00
hoshi-hiyouga
b6dc3ed3ad Create tests.yml
Former-commit-id: 9443ad76b7ef3ef1f3d184ef60652947d2c30609
2024-01-22 23:13:04 +08:00
hiyouga
75be329994 fix #2282 and update tool prompt
Former-commit-id: 1c412f803866bde32b76f7c26c7b464b6b3651f3
2024-01-22 22:27:30 +08:00
hiyouga
1fe1ca1c8b add orion models
Former-commit-id: a34db89d2a281d1a1ace29dfd5bd5d4ff7c2f657
2024-01-22 21:26:53 +08:00
A-Cepheus
882a6a1d51 🐞 fix: typo
Former-commit-id: 57a3687ecd23237559aee0e8e811b782846f2415
2024-01-22 16:04:39 +08:00
A-Cepheus
712ab4ae7a 🐞 fix: typo, move MoE fix to patcher
Former-commit-id: 4ff28e99ff9b48df7150591c6bbd3723f22b7715
2024-01-22 16:01:58 +08:00
A-Cepheus
18ad259fb3 fix: ZeRO3 does not work with MoE models
Former-commit-id: b2844c049a88ea89f8e1812e2d2e8662b4002965
2024-01-22 15:21:14 +08:00
hiyouga
fe4d93c6db add array param format
Former-commit-id: bf910f8a5b21ee552fa9ab069610a3f5f611de57
2024-01-21 22:17:48 +08:00
hiyouga
c6ba588e37 update tool test
Former-commit-id: 1d63ccc2866632596310235de15fdff660f6bee5
2024-01-21 19:41:46 +08:00
hiyouga
3fda60fca0 fix api
Former-commit-id: cca004da28aaaa0788eaea62b83d3402b38a3011
2024-01-21 19:15:27 +08:00
hiyouga
96531a0ef8 fix #2268
Former-commit-id: 300ecf9b9d7fd99fbb68f3d086e3ad973c2f894e
2024-01-21 14:11:38 +08:00
hiyouga
7abc3065fb tiny fix
Former-commit-id: 66839ae94985ddfa13eca4542127119c919b9648
2024-01-21 13:26:12 +08:00
hoshi-hiyouga
013ded4bac Merge pull request #2266 from yhyu13/fix_export_model_dtype
Remove manually set use_cache; torch_dtype is not str, save model as b…

Former-commit-id: 8c0827ba92a458e18c3b68af0330af3a65149f96
2024-01-21 12:40:39 +08:00
hoshi-hiyouga
010c3c7348 Merge branch 'main' into fix_export_model_dtype
Former-commit-id: 6c7d2729f28eb37a97820d73c05648eb7fb2ca87
2024-01-21 12:40:24 +08:00
hoshi-hiyouga
bf075c075c Update tuner.py
Former-commit-id: 691420661f7115f809e76484c1f29f74637e7cd0
2024-01-21 12:39:38 +08:00
hoshi-hiyouga
41b34e5f60 Merge pull request #2262 from fenglui/main
fix torch_dtype check of export_model

Former-commit-id: 37cacf73a534fed1b06b4f3c6724f3568ce095e3
2024-01-21 12:34:37 +08:00
hiyouga
5a889398e7 format
Former-commit-id: f28a1a0c1cdd0062ad7b6c2826f8ec107a200cff
2024-01-21 12:34:17 +08:00
hoshi-hiyouga
054cae86d8 Merge pull request #2264 from seoeaa/main
add russian lang

Former-commit-id: 15d1747de54efe69ed9f4cfd8f296fe8dd09a5c9
2024-01-21 12:25:24 +08:00
yhyu13
cd1cb8b83c Remove manually set use_cache; torch_dtype is not a str, so saving the model as bfloat16 used to fail
Former-commit-id: 75557fb5df16fd6eda7586cf041a16822dcfee8e
2024-01-21 11:12:15 +08:00
Aleksandr
a34779c027 add russian lang
Former-commit-id: f8ce6d75b56439027bb17ff4e59eeb9eb3b9bd34
2024-01-21 04:28:14 +03:00
fenglui
d19cb77d74 fix torch_dtype check of export_model
Former-commit-id: 8813181b6bffa76e5c7cb1f4caceada611c54b9d
2024-01-21 05:01:53 +08:00
hiyouga
ab67528e89 release v0.5.0 (real)
Former-commit-id: 2146e1d9195c179fa8f92144ec2b7034e1a9f942
2024-01-21 01:54:49 +08:00
hiyouga
27f281480a finish agent
Former-commit-id: d8d9d3afe32725fe79120fcd1a0970fdcdc45625
2024-01-21 01:47:33 +08:00
hiyouga
50459a39f4 fix api
Former-commit-id: a4149fbcd600d4f3815f9353e5e92c569719bed6
2024-01-21 00:03:09 +08:00
hiyouga
5c9815ef6f fix internlm2 template
Former-commit-id: ae05b23eb86555dbfafc174aa6ceff736e7fc9fa
2024-01-20 23:33:50 +08:00
hiyouga
aed00a97b6 fix cli_demo
Former-commit-id: e8336b3653f43618cf7cd70f8da004208de970c0
2024-01-20 23:27:10 +08:00
hiyouga
7543dc4a9d fix #2260
Former-commit-id: ba97550671811a27177306dd231bb427130b26fb
2024-01-20 23:22:09 +08:00
hiyouga
841fa0030f release v0.5.0
Former-commit-id: 602bb9b685009b9af234499be278404721542ac7
2024-01-20 20:21:39 +08:00
hiyouga
66e0e651b9 format style
Former-commit-id: 53b683531b83cd1d19de97c6565f16c1eca6f5e1
2024-01-20 20:15:56 +08:00
hiyouga
1750218057 fix tests
Former-commit-id: 23f97bd437424ef43b2b84743d56acc5d1ca70d5
2024-01-20 19:58:04 +08:00
hiyouga
80637fc06d support longlora for main branch
Former-commit-id: f869501ad4c368df26534c41f62c6d63c6be17dd
2024-01-20 19:25:22 +08:00
hoshi-hiyouga
8efc055511 Merge pull request #2201 from liu-zichen/token_embed_resize
support resize embed for zero3

Former-commit-id: c0d1b5e3aef70da6b115614bd1ed539a76d6547a
2024-01-20 17:45:38 +08:00
hiyouga
be61bfda93 add upcast_lmhead option
Former-commit-id: 7ef69a1697c11ff13e7503360e40ef36cfb1c345
2024-01-19 23:54:25 +08:00
hiyouga
1a39f529c0 set use_reentrant=False
Former-commit-id: efa2e27d5ef6eaeb7baa7551c651ef10ab31400c
2024-01-19 23:29:54 +08:00
hiyouga
0868d5c550 fix #2249
Former-commit-id: 7ec64588c541422875adfdaf5692a27d05b96cb9
2024-01-19 21:44:32 +08:00
hiyouga
384f0e7678 add bf16 lora option
Former-commit-id: 58e7d7ff0cf9bf30e53b3eb12576f38d31976413
2024-01-19 16:29:03 +08:00
hiyouga
9b390c4bea fix function formatter
Former-commit-id: 363a87376ad8fe4149b387f7ccd60f31f2a5fdf7
2024-01-18 16:01:07 +08:00
hiyouga
42a13fec46 Update tuner.py
Former-commit-id: db30107385f100f88c9370abea6692ce6030a0c9
2024-01-18 15:06:02 +08:00
hiyouga
790acc4c17 fix templates
Former-commit-id: 382cc48b2a823b9a7d4ccf2c2a163f0e5b6e3169
2024-01-18 14:49:52 +08:00
hiyouga
b74cf27538 fix rm dataset
Former-commit-id: fa6f810026a59cecce813a696b2fdf15ba502fc4
2024-01-18 14:45:37 +08:00
hiyouga
ffc874ec6f fix pretrain data loader
Former-commit-id: 2a812b706ecc527013e79edc504ec18a4193123d
2024-01-18 14:42:52 +08:00
hoshi-hiyouga
546d6bd0b2 Merge pull request #2226 from hiyouga/dev
support function calling

Former-commit-id: 69391464f0d3fb0e2ef76e6b6fac51c119d66b53
2024-01-18 14:31:28 +08:00
hiyouga
8b68ca029e update readme
Former-commit-id: 11e0c732c4968b083f60a0bb6f7bb5dd5ca2ba56
2024-01-18 14:30:48 +08:00
hiyouga
502f84b30c add tool hint
Former-commit-id: 64734ffe2f45f80a1e33c2a72330b2ab1e58feb3
2024-01-18 13:19:09 +08:00
hiyouga
b7df920860 fix dataset
Former-commit-id: a7ce244a6d83d62f5bbecc588f1978e3791fd3b3
2024-01-18 12:59:30 +08:00
hiyouga
e4a424cb6a enable cutoff len
Former-commit-id: e9513d300c338dfcae98eee7d057bfd00da2da0e
2024-01-18 12:25:42 +08:00
hiyouga
d8affd3967 add tool test
Former-commit-id: 639a355a9ceb2e4585b81aea71fc810f4b510776
2024-01-18 10:26:26 +08:00
hiyouga
a423274fd9 support function calling
Former-commit-id: 66533b3f65babf2429c92c0f8fafe4eff5e0ff63
2024-01-18 09:54:23 +08:00
hiyouga
f7329b1a0e Update llamafy_internlm2.py
Former-commit-id: 3ca5915a4fcd3d28d10a47bf9f2188b5cf8393a8
2024-01-18 01:12:31 +08:00
hiyouga
48eb07c956 Update llamafy_internlm2.py
Former-commit-id: 69b3cb768eda57b63f47cd35e5da3a59b57b7853
2024-01-18 01:00:16 +08:00
hiyouga
636d8a886c Update llamafy_internlm2.py
Former-commit-id: 1f1a7bcee5a5bb0fa17b13aa6393bfba89451dd7
2024-01-18 00:49:31 +08:00
hiyouga
97b52c7fdf fix llamafy scripts
Former-commit-id: 99ff69c36767d4397a4a61e89317ec8c0c295c1e
2024-01-18 00:37:37 +08:00
hiyouga
344412e66e fix llamafy_internlm2
Former-commit-id: a309375d020dedc313f3b6921fb53d932f156e8b
2024-01-18 00:26:14 +08:00
hiyouga
5cdea14cdf add llamafy_internlm2
Former-commit-id: 7b71767ef67cd5f246f52fb7e74b36bd26774a6c
2024-01-18 00:17:41 +08:00
hiyouga
7b1a56b96f support export push_to_hub #2183
Former-commit-id: fac09da7123a500d255de74810a8d057fb5c5f07
2024-01-16 23:59:42 +08:00
hiyouga
d1ec884e75 fix #2195
Former-commit-id: 801f7279693a0c785480ea67d663d99f4ca653da
2024-01-16 23:53:50 +08:00
liuzc
aa72a4349e support resize embed for zero3
Former-commit-id: b5464f5699b13bb118ac57ebc40b3cf9eb030396
2024-01-16 15:16:20 +08:00
hiyouga
5ab7fd0842 tiny fix
Former-commit-id: 6b1e9207e988c253a808e6bb26e3af9d071b77bc
2024-01-15 23:34:23 +08:00
hoshi-hiyouga
86d5e9802a Merge pull request #2194 from junuMoon/patch-1
fix: typo on README.md
Former-commit-id: a066a633a1a4b50cd6dc6b50701e35532fe788c1
2024-01-15 20:21:28 +08:00
Junu Moon(Fran)
18df39e3a1 fix: typo on README.md
Former-commit-id: 372066b559305a1428c88fbd6b01e332bfd5e3e1
2024-01-15 19:50:35 +09:00
hiyouga
cfe1e24471 support solar 10.7B #1907
Former-commit-id: ecf9b35c612e5514dd25b0d15835d28447a7437e
2024-01-14 00:30:30 +08:00
hiyouga
2edbe87a8c Update README_zh.md
Former-commit-id: e6d704c383e36abe8e27b3834f41d95890858425
2024-01-14 00:17:28 +08:00
hiyouga
880055bc90 support deepseek moe
Former-commit-id: 07fbb32496b9b81c4cfe67cb9a15a6b2c43852c3
2024-01-14 00:14:49 +08:00
hiyouga
ad99bd0a14 fix phi modules
Former-commit-id: 68d7e925ec51b6ee277513de8f61ac18a8378b98
2024-01-13 23:12:47 +08:00
hiyouga
c5f099138d fix #2147
Former-commit-id: 49445a03cd46af4e7036cf444cd041dfab2d8941
2024-01-12 03:30:56 +08:00
hiyouga
6e64e02f71 fix #2164
Former-commit-id: abe23bb4aca4fa571ebafc329ec9a9d457e37d41
2024-01-12 00:27:57 +08:00
hoshi-hiyouga
f95f6ec009 Merge pull request #2163 from JessyTsu1/main
Request to add "Projects using LLaMA Factory"

Former-commit-id: fa9abb430b204fabe4c1b3a569225695ae0cbc29
2024-01-11 23:33:29 +08:00
JessyTsu1
8aeecc20e1 Update README.md
Former-commit-id: 547d4df5c7a1d6dd95cfed37229701ce507b421c
2024-01-11 23:18:29 +08:00
JessyTsu1
38d0f6c63f Update README_zh.md
Former-commit-id: 8677309a38140ec1e1be3f81d0b2024df3f16c21
2024-01-11 23:17:48 +08:00
JessyTsu1
ac8534a9e7 Update README.md
Former-commit-id: dcd4858fd2c2ac4d3cce8a369dc9991108c03821
2024-01-11 23:17:00 +08:00
hiyouga
73cab9d9d4 fix #2161
Former-commit-id: 9acd5a2b678cd07f8e3b48eca76c4cbacb559e37
2024-01-11 17:04:13 +08:00
hiyouga
64246d42d2 improve web ui
Former-commit-id: 5c0148c018b12b52bc5748acfd6ad43836f2edb5
2024-01-10 12:37:45 +08:00
hiyouga
6fa6d4532e improve model export
Former-commit-id: d1b795aac1fccbcb8a9ec2057065c33b46ce1a5a
2024-01-09 22:26:24 +08:00
hiyouga
92b9956c06 modify weight name
Former-commit-id: 3f3c528fa8056dc1952ea5293bad7e55187983ff
2024-01-09 20:22:47 +08:00
hiyouga
4d6669c268 fix #1789
Former-commit-id: d86455f685fa531e651333e00b4fe54d895cf2e4
2024-01-09 18:31:27 +08:00
hiyouga
89f4ae51f9 fix #2127
Former-commit-id: 5a1aa33fa9b546ab520f0ba4cb9d996b87eb71ca
2024-01-09 14:49:13 +08:00
hiyouga
af0659f573 fix #2125
Former-commit-id: 46a22f4daeafac5b0a695212d060960ff53af613
2024-01-08 21:42:25 +08:00
hoshi-hiyouga
45a10d501e Merge pull request #2117 from dasdristanta13/main
Update requirements.txt With einops dependency

Former-commit-id: af0c05f1cffc7fc0fc74d514783333501f83f59e
2024-01-07 23:56:53 +08:00
Dristanta Das
e529ff1245 Update requirements.txt With einops dependency
Former-commit-id: 0b47b13cb34cace6fa0b6d0c58ca16fb01b3a5e9
2024-01-07 21:03:30 +05:30
hiyouga
b29371dc87 tiny fix
Former-commit-id: 06b854fe15eb4cf4ff8d6f5570068d9e74a2f1b3
2024-01-07 17:17:18 +08:00
hiyouga
0bef890000 fix api server
Former-commit-id: cedd80ba56c0090487f65f4b1227e5615943997f
2024-01-07 17:14:42 +08:00
hiyouga
75fe1404b1 improve model export
Former-commit-id: 31255147a566a23ce1a48402662d14af8ac267ab
2024-01-05 18:51:49 +08:00
hiyouga
b460c9372f fix #2098
Former-commit-id: e62d9158cffbf1044396597ddaf15b1c0bc5f954
2024-01-05 17:11:26 +08:00
hiyouga
c3e574ceaa fix qwen template
Former-commit-id: c1923e0daa02b49ac07e96ce29877729acc78d31
2024-01-05 16:14:56 +08:00
hiyouga
04ae80a52e fix #2081
Former-commit-id: ec4b539b6c0be11e15d273025c414b694bbd6c9a
2024-01-04 23:19:08 +08:00
hiyouga
a7ff095399 fix #2090
Former-commit-id: 13ec720990a88b01f7f5e2a99a87f95128dc3537
2024-01-04 23:05:08 +08:00
hiyouga
a655dcebaf fix #2067
Former-commit-id: 6cfdeea5261fd5bf6f91ba2bb3efb921a2f3e866
2024-01-04 22:53:03 +08:00
hiyouga
8c74851b70 fix dispatch
Former-commit-id: deda82638716506dc690902c51276bb1eb0ddd5e
2024-01-03 16:33:16 +08:00
hiyouga
7168392a51 fix valuehead patch
Former-commit-id: d9cb98362b58b28ae0ee207e7c07e75e5d810876
2024-01-03 16:19:23 +08:00
hiyouga
ccc5b324fe fix rm server
Former-commit-id: 81bc1638682a9fd01518f9f25250a6b584d2a9e6
2024-01-03 15:30:46 +08:00
hiyouga
e85c205a81 fix #2014
Former-commit-id: 077f6bf64e50f01f62aa4a957438bedc4e7925b3
2023-12-29 15:17:22 +08:00
hiyouga
7e225be16e add yuan model
Former-commit-id: 6a0377e2e51633bd5fb10fa8628e554565c5ee3e
2023-12-29 13:50:24 +08:00
hiyouga
ebb32e85f8 fix version
Former-commit-id: dd7500b65d0d548441eece101b60d51fa619cc0f
2023-12-29 04:53:36 +08:00
hiyouga
90d279f39f fix args
Former-commit-id: ff18f327a3dc96d9677ef32841e8f29ab2eeb7ef
2023-12-28 18:47:19 +08:00
hiyouga
af3f5b6e16 fix export format
Former-commit-id: 7c82bd396b9e6ff395850ad544d95cbf1b7557cd
2023-12-28 18:40:46 +08:00
hiyouga
53d7c5109f fix ppo trainer
Former-commit-id: ca5b5823b03822ef899405d233a82396be997f44
2023-12-28 18:09:28 +08:00
hiyouga
bf381563ff add model link
Former-commit-id: 159729929516f68aa1f43a852ed50ca0fac81523
2023-12-25 19:44:38 +08:00
hiyouga
de4b9334e1 tiny update
Former-commit-id: 4417b8ee20b381c964f452f52081667dfa33cd7b
2023-12-25 18:29:34 +08:00
hiyouga
c33fbea469 fix bug
Former-commit-id: b06faa1be3f5aa5e0fa31aa31314c213c36c3442
2023-12-24 19:20:12 +08:00
hiyouga
921f593632 update loader
Former-commit-id: 080d8eab858217ca58bffe719d5ffde7579c5bda
2023-12-24 19:10:23 +08:00
hiyouga
940403720a update patcher
Former-commit-id: d6d7b6670847ce4ea10353c5b126214542b45c2b
2023-12-23 15:24:27 +08:00
hiyouga
f869e44fe5 fix #1909
Former-commit-id: 3e93c33af9f80e28c9f30af9b7ba20757358afb4
2023-12-23 14:42:20 +08:00
hiyouga
bcc92919a0 update readme
Former-commit-id: d3dea7a926e9d356a39ca2033b03be7f559cc143
2023-12-23 02:17:41 +08:00
hiyouga
306a70c7ba fix unsloth dtype
Former-commit-id: fd22e6546ce5f38a6a075cf894aafc3d206b2fcd
2023-12-23 01:59:49 +08:00
hiyouga
d358d955e5 fix dpo trainer
Former-commit-id: c160dd7cd86e296e32775ace2e4258a473449c41
2023-12-23 01:51:55 +08:00
hiyouga
0fdd6074c3 llama board: add unsloth
Former-commit-id: 9477e6f28808ae9deadada1f6cf679a29542c271
2023-12-23 00:35:53 +08:00
hiyouga
6faf9c35a9 support unsloth
Former-commit-id: b857f00234b90b785d82ca7cdb29af3d948b1a7b
2023-12-23 00:14:33 +08:00
hoshi-hiyouga
1066898e32 Merge pull request #1953 from ShaneTian/model-load-bf16
Fix slow model initialization in bfloat16 dtype.

Former-commit-id: 69daf107c4561f807ceae066f04d432323699cef
2023-12-22 17:29:54 +08:00
ShaneTian
d05febe5de Fix slow model initialization in bfloat16 dtype.
Former-commit-id: cf2e2f6f9b7f09b1e2faf6fbc413e3f62e3846c7
2023-12-22 16:27:28 +08:00
hiyouga
67f7034a21 fix param type
Former-commit-id: 11b99f344416ade1cdac52e11ba7f36fcf689221
2023-12-21 17:33:01 +08:00
hiyouga
79f301a2c6 fix ds zero3 check
Former-commit-id: 7f50705b1d821d287bd854211319f697f57b25db
2023-12-21 01:19:22 +08:00
hiyouga
31cbc67986 match version
Former-commit-id: 16db52522584a8e084d4db2a7c253c8b88f27371
2023-12-20 22:17:35 +08:00
hoshi-hiyouga
fe66bf3663 Merge pull request #1932 from ShaneTian/main
Update transformers to 4.36.2 to resolve multi-node saving bug.

Former-commit-id: 5c55907a57e8327134e2c982c838a53c9fa42f51
2023-12-20 22:13:28 +08:00
ShaneTian
4691d4b35d Update transformers to 4.36.2 to resolve a bug when saving a checkpoint in the multi-node setting.
Former-commit-id: 3173f8e51eec5e8f488e3dfc54ad371b640d6b87
2023-12-20 22:00:41 +08:00
hiyouga
acf5241845 fix stop words
Former-commit-id: 6ce6cac9fa8f0af33697e824cf93a9a80cdbd064
2023-12-20 19:06:43 +08:00
hiyouga
2bce99b82f fix yi template #1895
Former-commit-id: 05b4fa1e2b13a15ee261a151ac8cd0a2ebdf5edc
2023-12-20 18:58:16 +08:00
hiyouga
3c330869ef improve quantization
Former-commit-id: 4dde60017ad8208dfea0b2bb61df6a14a35d03e0
2023-12-20 18:27:16 +08:00
hiyouga
dba1af4841 add max_memory for gptq #1923
Former-commit-id: 9afc42c8b999fbbc206d9a467ca5795b27a10096
2023-12-20 18:15:17 +08:00
hiyouga
2b1e52dcc9 fix #1073 #1462 #1735 #1908
Former-commit-id: cd8e2535aa66931b24b96e76c2b56ce703a579b1
2023-12-20 17:15:40 +08:00
hiyouga
b5238e945a optimize data loading logic
Former-commit-id: 58f669b384582ac90e85de835f1f44f7003f9ec0
2023-12-20 16:15:41 +08:00
hiyouga
afc0f29704 fix #1909
Former-commit-id: f563e8d28dfa48a60cbe3d295b20f9cf58de296d
2023-12-20 16:11:07 +08:00
hiyouga
de0bb1d2da fix mixtral inference #1821
Former-commit-id: 612f9fd19cbd29e8b1785a1576a9668e7dcd264c
2023-12-20 15:11:15 +08:00
hiyouga
cc16ece283 fix #1900
Former-commit-id: 4c35214396f873588562606b084740b6581188d9
2023-12-19 17:21:46 +08:00
hiyouga
31ba802fc9 update readme
Former-commit-id: 36cd747e6a1a568e1a03e6c6611fec48e6ab9df7
2023-12-18 22:29:45 +08:00
hiyouga
4b27cf5460 add codegeex template
Former-commit-id: a8222722b8097158f1c92e3729f41d411eff3926
2023-12-18 19:52:35 +08:00
hiyouga
a53b2a643f add xverse-65B-2 model
Former-commit-id: 3e563a0d9666934dfdab54d61654ec00079a93f1
2023-12-18 19:24:09 +08:00
hiyouga
d925ecae1b add models
Former-commit-id: 3a4728557304996bcbe58d7d6380beead7c63c70
2023-12-18 19:09:31 +08:00
hiyouga
13fd751a78 fix tokenizer for Yi chat models #1617 #1875
Former-commit-id: 9485692c8d367a0b25d3e653db413aa01cb9ad7d
2023-12-18 17:18:11 +08:00
hiyouga
74575f8922 update readme
Former-commit-id: 01267eee0da0bffb3f0c0378e2e60d14e05585c4
2023-12-18 15:46:45 +08:00
hiyouga
5e7bb5fe73 fix llama board
Former-commit-id: f43f61b2898dda56aba0066fcb3409b152260bdb
2023-12-16 22:17:37 +08:00
hiyouga
790a31404a fix #1742
Former-commit-id: efbb32afdcf0d6aa4ca26f54c95f76dbb84f77dc
2023-12-16 20:50:45 +08:00
hiyouga
f927601702 add xverse-65b-chat model
Former-commit-id: fff6288db6b61ca27010ea47c918298f76922106
2023-12-16 20:21:29 +08:00
hiyouga
c4654d54d7 set version
Former-commit-id: 45a05e3a415eeaf875e2cf15bdba0235fbd7d527
2023-12-16 20:17:51 +08:00
hiyouga
df777c30d1 add noisy mean initialization #1815
Former-commit-id: 3253b1fca0123071913079277186c160046edf21
2023-12-16 19:47:51 +08:00
hiyouga
d81ad2d4bc support dpo-ftx
Former-commit-id: 86dfa04f9821556019fa777106787f73eb70b452
2023-12-16 19:21:41 +08:00
hiyouga
9f77e8b025 support autogptq in llama board #246
Former-commit-id: fea01226703d1534b5cf511bcb6a49e73bc86ce1
2023-12-16 16:31:30 +08:00
hoshi-hiyouga
04dc3f4614 Merge pull request #1868 from yhyu13/improve_hfargparser
Improve logging for unknown args

Former-commit-id: 6455013a99ca5c63f5b99c1100e93f794a03c497
2023-12-16 16:06:09 +08:00
yhyu13
7d1fe50977 Use llmtuner logger
Former-commit-id: ef5a560b4246e04e0ef2612e3520e05288e93707
2023-12-16 07:15:27 +00:00
yhyu13
c0e5e3c5d5 Improve logging for unknown args
Former-commit-id: 03e49d76ca91f7fcaf1c013740d5f6bfc11a2028
2023-12-16 05:16:29 +00:00
hiyouga
3a45cfb604 update tips
Former-commit-id: 4432cbda6b7535bcbb40ba77df069fca631b4be8
2023-12-15 23:52:50 +08:00
hiyouga
393e4b0f5a fix #1770
Former-commit-id: 8266187cec70bb4bd1b4837d51b09409ec11f93e
2023-12-15 23:50:15 +08:00
hiyouga
296711d502 support quantization in export model
Former-commit-id: f32500ae6edccab7d14df4c92467e15986866def
2023-12-15 23:44:50 +08:00
hiyouga
9121722999 update dc link
Former-commit-id: f6789e50e17a377b6d9b434d8e12ad99d8eecfeb
2023-12-15 22:11:31 +08:00
hoshi-hiyouga
d8d74091f6 Merge pull request #1864 from hiyouga/dev
Refactor hyper-parameters of adapters and model loader

Former-commit-id: d5ce2fb6858b9f2963f355e9f4d6f046eb6efdcd
2023-12-15 22:06:56 +08:00
hiyouga
33521fb45e fix bug
Former-commit-id: 95ac272907a04a64785f928536de1fd099150f92
2023-12-15 21:54:02 +08:00
hiyouga
e5204e60ed fix bug
Former-commit-id: 8b80baf02cfece53527c27712f0899fa3532c414
2023-12-15 21:49:26 +08:00
hiyouga
0409428d87 add configurer
Former-commit-id: c40c9889615ffb49c7ce24c69c0d3d20d841c800
2023-12-15 21:46:40 +08:00
hiyouga
f902b0d420 refactor adapter hparam
Former-commit-id: f82aece9ebd6df83a7a005cc7cbbcec07fa6e14d
2023-12-15 20:53:11 +08:00
hiyouga
27ef5b1aa7 add loftq
Former-commit-id: 0b900882ef19ac49604a24fbae8b3254f1bff7ad
2023-12-14 21:53:56 +08:00
hiyouga
c32303fc7e fix valuehead model
Former-commit-id: 9f628debb6510f2d1c91b00f121a721ab5d648e9
2023-12-14 20:15:20 +08:00
hoshi-hiyouga
45abe361ba tiny fix
Former-commit-id: 987df4c62f34026adfe2089910f4ff9ac6ebd9a6
2023-12-13 17:32:36 +08:00
hoshi-hiyouga
3ae479faae revert peft version
Former-commit-id: 6440fa1a8c28fd2db58d0905a67d071837e0edd1
2023-12-13 10:49:45 +08:00
hoshi-hiyouga
5698038f49 update peft version
Former-commit-id: 31c01e1272bd2cd9588e5ee68c1924a3dd55c67e
2023-12-13 10:23:51 +08:00
hoshi-hiyouga
020233f725 tiny fix
Former-commit-id: 1478bc052417e0939188f55a0adcbf00956960f2
2023-12-13 10:21:29 +08:00
hoshi-hiyouga
6f9d55b8eb fix #1819
Former-commit-id: f2e2b0354cbe9a7190ccab807f690cc8ab433a6e
2023-12-13 10:14:01 +08:00
hiyouga
2542b62d77 remove loftq
Former-commit-id: e175c0a1c631296117abda2403a4b87bbdd35a66
2023-12-13 01:53:46 +08:00
hiyouga
95678bb6b1 fix sharegpt loading
Former-commit-id: ad35c35f9328bff69e8b9ea7dba6a61a2dc9e28b
2023-12-13 00:56:16 +08:00
hiyouga
a78759e7ee add model urls
Former-commit-id: 3139a9fafab246f5461697efd5ed7a6599d85481
2023-12-13 00:09:17 +08:00
hiyouga
cc5c523f58 update readme
Former-commit-id: e81037d766f89f7e2b6539596397983eba52b492
2023-12-12 23:30:29 +08:00
hiyouga
e39bbdd287 support loftq
Former-commit-id: e7ac2eb7f7daae17525a278ffbe2f82c0fbd8093
2023-12-12 22:47:06 +08:00
hiyouga
d9a50bf93f fix #1795
Former-commit-id: 949ab45487155525789c08027d4f8e7da1b8bc0c
2023-12-12 19:58:34 +08:00
hiyouga
934d00ea1e support system column #1765
Former-commit-id: f425584a511c5e42bae8b3ba090eaa898b28adad
2023-12-12 19:45:59 +08:00
hiyouga
c27675f70d fix modelscope data hub
Former-commit-id: 5b63e8c22538a4788e4b6c8df50e6e6be93ceeac
2023-12-12 18:33:06 +08:00
hoshi-hiyouga
7c9f37c83d Merge pull request #1802 from tastelikefeet/feat/support_ms
Support ModelScope Datahub

Former-commit-id: f73f321e765aab9325673218779ff4ee7f281514
2023-12-12 17:58:37 +08:00
hoshi-hiyouga
b9736c13e0 Merge branch 'main' into feat/support_ms
Former-commit-id: 698756dffb7d4e602b3e0cab66ef0a4befe7215c
2023-12-12 17:55:32 +08:00
hiyouga
c47725ff34 fix webui
Former-commit-id: 15ad266206b12181788db5bb112c2299050d6139
2023-12-12 15:27:40 +08:00
xingjun.wang
3ee3fe0bbb add use_streaming
Former-commit-id: 80388abdb7ee88eb4afad92d8c706370c0574039
2023-12-12 14:23:05 +08:00
xingjun.wang
e54dad75da fix cache dir
Former-commit-id: 6231272b9c51d44196f1fbec026973231e489b67
2023-12-12 14:21:33 +08:00
xingjun.wang
39c2f03eab add print info for test
Former-commit-id: e4ae2fccf0cbec57fb5fb01fd7cc352da69b23bf
2023-12-12 14:14:40 +08:00
xingjun.wang
fb9e1c4087 update cache dir
Former-commit-id: c8a1ce847fd7a75a06659133d92a0ac42e52a839
2023-12-12 13:08:18 +08:00
xingjun.wang
ed26bb3d82 update args for MsDataset.load
Former-commit-id: c5f69357a167cbf99a93607177526e787419ea05
2023-12-12 13:02:54 +08:00
xingjun.wang
0baf32e219 update
Former-commit-id: e15fc417d897c3063a25d6eb7eb89d1916db3cc5
2023-12-12 12:03:23 +08:00
xingjun.wang
79a376d1db for test
Former-commit-id: 33d9082320098f994bfa0c6353459afcb93165b7
2023-12-12 11:52:59 +08:00
xingjun.wang
b634e91c43 for test
Former-commit-id: 95ea942bd32402018e7c5dc61d50153c602ab67a
2023-12-12 11:47:59 +08:00
hiyouga
9e2cc21d04 update readme
Former-commit-id: 42e042a4206aeb5177ddde56386e9655b0c06460
2023-12-12 11:44:30 +08:00
hiyouga
6975124a57 support mixtral
Former-commit-id: 75b5b8e36ab1933b2625f11b645f56cbc805fd85
2023-12-12 11:39:04 +08:00
hiyouga
9f69307db1 fix baichuan resize
Former-commit-id: 66956d13074a9bc74d7a737b9476f38361a7764a
2023-12-11 20:55:50 +08:00
hiyouga
c3448a045c tiny fix
Former-commit-id: 1f839fc4f278c2a258df22899241fc66a2cca682
2023-12-11 18:09:40 +08:00
hiyouga
95c561983c support resize embeddings #1786
Former-commit-id: 368a41bd3c6a04f869083058d9165954fbdad105
2023-12-11 17:50:02 +08:00
hiyouga
7a03c8dab5 use peft 0.7.0, fix #1561 #1764
Former-commit-id: 423947bd58aa50da8785b8ceca1e7e288447a9da
2023-12-11 17:13:40 +08:00
hiyouga
f3ffa8310f fix #1784
Former-commit-id: 4e1af5a5d39d9e2f374c1372e2d67120c63fea09
2023-12-09 20:53:18 +08:00
yuze.zyz
596f496f19 support ms dataset
Former-commit-id: 98638b35dc24045ac17b9b01d08d3a02372acef3
2023-12-08 18:00:57 +08:00
hiyouga
2e6ed731cf fix #1771 and temporarily fix #1764
Former-commit-id: d0e5a5d604e16c2fe0035b0ac1d54dc3625d4da3
2023-12-08 16:26:20 +08:00
hiyouga
24ce319b6f add models
Former-commit-id: 758ae7937a41a95016e70180fb343011763c1b67
2023-12-06 13:33:18 +08:00
hiyouga
7b7bfea37d fix ppo trainer save logic
Former-commit-id: 5e70c41e4e12a1109570b0ff56346fe212c028ed
2023-12-04 19:00:19 +08:00
hiyouga
3be461260a update readme
Former-commit-id: a15f8cf19cac42acfb9917a2d7c9fa36a838b360
2023-12-04 11:22:01 +08:00
hiyouga
8dab8d9831 update readme
Former-commit-id: d3c46cb126a9182be765341fe31c860d71430712
2023-12-04 11:02:29 +08:00
hiyouga
fb4c5f3c91 fix #1715
Former-commit-id: 3f9192dbbbafdc2171d2eb80282d5cae47565b7b
2023-12-03 22:35:47 +08:00
hiyouga
5fe3cce5a3 release v0.3.3
Former-commit-id: 72ddb5fcce1649599671de214667d8d899ef5203
2023-12-03 21:59:45 +08:00
hiyouga
09f165d442 fix bug
Former-commit-id: 2fd7a8fc3134af66193a5e8db8fea35025f82de9
2023-12-03 21:40:40 +08:00
hiyouga
60aea7521b ppo support rm server
Former-commit-id: 20b0edf16f5b42cb2c4a795674647afb68cb3a4a
2023-12-03 21:38:51 +08:00
hiyouga
29545d0e5e implement rm server #1543
Former-commit-id: 2e5bb6888c86079493456c2ddd525f8c52b9963e
2023-12-03 20:52:54 +08:00
hiyouga
4a14099cfd fix #1707 #1710
Former-commit-id: 243a596518ad69cf1eec20a082534b9e94353ce4
2023-12-03 11:33:12 +08:00
hiyouga
b052574ddf add logo
Former-commit-id: 597894ad31c186120335252ccc0cc48fcea701b4
2023-12-02 01:31:24 +08:00
hiyouga
5ea6a7c6d6 fix #1642
Former-commit-id: 11be28201f688ac21cf94135067d37e9aa7ab0a1
2023-12-02 00:37:53 +08:00
hiyouga
8ca196d51f add xuanyuan models
Former-commit-id: 1dfa9de3723550cddf24bbc0739cad6207731212
2023-12-02 00:35:29 +08:00
hiyouga
5f572cbd77 fix gptq training
Former-commit-id: bec58e3dc575aa4247e563881a456328ee5ef496
2023-12-02 00:27:15 +08:00
hiyouga
679bd3ab30 tiny fix
Former-commit-id: fd2782a06ba4efa76cacbb49eb76a05de8d8aca6
2023-12-01 23:37:10 +08:00
hiyouga
da3d59fada fix gptq model inference
Former-commit-id: f7da9a87cb48cacb7d56322817b05d6f471f6508
2023-12-01 23:34:14 +08:00
hiyouga
835d27151d update readme
Former-commit-id: a0a9408e11f6b4cfb39af3f28402353b7cf48fa6
2023-12-01 22:58:29 +08:00
hiyouga
f1d7228a74 fix #1703
Former-commit-id: eee2e9abf6df345c5471e8ca7639293543ba720c
2023-12-01 22:55:41 +08:00
hiyouga
72bbd5bdef patch modelscope
Former-commit-id: 8888cf53f040f5a2d8c0e59cddf79b252449bf58
2023-12-01 22:53:15 +08:00
hoshi-hiyouga
ad9d866547 Merge pull request #1700 from tastelikefeet/feat/support_ms
Support ModelScope hub

Former-commit-id: f79c3b663a91ac2a7cdcf71192b6dd84f110b8f1
2023-12-01 20:25:18 +08:00
hoshi-hiyouga
a1ec668b70 Merge branch 'main' into feat/support_ms
Former-commit-id: b8954342611e24bc3af972747fd016cde89eee3f
2023-12-01 20:23:46 +08:00
yuze.zyz
389687a56d remove useless code
Former-commit-id: 323df46dd6a8eaf1fd608380406dcbce80c097b2
2023-12-01 17:28:23 +08:00
tastelikefeet
97280c73b9 fix bug
Former-commit-id: 6d483e76141420e0cb577541e6e1794c20f025f6
2023-12-01 17:27:00 +08:00
hiyouga
f3c622b665 fix err hint
Former-commit-id: 935a4a01bd9204129dd72a500ed75b268714d1e8
2023-12-01 17:13:22 +08:00
hiyouga
d71e8d8dbf add err hint
Former-commit-id: 2cf0249ec6f7524c39a6c8df73593f6d25b665b7
2023-12-01 17:04:37 +08:00
hoshi-hiyouga
02c2089ac8 Merge pull request #1699 from Samge0/patch-1
Update .gitignore

Former-commit-id: ab9da1bc5043fedeac8e57614e5986ebdd2128af
2023-12-01 16:52:57 +08:00
SamgeShao
07ad28a053 Update .gitignore
Former-commit-id: b2ec86ef63683665382c2fda142c3d9743e3c8a7
2023-12-01 16:37:41 +08:00
yuze.zyz
d323ccc3ec add readme
Former-commit-id: 3d5ec6f12b4ae7d04520e6865516a9a6dd4f7efe
2023-12-01 16:11:30 +08:00
hiyouga
4738d002c7 tiny fix
Former-commit-id: 37aa7099dff2a9a7b52e259dac92de41ce606946
2023-12-01 15:58:50 +08:00
hoshi-hiyouga
ec099b0586 Merge pull request #1695 from Samge0/dev
Improve:"CUDA_VISIBLE_DEVICES" read from the env

Former-commit-id: b49cde0c29774820dcf4463e3f1ef00114af7219
2023-12-01 15:56:18 +08:00
hoshi-hiyouga
a51253fea2 Merge pull request #1690 from billvsme/main
Improve get_current_device

Former-commit-id: c3b8cc27c91248a7381b3333abf099064412dc1a
2023-12-01 15:44:35 +08:00
hiyouga
304ec9ec6a fix #1696
Former-commit-id: 722ae14a652af34d9b91f9459e613d7959ecaa7e
2023-12-01 15:34:50 +08:00
tastelikefeet
8547085615 add model
Former-commit-id: 48e8d8438bc6cd2c75dc39419c45aaebb34a2e0a
2023-12-01 15:06:17 +08:00
samge
14b139ecb5 Improve: read "CUDA_VISIBLE_DEVICES" from the env
Former-commit-id: 7a61daa8be76779c876d685c57c464133ca70752
2023-12-01 11:35:02 +08:00
billvsme
7b45f5068f improve get_current_device
Former-commit-id: 2b07815e7fc8dc6ad0a7e9eccdd6681fbab35f3c
2023-11-30 22:40:35 +08:00
hiyouga
99ceee840e fix #1597
Former-commit-id: d77a3a79a0e854803a57af8ac6a7246691f69f70
2023-11-30 21:47:06 +08:00
hiyouga
8ed68301e3 fix #1668
Former-commit-id: bccc71259e703ca1e1d88169e385a026c4efa92e
2023-11-30 21:02:00 +08:00
hiyouga
664267e050 fix #1682
Former-commit-id: 06d56696731eadbeeea615eae4efce1b6c36def4
2023-11-30 20:03:32 +08:00
hiyouga
7ef8f46591 add models
Former-commit-id: b9eaadde8b5f4b9f89fa7bb910b325fcf9c84434
2023-11-30 19:16:13 +08:00
yuze.zyz
6933c1fed2 fix
Former-commit-id: e8774b4c9cbc8f894621ec72957f720d5c83d22b
2023-11-29 21:43:58 +08:00
yuze.zyz
9d125bf533 support ms
Former-commit-id: fdd4f94f563110ef9f96ab4a7fd954def32e9785
2023-11-29 20:36:55 +08:00
hiyouga
08d5340bd8 add gpu requirement #1657
Former-commit-id: 8581a9133790573031d9615a551fb677eb3be461
2023-11-29 12:05:03 +08:00
hiyouga
0e6f4f981e fix #1658
Former-commit-id: 3126687c4820c34daa6a2e9e3bf9065ad59e92dc
2023-11-28 20:57:24 +08:00
hiyouga
670ee3934f fix #1659
Former-commit-id: e4123129aae59f4123d53c1f5320e3d5e09ae26d
2023-11-28 20:52:28 +08:00
hiyouga
569860d7ac support export size setting
Former-commit-id: 1a4de54586c21cdbbc89f8a716ca5a54c87a6120
2023-11-26 18:34:09 +08:00
hiyouga
953a562ec1 support Yi-34B-Chat models
Former-commit-id: 1751a79c27e7fc13e76a731a061dc0c10d828cda
2023-11-23 19:31:49 +08:00
hiyouga
7f54008d3c update readme
Former-commit-id: 561481a8008fde5a3273558460193864a09866ed
2023-11-21 13:15:46 +08:00
hiyouga
5f5959bc33 set version
Former-commit-id: 6b47ad74c7b3099f9b5087c73db4aee42c451297
2023-11-20 22:57:44 +08:00
hiyouga
0105cd48f2 support GPTQ tuning #729 #1481 #1545, fix chatglm template #1453 #1480 #1569
Former-commit-id: fdccc6cc9b68890199e9250cabdb996ff2f853b9
2023-11-20 22:52:11 +08:00
hiyouga
28258aecd2 update ppo trainer
Former-commit-id: caa525a5c6f228b9ad71387d1fe4f1c2ffa2479e
2023-11-20 21:39:15 +08:00
hoshi-hiyouga
e585950c54 Merge pull request #1553 from hannlp/hans
Change the default argument settings for PPO training

Former-commit-id: 1b64678fa4979485f67c3bb1420dfdff6fcbc6e7
2023-11-20 20:32:55 +08:00
hiyouga
bcd661afa6 fix value head model resuming
Former-commit-id: ccf0b65d886c09c7c49977c43b0544fe1bfcc258
2023-11-20 19:01:37 +08:00
hiyouga
adf2730d1d fix #1567
Former-commit-id: 8c01ffe8d277d49a413571e0669f460c8d0802bf
2023-11-20 18:46:36 +08:00
hiyouga
ba2be6371d better data streaming
Former-commit-id: 65ac8e84fd6f22255c587b20382fdf5d8131d015
2023-11-19 23:32:47 +08:00
hiyouga
d2ff09a404 fix model card network issue
Former-commit-id: 36155cd1893bea036f15c648c06b0047c02dfb4f
2023-11-19 23:03:19 +08:00
hiyouga
9f364d3880 fix Mistral template
https://github.com/lm-sys/FastChat/pull/2547

Former-commit-id: d426ecdf6e95402fc36893f7e4f17f881e1b957b
2023-11-19 16:29:30 +08:00
hiyouga
cfad41b901 fix #1263
Former-commit-id: faff5d32621f187ebd3124d7ade04e3fa437c53e
2023-11-19 16:05:18 +08:00
hiyouga
6889f044fb fix #1558
Former-commit-id: 263b2b24c8a649b51fa5ae768a24e67def8e0e96
2023-11-19 14:15:47 +08:00
hiyouga
3d1ee27ccd fix evaluator and cached_file in 4.31.0
Former-commit-id: 970897da402f604220d45084d492de4dab809ba4
2023-11-18 19:39:23 +08:00
hiyouga
775ce62950 update benchmark
Former-commit-id: 1cd2ae910e3ffca92978772d000de6fde2f6bb13
2023-11-18 11:30:01 +08:00
hiyouga
821a6f2fa6 update readme
Former-commit-id: a4d86a4bea1cce2219a54def9dfd3fd732d48e72
2023-11-18 11:15:56 +08:00
hiyouga
5197fb2fad add benchmark
Former-commit-id: 85a09cb649be740a47359371499d821ee0d5c81e
2023-11-18 11:09:52 +08:00
hiyouga
92abe91d22 update dataset
Former-commit-id: a310b22b446118d90dd73906847ed3d01a574b50
2023-11-17 23:19:12 +08:00
hiyouga
a7bf0b85d7 fix quantization
Former-commit-id: 8268aefe8fba268065e24ffe159a9c49f7c6f3a5
2023-11-17 22:21:29 +08:00
hiyouga
5ce5ea84a9 fix #1550
Former-commit-id: c12acd21a5a500892ed739c79327ccd39fddad5b
2023-11-17 17:23:13 +08:00
Yuchen Han
992be39f90 Update README_zh.md
Former-commit-id: 3e8a17c92d700bcafbe6559ea689dc4c0ad0481a
2023-11-17 00:18:07 -08:00
Yuchen Han
cab80a3c56 Update README.md
Former-commit-id: c1532dc6fe5d5b427011bd5509a2bc44ee16d951
2023-11-17 00:17:36 -08:00
Yuchen Han
6af7107938 Update workflow.py
Former-commit-id: f70b7ffe6442217a222e0ef797c407f259a13886
2023-11-17 00:16:27 -08:00
Yuchen Han
bcd31cf245 Update finetuning_args.py
Former-commit-id: 30e3430553f1f7e09cd57ef2c9843b549746c618
2023-11-17 00:15:51 -08:00
hiyouga
85c4ccfef9 fix packages
Former-commit-id: c93175d18ad9a4b7b61629153acabf8d0c978dfc
2023-11-17 16:11:48 +08:00
hoshi-hiyouga
dc0f81aabc Merge #1544 from Outsider565/main, fix #1548
Fix: Change rouge-chinese package name to rouge_chinese
Former-commit-id: c24da51cb5d3f78d54dcbfb31b565fcac4783a76
2023-11-17 16:09:42 +08:00
Shaowen Wang
07f934566a Fix: Change rouge-chinese package name to rouge_chinese
To reproduce (Python):
import importlib.util
importlib.util.find_spec('rouge-chinese')  # -> None
importlib.util.find_spec('rouge_chinese')  # -> ModuleSpec(name='rouge_chinese'...)
from rouge_chinese import Rouge
print(Rouge.__module__)  # -> rouge_chinese
Former-commit-id: a78b11d944b6cb7dbe2a1d8a24d240e196aa530a
2023-11-16 20:12:35 -06:00
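The takeaway from the repro above: importlib.util.find_spec expects an importable module name, and the pip distribution name rouge-chinese differs from its import name rouge_chinese, so the dependency check must use the latter.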
hiyouga
77cb18e9e3 fix chatglm template
Former-commit-id: 6a4b79c2e0610a17012bf3e72a2b5e8bac060092
2023-11-16 22:54:15 +08:00
hiyouga
fccaecf730 Update bug-report.yml
Former-commit-id: 92ed2297c78d016113fa7f90cedc0933a0bb2be0
2023-11-16 19:37:35 +08:00
hiyouga
53cdfe8f73 add issue template
Former-commit-id: 4ca01a6b051043593541403d74e4d464b70e0e4b
2023-11-16 19:35:30 +08:00
hoshi-hiyouga
ea03523c6a Update issue templates
Former-commit-id: f967abcfcd052b65745f20e2c760ca45c412b66a
2023-11-16 18:56:30 +08:00
hiyouga
caf3cbf8d7 fix web ui demo
Former-commit-id: e566a68a27872f730b111078977048755ec74a40
2023-11-16 18:41:55 +08:00
hiyouga
da411066c9 fix web ui demo
Former-commit-id: 6fead193fe44fec74c2262d8653ed2f6006fac36
2023-11-16 17:12:23 +08:00
hiyouga
95d0f77fc2 release v0.3.0
Former-commit-id: de7f5b622340ab09ebbe57ad2703e63d06dfdeea
2023-11-16 16:00:11 +08:00
hiyouga
9b2654277b update readme
Former-commit-id: 4018aabc5d1623033d27a8aced25804de79b7e7b
2023-11-16 15:58:37 +08:00
hoshi-hiyouga
f1b3bdac3f Merge #1525 from hiyouga/dev, fix #224 #336 #931 #936 #1011
Refactor llmtuner, support full-parameter RLHF

Former-commit-id: 3b92826803dc69471827b4f8204c2c3dc5310619
2023-11-16 15:47:13 +08:00
hiyouga
595fdbd95d fix css
Former-commit-id: 7afec127f60257462828298b25a5f6fd9c6f42c5
2023-11-16 15:45:38 +08:00
hiyouga
dab9385297 fix bug in web ui
Former-commit-id: a598f145ec903dd2b2c984d951b6c450b142ece5
2023-11-16 15:21:24 +08:00
hiyouga
df83def566 update ppo and demo in webui
Former-commit-id: de7571704c82121db13e3fc907379d2453100191
2023-11-16 14:55:26 +08:00
hiyouga
f9d4e37b3c fix bug in freeze tuning
Former-commit-id: f6b436a08421ca17d64abc51497f4aa43729a43b
2023-11-16 14:25:11 +08:00
hiyouga
e59a3d71e0 tiny fix
Former-commit-id: d65519d8a44b73bbb713741c23465f13c35c83f5
2023-11-16 03:27:19 +08:00
hiyouga
de3a84ac59 fix rlhf callback
Former-commit-id: f5485452d660caef56474cb7dc37abbe4f34599e
2023-11-16 03:26:19 +08:00
hiyouga
e017266b98 fix bug in PPO training
Former-commit-id: 2e99f0e53ce6de0acbcab85dd50aef874e8c6336
2023-11-16 02:32:54 +08:00
hiyouga
f81a8a5e5c fix import bug
Former-commit-id: 2356029cdd120d5f7bf630b80681ce8c53bff90d
2023-11-16 02:27:03 +08:00
hiyouga
7a3a0144a5 support full-parameter PPO
Former-commit-id: 4af967d69475e1c9fdf1a7983cd6b83bd431abff
2023-11-16 02:08:04 +08:00
hiyouga
8263b2d32d add demo mode for web UI
Former-commit-id: 5ad34f08b4e1505d7933b973497347f126b2e818
2023-11-15 23:51:26 +08:00
hoshi-hiyouga
833cd490b8 Create CODE_OF_CONDUCT.md
Former-commit-id: 6bee64cdf9c75488033e600fb5b48738daa1ed3b
2023-11-15 20:42:15 +08:00
hiyouga
2162c37e41 update readme and constants
Former-commit-id: 7d83e3dd9101a4fdd0b589d0c1f7b609c0feecd1
2023-11-15 18:04:37 +08:00
hiyouga
b2ac8376e1 support multiple modules in freeze training #1514
Former-commit-id: 60abac70dfd778df2ae8b3a2e960ed8b607d7ab6
2023-11-15 17:08:18 +08:00
hiyouga
8079584143 fix imports
Former-commit-id: 6156f1abef631c675d150dd1cb0325cfc3820c91
2023-11-15 16:47:45 +08:00
hiyouga
09a4474e7f disentangle model from tuner and rename modules
Former-commit-id: 02cbf91e7e424f8379c1fed01b82a5f7a83b6947
2023-11-15 16:29:09 +08:00
hiyouga
81530133ff fix #1507
Former-commit-id: 1ba9c53bd9743fa95fca1516c0ed9da352dbe9a1
2023-11-15 16:22:32 +08:00
hiyouga
cc4b384ac3 Update cal_lr.py
Former-commit-id: b92ef6c80ae108982046ec1419efb67c8b10b250
2023-11-14 21:14:42 +08:00
hiyouga
3852daf447 Update cal_lr.py
Former-commit-id: b6c3f9b24324403db41c5680a00aabc6d53bbeb9
2023-11-14 21:13:01 +08:00
hiyouga
5c97111f9d Update cal_lr.py
Former-commit-id: 1258eec806f6f4580a6eb7d9eb44f431f4c0da4f
2023-11-14 21:09:30 +08:00
hiyouga
75dd1f0f7e add cal_lr.py
Former-commit-id: cea2ba17efc47917e63437a376f220864f7f90dd
2023-11-14 20:58:37 +08:00
hiyouga
c9a4551012 fix #1494
Former-commit-id: 07c8d734529f03e47ef638a1bda222e8824d3d38
2023-11-14 18:07:20 +08:00
hiyouga
87197ba91d fix #1489
Former-commit-id: ebdeaca9cdfd6138c690a0fcb9f676deaddff177
2023-11-14 15:27:05 +08:00
hiyouga
7461bf84e5 support eval remote dataset
Former-commit-id: 71dd2698bf8c0b9ef7af995fb1e49e39fa66074e
2023-11-14 02:42:30 +08:00
hiyouga
fbc0357b2e fix dc link
Former-commit-id: 04c3a1f1c98d8f191102e359def0c8dcdc9621e3
2023-11-13 23:22:56 +08:00
hiyouga
ec334f5891 release v0.2.2, fix #1478 #1466
Former-commit-id: c9534c411716e1dceb54c5eb35fe845c93ee2973
2023-11-13 23:09:05 +08:00
hiyouga
885efe772e fix #424
Former-commit-id: ca24d445f825e120e659f5cd080a954c2243b8f2
2023-11-13 22:42:23 +08:00
hiyouga
64fc9ba678 refactor evaluation, upgrade trl to 0.7.4
Former-commit-id: ed09ebe2c1926ffdb0520b3866f7fd03a9aed046
2023-11-13 22:20:35 +08:00
hiyouga
989eccd286 fix flashattn warning
Former-commit-id: 6eb095d39bd82fdbdb729a0ea57fc7246e3a60d6
2023-11-10 18:34:54 +08:00
hiyouga
f0766a2ab0 add todo
Former-commit-id: 0bd884feb11736d0ab24ca19885151cb47d9dcd3
2023-11-10 14:38:18 +08:00
hiyouga
178b85ff9a refactor constants
Former-commit-id: a4d4c3fd35276f20e3b354e9d13ea971029c8775
2023-11-10 14:16:10 +08:00
hiyouga
68dd1ef121 tiny fix
Former-commit-id: 97ba2027bb1ddc01a3c824c40d5a180828810c2c
2023-11-09 17:20:49 +08:00
hoshi-hiyouga
b222cffe98 Merge pull request #1454 from yyq/main
Update finetuning_args.py

Former-commit-id: e67d8b93705383a8590f99e26e9fe8f663712aef
2023-11-09 17:12:18 +08:00
Yanqing
b4f1ab93d1 Update finetuning_args.py
Update the lora_target module names for chatglm/falcon/bloom

Former-commit-id: 06606739af035a80ae9ddba9d12c965ed289305d
2023-11-09 17:04:40 +08:00
hiyouga
f2e139f5cd fix #1452
Former-commit-id: 4d16214467715df458e24d03bb7d303d62b8bdcd
2023-11-09 16:41:32 +08:00
hiyouga
a9cbca1604 update readme
Former-commit-id: f7ead54042868550a3e8a6928ea3c0e2673f15b3
2023-11-09 16:00:24 +08:00
hiyouga
3a30ce6c16 release v0.2.1
Former-commit-id: 1c30f2be0140f5ab47c2bc811170d0271a0cdad6
2023-11-09 15:54:16 +08:00
hiyouga
48ec5355f9 add template, modify datasets
Former-commit-id: 81e54beb4d0f792f4fd7f450643caaf10f2f0b7d
2023-11-09 15:53:23 +08:00
hoshi-hiyouga
11859bc322 Merge pull request #1436 from lvzii/main
fix tokenizer config changed after pretrain

Former-commit-id: f485c3983e413fd3a3a57b451800705b072869a7
2023-11-09 14:30:50 +08:00
hiyouga
28c67a5be8 support parquet format #1446
Former-commit-id: 44a3b9ac9f10d2012b8ad3d8c48123db9a0da2f1
2023-11-09 14:17:40 +08:00
hiyouga
44fe93e9b0 fix #1438 #1439
Former-commit-id: 84260d58dda22adc32c26bc943ed2a36fd01341d
2023-11-09 13:45:10 +08:00
lvzi
09a1681b63 fix tokenizer config changed after pretrain
Changing the tokenizer's attributes at the preprocessing stage will result in saving a wrong tokenizer; baichuan2 is one example (a minimal sketch follows this entry).

Former-commit-id: 19942b5314b84267691f0a5657d0679f2ddbe58b
2023-11-08 15:50:46 +08:00
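A minimal sketch of that failure mode, assuming a Hugging Face tokenizer whose padding_side attribute (picked here purely for illustration) is serialized by save_pretrained; "gpt2" and the output directories are placeholder choices, not the values used upstream:
import copy
from transformers import AutoTokenizer

# Buggy pattern: preprocessing mutates the shared tokenizer in place, so the
# changed attribute later leaks into the tokenizer saved with the checkpoint.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"            # done during preprocessing
tokenizer.save_pretrained("bad_output")    # persists the mutated attribute

# Safer pattern: mutate a throwaway deep copy during preprocessing instead,
# leaving the tokenizer that will eventually be saved untouched.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
preproc_tokenizer = copy.deepcopy(tokenizer)
preproc_tokenizer.padding_side = "left"
tokenizer.save_pretrained("good_output")   # saves the original config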
hiyouga
f5ba2190fb fix ppo train and dpo eval
Former-commit-id: ced863031836632cb5920e22ae6991f251372118
2023-11-07 22:48:51 +08:00
hiyouga
14a38b5069 fix #1422
Former-commit-id: 25d7bbd0a5142f001bd2ff498df07b24137050a9
2023-11-07 19:42:01 +08:00
hiyouga
f23e5b602a fix reward model loading
Former-commit-id: 9709ca501180a1afce32e9043aedb359762b437d
2023-11-07 17:20:51 +08:00
hiyouga
857696ed9c fix args
Former-commit-id: 44d0fa2ac6a6423c7ddaf91eb8998c1b9248c04e
2023-11-07 16:36:06 +08:00
hiyouga
2084133058 update info
Former-commit-id: 89643b8ac1e3fa8d2f29f1c88e4d4503410c0d05
2023-11-07 16:28:21 +08:00
hiyouga
f7f0c3070e delete file
Former-commit-id: 7d6355db0fd5809b99f3fa42753cf4dffd251fd1
2023-11-07 16:20:12 +08:00
hiyouga
46235aa514 fix #1418
Former-commit-id: 9bfecc72c53cf95fea4a9ff02ec40a65da6d4f54
2023-11-07 16:17:22 +08:00
hiyouga
2eb65d21ac upgrade peft, fix #1088 #1411
Former-commit-id: aa7d104f8e050d12cb8f585bc8a52c850995500f
2023-11-07 16:13:36 +08:00
hiyouga
37a0d62a82 update requirements
Former-commit-id: 82ebbbbb80b3f3f616274210970738d0f44b5a0a
2023-11-06 19:01:21 +08:00
hiyouga
21ac46e439 use seed in evaluate.py
Former-commit-id: ab5cac1dfa681933f3266827f80068ce798b4c56
2023-11-06 18:17:51 +08:00
hiyouga
ba3e8ba20c update readme (list in alphabetical order)
Former-commit-id: e6a67b5477ee095bd92764581cfe6af57e799a69
2023-11-06 17:18:12 +08:00
hiyouga
2c48e798ca update templates
Former-commit-id: 85be2e242b062283f192c4c4d0715dc1e8a68589
2023-11-06 12:25:47 +08:00
hiyouga
4e40f5b62b fix #1383
Former-commit-id: 9b8a782aa80f27c3e2a2e2621f9be17cae1a27e8
2023-11-06 11:42:23 +08:00
hiyouga
2a8892b785 fix deepseek template
Former-commit-id: 1fdbcdad9a1cdb20299350efd87a8e5cb8c625a3
2023-11-05 13:08:46 +08:00
hiyouga
ee3b33ff03 support deepseek coder #1378
Former-commit-id: ae0c829917b9de10e71199c85c77a52cdcd2b7b3
2023-11-05 12:51:03 +08:00
hiyouga
b2c3001f8e fix #1365
Former-commit-id: 0277d120e62164bb7fa1d6043b8fcc52c881fe96
2023-11-05 12:21:07 +08:00
hiyouga
6cfe1e1ac2 tiny fix
Former-commit-id: 594c510a20d6c2782d7b7ffff18931e3003e6c22
2023-11-03 01:26:06 +08:00
hiyouga
52326870e4 fix #1290
Former-commit-id: ad911d258c4cea16f54d09bc192e076c21d26394
2023-11-03 00:44:53 +08:00
hiyouga
217fde0918 fix bug in data loader, support dpo eval
Former-commit-id: f4f3dcff990468a2fa864b7176adcebbcf16dac9
2023-11-03 00:34:26 +08:00
hiyouga
065021d82a update data readme
Former-commit-id: 6a65ef44ed58714c611da60b5af96b85352e8735
2023-11-03 00:15:23 +08:00
hiyouga
4bb643e685 update data readme (zh)
Former-commit-id: b32fb3a984c681732b82f6544d6c05a98c34cf4c
2023-11-02 23:42:49 +08:00
hiyouga
b77c745b1a support sharegpt format, add datasets
Former-commit-id: 202daf8987ccb7523be03ca535b572b5c9e65994
2023-11-02 23:10:04 +08:00
hiyouga
7d13501b94 support pagination in webui preview
Former-commit-id: f2307e26b9c2ce5d60917cce5a9638466ea676c8
2023-11-02 21:21:45 +08:00
hiyouga
ac74639b32 fix webui
Former-commit-id: 9192948fa221c0275ddfa579ef6b3442d45b8962
2023-11-02 18:03:14 +08:00
hiyouga
12fa56ae68 support warning in webui
Former-commit-id: 9903b523fad2f0ec0e66c3d313823bd4674bfa2b
2023-11-02 17:57:04 +08:00
hiyouga
f11b863f4b fix #1349
Former-commit-id: 556c023eab2a68560b26a7d5318a79410fb0c700
2023-11-02 17:02:44 +08:00
hiyouga
f3e4b72957 fix #1356
Former-commit-id: d2ed436108a339d405dad1be1ca15baca3d6d3e4
2023-11-02 16:51:52 +08:00
hiyouga
8d52fb46ca fix #1325
Former-commit-id: 59f2cbbd52d4646fbd1ba83032bf522ecc49a50f
2023-11-01 23:38:49 +08:00
hiyouga
dab8f45033 fix chat
Former-commit-id: 68f2b3df09c4c8638b9e225fd5b8aed3541e97a0
2023-11-01 23:07:58 +08:00
hiyouga
bff8b02543 update gradio, support multiple resp in api
Former-commit-id: a34263e7c0e07a080276d164cdab9f12f1d767d2
2023-11-01 23:02:16 +08:00
hiyouga
2406200914 fix SFT trainer
Former-commit-id: bf09b6a6cd75cc2738d9af6b8c30bcbba77fa9b5
2023-10-31 21:52:52 +08:00
hiyouga
db06fcfc84 fix #1316
Former-commit-id: 88a753fe80e277007bac2264aee24024e18f2314
2023-10-31 11:32:08 +08:00
hiyouga
93b9f74e9f update projects
Former-commit-id: 33d58e9171ad2693b9d54715eb61a6f4326c59f4
2023-10-29 22:53:47 +08:00
hiyouga
33ec844f76 add projects
Former-commit-id: 495a68cd5962dd3b3af7e4a920d91ac25531a862
2023-10-29 22:07:13 +08:00
hiyouga
0f727b393e update constants
Former-commit-id: ebacbb1072045924a7e335cc9dda488d6f0be8b3
2023-10-29 13:30:20 +08:00
hiyouga
7da2aad6ee fix vicuna template
Former-commit-id: a98eda0803e4b73a24f12d848e14161451921e98
2023-10-27 22:15:25 +08:00
hiyouga
6f09f50d02 fix chatglm3 template
Former-commit-id: 69bcbc9f6c98e4f4ad97ec0306b33ab21923d311
2023-10-27 21:12:06 +08:00
hiyouga
5919832059 update readme
Former-commit-id: 6fb92c7088316c56ce8656e540fc47b0a5a1bf18
2023-10-27 19:19:03 +08:00
hiyouga
f7635c1afc support chatglm3
Former-commit-id: ba82e13bbeed3b262d301196b1860d73f319401d
2023-10-27 19:16:28 +08:00
hiyouga
c762168ed0 support dataset cache
Former-commit-id: f79ee62eb4a2a4a01cb4e2a6aa2d07158cf8eb59
2023-10-26 21:48:45 +08:00
hiyouga
67a46e553f fix #1287
Former-commit-id: d885aca472c6448bbf9a9e8d16bead92038825e3
2023-10-26 17:49:41 +08:00
hiyouga
e406f37b54 fix #1285
Former-commit-id: 2f8fe4439506e844b147fe38b5eb878c5748c31c
2023-10-26 16:34:52 +08:00
hiyouga
62fe877124 remove filter in preprocess
Former-commit-id: 9eac08b35fec47129a29c401ca265343f8388ab0
2023-10-23 23:46:02 +08:00
hiyouga
a0e682ba79 update neftune logic
Former-commit-id: bb4f0589ed23bf0236d3e918272ad64f0a05ef39
2023-10-22 17:42:13 +08:00
hiyouga
49e8a87383 fix webui
Former-commit-id: a5a5a7bc1f53d36e1b26e418999465903cb7d9ed
2023-10-22 17:24:56 +08:00
hiyouga
b2764b49ca add new options in webui
Former-commit-id: 6698b832dd9cc2d7d60be4fa5ab90e34a7e9d8e0
2023-10-22 17:17:58 +08:00
hiyouga
06b810de8f fix recursion error
Former-commit-id: c7938188c36a71a878bca982b7dd151195164986
2023-10-22 16:28:37 +08:00
hiyouga
6da51565f5 reimplement neftune
Former-commit-id: efe9e5a194d3a9f052701d904715238816e4c09e
2023-10-22 16:15:08 +08:00
hoshi-hiyouga
1f69965239 Merge pull request #1252 from anvie/neftune
add NEFTune optimization

Former-commit-id: 85d5c5fbe731f486c3e83812227fa05edc131487
2023-10-22 15:59:20 +08:00
anvie
af2d61178d add NEFTune optimization
Former-commit-id: 603e0298af64116ac07130fe6661a9ba823c186c
2023-10-21 13:24:10 +07:00
hiyouga
6a955ccf4f fix openchat template
Former-commit-id: 88b9b657bc50495ac4c42f64195fc652fe4ca3df
2023-10-21 01:25:42 +08:00
hiyouga
c0658711ca fix tokenizer padding side in evaluate.py
Former-commit-id: bcb43ff8ba1946c1f7e7865c9d0fb47ba276935d
2023-10-21 00:30:04 +08:00
hiyouga
d602f06882 fix #1232
Former-commit-id: 49975755d47344e362145c52548fdda8783f2c0c
2023-10-20 23:28:52 +08:00
hiyouga
1cb9a38ac2 fix #1215
Former-commit-id: d91b43a8afbea4859357f2224e3d9b9d71160e6d
2023-10-19 16:19:21 +08:00
hiyouga
47a1f73d0f fix #1218
Former-commit-id: b301f35bd4a3bf368159c8f5fb4e2736f922115b
2023-10-19 16:17:41 +08:00
hiyouga
142dd63b47 fix #1228
Former-commit-id: e4e0cae3f55da2f1b566c97dbfdd7fc5b7b728a4
2023-10-19 15:54:10 +08:00
hiyouga
b1bd8370c2 fix #1217
Former-commit-id: 065fc0a6f3f005bb87e1c5c126c8b6bb470ce700
2023-10-19 15:52:24 +08:00
hiyouga
215660c8da rename webui
Former-commit-id: 26feaf80fff6177d9eb4e28ad18feb6d34d3ea27
2023-10-16 15:16:24 +08:00
hiyouga
0cafe67efe fix #1197
Former-commit-id: 00100e23fcfef9587fda4cf01c62599d996e1176
2023-10-16 15:13:46 +08:00
hoshi-hiyouga
ea83b3222b Update README_zh.md
Former-commit-id: 3450404bb9a33c3bd4b45ac4afcf51062f8c7d1d
2023-10-16 00:28:27 +08:00
hoshi-hiyouga
725087a04f Update README.md
Former-commit-id: d84896597eded79f78224faed81cc9f2df222978
2023-10-16 00:23:37 +08:00
hiyouga
d627ab4855 release v0.2.0
Former-commit-id: 7f941c1ab6c52915aa2675fa77cae5efc530fdd9
2023-10-15 20:49:43 +08:00
hiyouga
7d867e8df4 update readme
Former-commit-id: a99a92b129a3d2372e66ca73b87c3e521f144043
2023-10-15 20:28:14 +08:00
hoshi-hiyouga
3d34d44497 Update README.md
Former-commit-id: e6fcc1831dadd2ec2c0acb14697a35f6471139ab
2023-10-15 20:23:22 +08:00
hiyouga
a6f800b741 fix config, #1191
Former-commit-id: 5dbc9b355e85b203cb43ff72589374f0e04be391
2023-10-15 18:28:45 +08:00
hiyouga
a003d1fa1e disable tqdm in webui mode
Former-commit-id: 832be571bec2eefb79ea88f110b7827f5c1249e6
2023-10-15 16:18:25 +08:00
hiyouga
c2e84d4558 refactor export, fix #1190
Former-commit-id: 30e60e37023a7c4a2db033ffec0542efa3d5cdfb
2023-10-15 16:01:48 +08:00
hiyouga
68330eab2a fix eval resuming in webui
Former-commit-id: b28b53cd06777f213ef7b925a914ff5fd357ade1
2023-10-15 15:45:38 +08:00
hiyouga
7070f3969d tiny fix
Former-commit-id: 47b7b34357708a5354d542ddc239146c6417d718
2023-10-15 05:02:48 +08:00
hiyouga
e4727ab155 fix callback
Former-commit-id: 51208655a8c1d66551b7b644247321a3583debdc
2023-10-15 04:59:44 +08:00
hoshi-hiyouga
280e7d97ad Merge pull request #1186 from hiyouga/dev
Support Web UI resuming training

Former-commit-id: fcbecd0c4cb17b883e9b780a71d2abc38228293e
2023-10-15 04:53:14 +08:00
hiyouga
31e3805fb8 implement webui resuming training
Former-commit-id: 2d41672ef52414c56c50c8b4fdc442797ba682e9
2023-10-15 04:52:19 +08:00
hiyouga
ef248dbe15 fix bugs in webui
Former-commit-id: 4befa74ea630d90e4d7a1f7d7c34d39257717ec1
2023-10-15 03:41:58 +08:00
hiyouga
6a61b4b638 refactor webui
Former-commit-id: 813ecd8e51949c21ab6fbaa51cc2b1a84ee07952
2023-10-15 03:06:21 +08:00
hiyouga
4b1473502f fix loading dtype
Former-commit-id: d54a356128f7e335c12089702cf3de7f5b4baf16
2023-10-14 20:15:24 +08:00
hiyouga
bf211d818d fix #1176 #1177
Former-commit-id: 5627a2b57c270a78095a32083e2dc7aa02162875
2023-10-14 20:00:17 +08:00
hiyouga
27dd87c890 fix #1184
Former-commit-id: 5b069a967823e659dbc70b0d50361b3ad248087e
2023-10-14 19:20:11 +08:00
hiyouga
8659084ab0 fix webui
Former-commit-id: a0fe43aac968d9f6ca4724b8d718b45c03063b91
2023-10-13 16:27:59 +08:00
hiyouga
e1c9dcea93 update readme
Former-commit-id: 9d9018fad314cdc4512b4847633489cdd7a25347
2023-10-13 13:53:43 +08:00
hiyouga
171339ab17 update discord link
Former-commit-id: f725cb4940a3a18e9f1edca986ef06d425b39710
2023-10-12 21:44:28 +08:00
hiyouga
8542ba5c69 rename repository
Former-commit-id: 6100ac080a5e52edd66b98147aede6cb77481beb
2023-10-12 21:42:29 +08:00
hiyouga
97b74d328b fix ppo args
Former-commit-id: 0f12899951808f53a482082eb116bda309775930
2023-10-11 23:40:50 +08:00
hiyouga
3198a7e5f4 refactor model_dtype, fix PPO trainer
Former-commit-id: 3e17ee5afbcb823a7c9a2f91864b3750cd79edb4
2023-10-11 23:16:01 +08:00
hiyouga
a2d08ce961 add averaging in evaluation
Former-commit-id: b39d6e0b8658e1c69bbaf6bcb6cfaa8f7af30110
2023-10-10 23:16:31 +08:00
hiyouga
bd8ea09479 fix aquila template, repair sft packing mechanism
Former-commit-id: 8c82cfa5dd4bec957426b5bf176d242c77552ab0
2023-10-10 18:49:55 +08:00
hiyouga
6d0d46c7fb tiny fix
Former-commit-id: 31ccd3329ac634b239c43d60bd955cd95670df16
2023-10-10 17:41:13 +08:00
hiyouga
820540780a update readme
Former-commit-id: 4a9c8a4f18b07455c34e6c1e6bbc81cbefd82eea
2023-10-09 20:02:50 +08:00
hiyouga
f74d600497 fix flash shift short attention
Former-commit-id: e44ad23eafa39b3ac0400b6f97cd440106a87f44
2023-10-09 17:54:48 +08:00
hiyouga
94fec9f50e fix webui args
Former-commit-id: 64aa75c8cd7c84ab4a0f1dbaf4763765ba973f54
2023-10-09 17:13:57 +08:00
hiyouga
e387a50475 fix shift short attention
Former-commit-id: 9a49cce8e6f6b222f74a07bdab40efee6a77b0f1
2023-10-09 17:07:46 +08:00
hiyouga
5c4248a29c update webui #1086
Former-commit-id: 65a48bc398f18f71f5f2659b2070e3b9593af243
2023-10-09 14:50:14 +08:00
hiyouga
f22886e2b6 fix #1097
Former-commit-id: c5b8796322d9d48e815038f9fecf0ce39036a4ee
2023-10-08 22:29:26 +08:00
hiyouga
33af3cbf37 add llamafy_qwen.py
Former-commit-id: 6cdc91543c022edcc98076488f06e809fde9bad7
2023-10-08 22:05:36 +08:00
hiyouga
728dfb1be7 fix #1068 #1074
Former-commit-id: 26c6bfd21de06cc56be9a58e2ef69045ea70cc14
2023-09-28 14:39:16 +08:00
hiyouga
e49f7f1afe fix bug in packed sft dataset
Former-commit-id: 51d26b2af6612e65a91c576da5270028da27b322
2023-09-28 01:16:46 +08:00
hiyouga
21a454fa6c tiny fix
Former-commit-id: 35b355b76d2a8f8adf3750a905224e52d03d218f
2023-09-28 01:03:04 +08:00
hiyouga
22c6c27f78 tiny fix
Former-commit-id: 7451b2ae7e58d0f1857f01a037672a8c53b1bd0d
2023-09-28 01:02:11 +08:00
hiyouga
aecbb43096 fix #1064
Former-commit-id: fd4660aa72d981d7efdad465f24a59358626c975
2023-09-28 00:53:29 +08:00
hiyouga
fa53fd2db2 fix bug in pretraining
Former-commit-id: 18a2d90bd6e7c3e1e3513e6f9d895e4048b35b04
2023-09-28 00:45:20 +08:00
hiyouga
1c150995ae fix layer norm dtype
Former-commit-id: 67af21961b68d9b54d07b09e444c7140869f26da
2023-09-28 00:25:55 +08:00
hiyouga
6c5d8f089e fix #1026
Former-commit-id: d0940d0dbd03d4bbcc955304566b0d5507edf9e6
2023-09-27 22:57:09 +08:00
hiyouga
dd623325e8 fix #424
Former-commit-id: daaf89f1126112a73b9f115b0f5617a8cd974a3e
2023-09-27 22:49:43 +08:00
hiyouga
e8a375c8f2 fix #1032
Former-commit-id: 1235b2da5a79ffefd1342054ea8e7dabf47398c1
2023-09-27 22:42:16 +08:00
hiyouga
386d85ae72 refactor finetuning Args
Former-commit-id: be425a70a4c8f051717cf1e4464dbd79dae4c0b5
2023-09-27 22:28:06 +08:00
hiyouga
ebb3901b05 update readme
Former-commit-id: badbc210435d92cea8799bcd1af4c738da902cd7
2023-09-27 21:57:47 +08:00
hiyouga
20130b486c support LongLoRA
Former-commit-id: 0832ed37e7947d699f17375648a52f80752c2b6b
2023-09-27 21:55:50 +08:00
hiyouga
73c48d0463 add CMMLU, update eval script
Former-commit-id: 47f31f06a946eefa5a972e4a566cf3ce05e1e111
2023-09-23 21:10:17 +08:00
hiyouga
f7cecd20e3 update evaluate
Former-commit-id: 288137a76ed1528faa39b467da22f6468ba368ee
2023-09-23 11:55:31 +08:00
hiyouga
2bc64a7636 move file
Former-commit-id: 8711ca9b5421f971ee4cb2fada23832f1021577c
2023-09-23 11:52:12 +08:00
hiyouga
9564ddbb48 shuffle few shot examples
Former-commit-id: 2c9c14c122382e640dfa41a3799628c764c99457
2023-09-23 00:53:20 +08:00
hiyouga
28062c71b5 fix MMLU
Former-commit-id: eeab92323899694010469451b8dfb1f00d685bff
2023-09-23 00:42:23 +08:00
hiyouga
35d1921081 add MMLU and C-Eval script
Former-commit-id: 3403f876127b4b99c5e3edb2834cc3b9a3a0063f
2023-09-23 00:34:17 +08:00
hiyouga
4fbdf18c70 fix #1000
Former-commit-id: 85de2d0a99e4a81fae890a963ccbb5c6142d52d4
2023-09-22 15:00:48 +08:00
hiyouga
5e07ab01f0 update readme
Former-commit-id: 776f9ea3a5837cb3f80ebe53f19e9951400bf05d
2023-09-22 14:34:13 +08:00
hiyouga
fac465a21e fix webui
Former-commit-id: e28485b476816c1bd6c34f7ff9efaa9e3fb85176
2023-09-21 19:55:38 +08:00
hiyouga
e145a2ce0c tiny fix
Former-commit-id: d24ea58c1a44b94227f4cb60f13fc1dd79997d01
2023-09-21 19:52:06 +08:00
hiyouga
dc68c313ee fix #944
Former-commit-id: 032245647848aaa4167086636b6c985268c5fee3
2023-09-21 19:51:02 +08:00
hiyouga
95c0d9ab24 tiny fix
Former-commit-id: 1a7ddd8c1d20dc251f53923bd0ab9f3f1031dd21
2023-09-21 15:25:29 +08:00
hoshi-hiyouga
46a718f339 Merge pull request #975 from statelesshz/npu-support
Add Ascend NPU support

Former-commit-id: b348c7569c0d3f46b03fb274226444ac7a80e68d
2023-09-20 14:56:50 +08:00
statelesshz
496ba46960 support exporting models on Ascend NPU
Former-commit-id: 50f94e6d9d62c848db7a3db85fa999d67ddd9f04
2023-09-20 10:26:02 +08:00
hiyouga
43ae0aca1d fix webui
Former-commit-id: 2aa06a5a74d98ec25ed6e1e39df11230670f5bad
2023-09-19 18:35:21 +08:00
hiyouga
b8574c1b82 fix error info
Former-commit-id: b90ed220c5e94086d2b73045eff2440ff1b58c5c
2023-09-19 18:30:23 +08:00
hiyouga
32f8b1082b add tests.cal_flops.py
Former-commit-id: 47a119db6c6e937f6ed96f70e3cda6031b9fbd0d
2023-09-16 23:40:41 +08:00
hiyouga
6443fef31a update readme
Former-commit-id: 813c2df5dc179d82c6c999f63c2640e7c3f6aaff
2023-09-16 17:33:01 +08:00
hiyouga
14c3795a7d fix #913
Former-commit-id: d67c11d69277292648dd9889a7321345e2c0c437
2023-09-15 20:58:28 +08:00
hiyouga
3d9e2de573 fix #896
Former-commit-id: 4b70d623d817460de4732749110622e4a1b51958
2023-09-14 18:37:34 +08:00
hiyouga
0ca36a0f8d fix #887
Former-commit-id: e131bc03e05ccae3c6ad8bb42ccf2cdcc2cf3cea
2023-09-14 17:56:58 +08:00
mmbwf
3e5555502a Update utils.py
Fix parameter loading error.

Former-commit-id: 112850364c7fdb53e3a38d42861404fc519108ce
2023-09-14 15:38:04 +08:00
hiyouga
fbf5b5e0a9 add MathInstruct dataset
Former-commit-id: 3d1d4b47055739854cf9788a902607e1bbba3723
2023-09-13 22:30:14 +08:00
hiyouga
3305e66f8c fix ppo save model
Former-commit-id: 300ca6d904524f46cb520056e1319a1e9a13d169
2023-09-12 16:25:29 +08:00
hiyouga
e19a44c12b fix #762 #814
Former-commit-id: 9a30ee5009040afbc524dbac0dad99904b2adf5f
2023-09-12 16:10:10 +08:00
hiyouga
8b0e6b9d1b tiny fix
Former-commit-id: d8ea0691f84c971e6860526714fc9873c350b064
2023-09-11 18:27:08 +08:00
hiyouga
f3e638ac6a Release v0.1.8
Former-commit-id: d9666411375964d334d0a93ec162b27e05f70d49
2023-09-11 17:31:34 +08:00
hiyouga
42e0b30476 update flashattn, fix ppo save model
Former-commit-id: 0b08bc3dac246d4aa3f89afb7172529dcad9c39f
2023-09-11 17:25:36 +08:00
hiyouga
a09a7b650d remove PeftTrainer
Former-commit-id: cc0cff3e991f194732d278e627648e528118a719
2023-09-10 22:23:23 +08:00
hiyouga
332d7bbd56 truncate readme
Former-commit-id: fed5d0cc87e4a5a023f2edae622f2820bded1509
2023-09-10 21:04:20 +08:00
hiyouga
d3b6fece71 update readme
Former-commit-id: c42fe77fec2918fe8811d48ec88e9a7c1e6f07ab
2023-09-10 21:01:20 +08:00
hiyouga
9d963b82de update readme
Former-commit-id: b4109cfe548e091cd20fa84815dce5ff3974a090
2023-09-10 20:52:21 +08:00
hiyouga
a402161631 support FlashAttention2
Former-commit-id: 23e56c5554b948d4f08ad87849b261eafd2c7890
2023-09-10 20:43:56 +08:00
hiyouga
b481ad58e6 fix #850
Former-commit-id: e5975c4c6b8bd47ec506b0d4a4703bee05495436
2023-09-10 14:22:03 +08:00
hiyouga
f91c5f2638 fix lora target
Former-commit-id: d822e41e7ac7e310ee49e347fc45754284ce30b8
2023-09-09 17:04:45 +08:00
hiyouga
7143c551ab support automatic lora target detection
Former-commit-id: bce9984733d88bf013847eed523d1c75fdf0995e
2023-09-09 15:38:37 +08:00
hiyouga
50e93392dd fix chatglm2 tokenizer
Former-commit-id: 1ab60b4a93fa1be5dfe6ffbd4deb64c0f9d9b431
2023-09-09 13:50:29 +08:00
hiyouga
9f83e93839 add baichuan2 convert script
Former-commit-id: 4d676e0ea9e59c1be13ecb47734917ba78938ac8
2023-09-08 22:59:41 +08:00
hiyouga
692b132dbf fix bug in DPO data collator
Former-commit-id: 4fc262cdf1347691e253bdfbd96568db5a49c086
2023-09-08 20:45:07 +08:00
hiyouga
e70b3e8947 fix #761
Former-commit-id: be76f6cbe5143f781b6b39603b80392253b3080a
2023-09-08 20:22:18 +08:00
hiyouga
612d97db6f change to right-padding, update reward score #803
Former-commit-id: baa90415bc8f5ebd423d001378b51c3a3a6c2ec7
2023-09-08 20:04:31 +08:00
hiyouga
bb1b67c076 fix chatglm template
Former-commit-id: 69a824628b4d6a56a680a7e713b217877c6c15c5
2023-09-08 14:45:58 +08:00
hiyouga
5a75c31caa update requirements
Former-commit-id: d796a4a5709c390629bafbeb7c91fccf6a9076d0
2023-09-07 19:26:25 +08:00
hiyouga
8b9210286b fix #818
Former-commit-id: e81fd458c279ed2f3cee780e517482b425c8886d
2023-09-07 19:19:53 +08:00
hiyouga
b5acec34f7 add deepspeed check in PPO training
Former-commit-id: e203ec7f71f504ccbaa89c27d20b8a0d9fa53f7e
2023-09-07 19:12:40 +08:00
hiyouga
86d835878c fix #809
Former-commit-id: 2783ca75365d7c373cefba039788a48f0b8f35fc
2023-09-07 19:04:32 +08:00
hiyouga
eae7b331d3 fix baichuan templates
Former-commit-id: f48a49e835b32f3991cfad8874c7b9c78953809f
2023-09-07 18:54:14 +08:00
hiyouga
ed89e29bcc update baichuan2 template
Former-commit-id: 16d9f8ba176443c5b397233da621600d6e1e1eec
2023-09-06 21:43:06 +08:00
hiyouga
c2b1886aff add Baichuan2 models
Former-commit-id: 90b3f02c44c0b8cc1b59f37af3a1ec28874a8a61
2023-09-06 18:40:11 +08:00
hiyouga
218f36bca5 add Baichuan2 models
Former-commit-id: 36960025e9274b574f57e7a7bf453cd96956e922
2023-09-06 18:36:04 +08:00
hoshi-hiyouga
b91fc1f5b3 Merge pull request #786 from kinghuin/patch-1
fix utils.py bug

Former-commit-id: 26aad616340748e1594a60119ca9434908bf7465
2023-09-05 10:49:34 +08:00
Q
2a22bf9c15 fix utils.py bug
Former-commit-id: dc490117d50c3cbc070b804bac89400f4290272f
2023-09-05 10:38:01 +08:00
hiyouga
62e2037125 fix #763
Former-commit-id: e424b928a35097b783af879a2290f59b2158801d
2023-09-01 23:13:05 +08:00
hiyouga
e5b72c6a77 refactor dataset_attr, add eos in pt, fix #757
Former-commit-id: 0feec9a830b917b36686b61938a66e842eccf930
2023-09-01 19:00:45 +08:00
codingma
93be211f80 Merge pull request #741 from hiyouga/feature-addDatasetCheck
Feature add dataset check

Former-commit-id: 4b6dabe73d2c7edc94cd495390577c8bcf88428b
2023-08-31 20:57:36 +08:00
codemayq
9ae3fb4ced update llama2 template
Former-commit-id: 01de1d51d9fa5a22a338b6ed18ffad4d0ad5e3e8
2023-08-30 16:23:56 +08:00
codemayq
f641075789 add dataset stage check
Former-commit-id: 5c719a7ce988339d034a653456da9742dc2cec7c
2023-08-30 16:23:08 +08:00
codingma
f7658db1b6 Merge pull request #651 from hiyouga/feature-dataset_stage
add dataset stage

Former-commit-id: 3b0ef57405cbc22ff8ce4eef2cfcb73872519db5
2023-08-28 16:03:45 +08:00
codemayq
b869bc1a20 add ad gen dataset
Former-commit-id: fcd0788aa4dda0cecc1420d369d371032a207810
2023-08-27 20:35:32 +08:00
codemayq
a72d756d77 add text format dataset preview in webui
Former-commit-id: cd30871aadb40cd3d598a6d0b415946744d2d550
2023-08-24 19:45:36 +08:00
codemayq
d3fd8f89b8 add stage in DatasetAttr
Former-commit-id: 9c55200d8de0623640f529dbf39b8b0f169636d3
2023-08-23 20:54:53 +08:00
hiyouga
180a05a446 fix import error
Former-commit-id: b3207a974a45038591b8cbbcf20d1ca1142d6679
2023-08-23 20:45:03 +08:00
hiyouga
eb9ac9ee1f fix #649
Former-commit-id: e6120a937ddb4f3c0b9bcb2466742f5cf4f77f8c
2023-08-23 20:21:15 +08:00
codemayq
a6662b73f5 add readme for dataset
Former-commit-id: bdcb0ea40e726e4c5752f938b379ed9a18e7e1d0
2023-08-23 19:55:45 +08:00
codemayq
cbc7db3478 add dataset stage and filter datasets by the chosen stage in webui
Former-commit-id: 26e4136449a4df6028d834fd16a0f4a7c532759d
2023-08-23 18:54:23 +08:00
hiyouga
4606340f0f fix webui
Former-commit-id: 95304b6822d9fe04bcddc1ee246a56389bd5f96a
2023-08-23 11:03:35 +08:00
hoshi-hiyouga
d4b4ccd597 Merge pull request #644 from hiyouga/fix-quantization_bit
fix quantization_bit being ""

Former-commit-id: e1a8eca182e532b48e472919b4474656a726b40c
2023-08-23 10:45:45 +08:00
codemayq
9c3f4e3a37 fix quantization_bit being ""
Former-commit-id: 0dcab66f8843e2887f9f7ca66334122fef35c5b7
2023-08-23 10:08:17 +08:00
codemayq
440e00d8f9 fix quantization_bit being ""
Former-commit-id: 2469cc16d1dd3f5ee822edc18b2d7021ff7cba03
2023-08-23 10:04:03 +08:00
hiyouga
6310613699 update template
Former-commit-id: a95f3a4d62de1073a78125401cf4289ec0523156
2023-08-22 19:46:09 +08:00
hoshi-hiyouga
f55907dbea Merge pull request #629 from panpan0000/main
add rm dataset explanation

Former-commit-id: c2b4571d0ffb6298d6e07212982d9c13efd65adf
2023-08-22 13:41:44 +08:00
Peter Pan
5cac87d317 add rm dataset explanation
Signed-off-by: Peter Pan <Peter.Pan@daocloud.io>

Former-commit-id: 1efb95025be6501f1b30b20e7c711d3590b5d1ee
2023-08-22 01:33:59 -04:00
hoshi-hiyouga
9c0622de13 Merge pull request #619 from hiyouga/feature-templateTest
add template encode test

Former-commit-id: 8a1587ae49fff3968e0182f4fcc9a65dfdb260fc
2023-08-21 20:56:34 +08:00
codemayq
37b93c8b71 add template encode test
Former-commit-id: c15e0d6847cbc055d8376b3c43ac4fbd17b5877a
2023-08-21 20:51:24 +08:00
hiyouga
d6be98cda6 fix #617
Former-commit-id: a7bdaf1c92c7d798caf8438dc42a8972632ec584
2023-08-21 18:16:11 +08:00
hiyouga
4d128acc17 fix #608
Former-commit-id: c02a6809124fcfd06628c49c95d419ec2d8cc8ef
2023-08-21 17:49:36 +08:00
hiyouga
516df9ecce fix baichuan template for training #597 #616
Former-commit-id: 6530c1d972301eac9ef058b3235618bb09833f15
2023-08-21 17:41:51 +08:00
hiyouga
8eec1d50e1 fix #595
Former-commit-id: a360ccf9aa0484ce783eaa5857cf698b3ac2051e
2023-08-20 16:40:00 +08:00
hoshi-hiyouga
cfb096d43a Merge pull request #596 from beat4ocean/beat
fix KeyError: 'lang' bug

Former-commit-id: dd22541cdf1b832d20bb894d78c034afce841bfb
2023-08-20 16:37:40 +08:00
beat4ocean
713fa28804 fix KeyError: 'lang' bug
Former-commit-id: 4d4d9172b1f362cb4876315f1f5739e417055065
2023-08-20 15:32:36 +08:00
hiyouga
5549f35939 fix ppo trainer #551
Former-commit-id: 050a5447c191b8c50a0826a0f03bae499bff8b48
2023-08-20 14:07:11 +08:00
hiyouga
6eed1db36c Release v0.1.7
Former-commit-id: 81abe8d6cabaa1ebe74dc32a5dc143389e4c9f31
2023-08-18 17:21:27 +08:00
hiyouga
948124f55e tiny fix
Former-commit-id: 0ee159654ac6339c162745b004e2152ba6fe3c81
2023-08-18 13:07:35 +08:00
hiyouga
2b191ca776 support ppo score norm (trl 0.5.1.dev required)
Former-commit-id: 2b25db6d260ec1532281a592e873579346c7d21c
2023-08-18 12:02:42 +08:00
hiyouga
be4d2822ea fix PPO trainer #551, update readme
Former-commit-id: faead74849470cebae9e37cde5fab2a71b32aa43
2023-08-18 11:43:10 +08:00
hiyouga
736ddd0319 update readme
Former-commit-id: beaf2fb737dbe64d35334d88b42935c89ef09eee
2023-08-18 01:51:55 +08:00
hiyouga
dfa289aa72 Update .gitignore
Former-commit-id: a1772a4dfef8dfaf7c2c321fad0a70ccf95fe6a0
2023-08-18 01:43:42 +08:00
hiyouga
c2644f939a update training resuming
Former-commit-id: 2ec75c31f609e65116ac3b621eeb7d8ccbf69135
2023-08-18 01:41:17 +08:00
hoshi-hiyouga
f11c1ae562 Merge pull request #434 from niuba/main
add last_checkpoint support

Former-commit-id: b78d461f2826c194c332ead37825704c2cb8b910
2023-08-18 01:38:31 +08:00
hoshi-hiyouga
3126164aa6 Merge branch 'main' into main
Former-commit-id: 870d2c7bf74d0da5a927bef4b8b01d15cc66a3e9
2023-08-18 01:37:23 +08:00
hiyouga
ed10486cad support bf16 ppo #551
Former-commit-id: 092088967de7409a2d51847cfc7afc83a8887320
2023-08-18 00:40:32 +08:00
hiyouga
04fa430c6c fix ChatGLM2 ppo #527 #528
Former-commit-id: 60d6ad64d7c9f6445b0df8de0153c3a311974198
2023-08-18 00:34:59 +08:00
hiyouga
fa1893b59c fix generation bug #532
Former-commit-id: c071121e67374e5f09798db57cfc8668617a36ae
2023-08-17 22:21:34 +08:00
hiyouga
e993e717a5 fix streaming in pt stage #548 #549
Former-commit-id: 050e992bee2a9293cc7399b578de807b5bf9bddc
2023-08-17 17:59:26 +08:00
hiyouga
c80e56423a update readme
Former-commit-id: b74af3c9cf29e1690ae4d5acb27599b1abd152e2
2023-08-17 11:00:22 +08:00
hiyouga
ffa09a01d6 fix baichuan and intern template
Former-commit-id: e1fd18fa6ef1009f978aca5210a259251a0b19a6
2023-08-17 01:27:20 +08:00
hiyouga
7d04f8567b fix generation
Former-commit-id: 66a0300d312ef91c24fcf80667fa3b0bb8e1a342
2023-08-16 22:39:54 +08:00
hiyouga
baa709674f fix system prompt
Former-commit-id: 411e775aa939bdd154a3f1e92921ede90d989f18
2023-08-16 01:35:52 +08:00
hiyouga
ca9a494d0c fix baichuan template #481
Former-commit-id: 7608c6c25877d97ef26a1c209c4073c9c42f4535
2023-08-15 11:38:21 +08:00
hoshi-hiyouga
37eb8c05cc Merge pull request #516 from liuyanyi/add_gitignore
[Enhance] Add .gitignore file

Former-commit-id: 12cfe5482f5ef95d8c386d0af0de381e72eab0f9
2023-08-15 11:25:40 +08:00
hiyouga
7c046edb7b fix ChatGLM RLHF
Former-commit-id: 4e43e887e432ceb7e9287b4e309b63af3c3ba1bf
2023-08-15 11:19:20 +08:00
Yanyi Liu
22cea38b20 Add .gitignore
Former-commit-id: a2ebdeef81706596617da4409fc5da71739bccdc
2023-08-15 11:13:45 +08:00
hiyouga
ef2ca0a827 alert pad_token source
Former-commit-id: f26a84e0d927d2554890daf431a93652e18f4235
2023-08-15 00:07:56 +08:00
hiyouga
7f0b908de2 update webui
Former-commit-id: da30d0fb4abdb825f3383ddd106bb06a84695b7a
2023-08-14 22:45:26 +08:00
hoshi-hiyouga
5fc5e776ff Merge pull request #511 from hiyouga/feature-autoTemplate
add template match and stage in webui

Former-commit-id: 413752ecba845cddaff5fb48db7d3d24b960eec1
2023-08-14 22:44:04 +08:00
codemayq
93b281c016 auto match template when change model_name
Former-commit-id: ab2d7ab0572765ce33a52ac71641062d5d904db4
2023-08-14 20:56:05 +08:00
codemayq
9585699918 add template match and stage in webui
Former-commit-id: d6283e7f041f08f76d18350cb5f6a6c58ca80e92
2023-08-14 20:42:59 +08:00
hiyouga
bceaba551d fix ChatGLM lm_head #494
Former-commit-id: bf0048abdaeb2b9592d38ac991704ad014370b47
2023-08-14 14:14:48 +08:00
hiyouga
0bfeed3a7e fix bug in webui
Former-commit-id: c95f0f687689934379b6c24abf872ffcde06073b
2023-08-14 11:38:42 +08:00
hiyouga
70a780c3c0 fix webui cache
Former-commit-id: 9aba5c197fbc8abaab77f454374f8b497f0310d0
2023-08-14 11:37:01 +08:00
hiyouga
d74ab5306c update readme_zh
Former-commit-id: bdfe7e0285fdeb3a2728669dbdabf70c9652735c
2023-08-14 11:13:25 +08:00
hiyouga
688e8601ab web UI integrating RLHF
Former-commit-id: 137fd146b90f89a1164b56e6d507b30b1f5c2437
2023-08-14 10:48:47 +08:00
hiyouga
4933ab5956 fix #480
Former-commit-id: ec15ca8fffacba2c34e1849c5ce90ca9989d66a2
2023-08-14 00:23:56 +08:00
hiyouga
6c7225a5d4 fix webui
Former-commit-id: 2c8b7414be9b43e20cc1d0575cc4dc1c7545fd86
2023-08-12 23:52:07 +08:00
hiyouga
a22982f2fa tiny fix
Former-commit-id: 50a34c043de6d9e1410291e1d8c1ea9d53754e9e
2023-08-12 22:02:43 +08:00
hiyouga
c95479dddb fix rope scaling
Former-commit-id: 2e0dd36700ec5e8294581c1db4b9431f755fc5f8
2023-08-12 22:00:01 +08:00
hiyouga
fc48bd8da0 update readme
Former-commit-id: 94ac570cb62aa9cd5dba105f0bb4c4da43eca042
2023-08-12 21:29:06 +08:00
hiyouga
d5323bfa3f update readme
Former-commit-id: ecfe87f34b383901f8e97ffb90af459cd55419b1
2023-08-12 21:25:19 +08:00
hiyouga
e9d4a2b507 update readme
Former-commit-id: eadbe9b7a0b6c8897e7a763b519cc5b7e00f3b2c
2023-08-12 21:23:05 +08:00
hiyouga
37bcbe8046 update readme
Former-commit-id: 6fa381400c21fa249cebcdff8c3afd72f8de20b3
2023-08-12 21:00:11 +08:00
hiyouga
fdfb644f0a support rope scaling, fix #475 #476 #478
Former-commit-id: 337d5f68b72230e545e7a94ca789187c7a2b7187
2023-08-12 20:46:27 +08:00
hoshi-hiyouga
cde9f3db57 Merge pull request #479 from hiyouga/feature-addCmdExport
add sft script preview in webui

Former-commit-id: 060225e57d13d8164beb6920410c181fbb28b77a
2023-08-12 20:41:52 +08:00
codemayq
8bf5a98815 add sft script preview in webui
Former-commit-id: 2b72649b404750226aa418b61ef5a6c9ac03938f
2023-08-12 13:53:55 +08:00
hiyouga
be566a15a5 fix unusual output of 8bit models #278 #391
Former-commit-id: 337ce5272b81f5561162beb08814b0e5abf23703
2023-08-12 00:25:29 +08:00
niuba
458955d0fb add last_checkpoint support
Former-commit-id: 9f1977e4de00b14a9d1b555c25bcaf12998d5046
2023-08-09 16:39:27 +08:00
263 changed files with 21595 additions and 7131 deletions

13
.dockerignore Normal file

@@ -0,0 +1,13 @@
.vscode
.git
.github
.venv
cache
data
hf_cache
output
examples
.dockerignore
.gitattributes
.gitignore
Dockerfile

128
.github/CODE_OF_CONDUCT.md vendored Normal file

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
`hoshihiyouga AT gmail DOT com`.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

21
.github/CONTRIBUTING.md vendored Normal file

@@ -0,0 +1,21 @@
# Contributing to LLaMA Factory
Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable.
It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you.
However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md).
**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).**
## Ways to contribute
There are several ways you can contribute to LLaMA Factory:
* Fix outstanding issues with the existing code.
* Submit issues related to bugs or desired new features.
* Contribute to the examples or to the documentation.
### Style guide
LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
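In practice, running `make style && make quality` with the Makefile added in this release (shown later in this diff) is a convenient way to check your changes against the style guide before opening a pull request.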

58
.github/ISSUE_TEMPLATE/bug-report.yml vendored Normal file

@@ -0,0 +1,58 @@
name: "\U0001F41B Bug / Help"
description: Create a report to help us improve the LLaMA Factory
body:
- type: checkboxes
id: reminder
attributes:
label: Reminder
description: |
Please ensure you have read the README carefully and searched the existing issues.
请确保您已经认真阅读了 README 并且搜索过现有的 Issue。
options:
- label: I have read the README and searched the existing issues.
required: true
- type: textarea
id: system-info
validations:
required: true
attributes:
label: System Info
description: |
Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
placeholder: llamafactory version, platform, python version, ...
- type: textarea
id: reproduction
validations:
required: true
attributes:
label: Reproduction
description: |
Please provide code snippets, error messages and stack traces that reproduces the problem.
请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
Remember to use Markdown tags to correctly format your code.
请合理使用 Markdown 标签来格式化您的文本。
placeholder: |
llamafactory-cli train ...
- type: textarea
id: expected-behavior
validations:
required: false
attributes:
label: Expected behavior
description: |
Please provide a clear and concise description of what you would expect to happen.
请提供您原本的目的,即这段代码的期望行为。
- type: textarea
id: others
validations:
required: false
attributes:
label: Others

7
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,7 @@
# What does this PR do?
Fixes # (issue)
## Before submitting
- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?

7
.github/SECURITY.md vendored Normal file

@@ -0,0 +1,7 @@
# Reporting Security Issues
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/hiyouga/LLaMA-Factory/security/advisories/new) tab.
We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
Report security bugs in third-party modules to the person or team maintaining the module.

39
.github/workflows/tests.yml vendored Normal file

@@ -0,0 +1,39 @@
name: tests

on:
  push:
    branches:
      - main
    paths:
      - "**.py"
      - "requirements.txt"
      - ".github/workflows/*.yml"
  pull_request:
    branches:
      - main
    paths:
      - "**.py"
      - "requirements.txt"
      - ".github/workflows/*.yml"

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.8"
          cache: "pip"
          cache-dependency-path: "setup.py"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install .[torch,dev]
      - name: Check quality
        run: |
          make style && make quality
      - name: Test with pytest
        run: |
          make test

165
.gitignore vendored Normal file

@@ -0,0 +1,165 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
# custom .gitignore
user.config
saves/
cache/

37
CITATION.cff Normal file

@@ -0,0 +1,37 @@
cff-version: 1.2.0
date-released: 2024-03
message: "If you use this software, please cite it as below."
authors:
  - family-names: "Zheng"
    given-names: "Yaowei"
  - family-names: "Zhang"
    given-names: "Richong"
  - family-names: "Zhang"
    given-names: "Junhao"
  - family-names: "Ye"
    given-names: "Yanhan"
  - family-names: "Luo"
    given-names: "Zheyan"
  - family-names: "Ma"
    given-names: "Yongqiang"
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
url: "https://arxiv.org/abs/2403.13372"
preferred-citation:
  type: article
  authors:
    - family-names: "Zheng"
      given-names: "Yaowei"
    - family-names: "Zhang"
      given-names: "Richong"
    - family-names: "Zhang"
      given-names: "Junhao"
    - family-names: "Ye"
      given-names: "Yanhan"
    - family-names: "Luo"
      given-names: "Zheyan"
    - family-names: "Ma"
      given-names: "Yongqiang"
  journal: "arXiv preprint arXiv:2403.13372"
  title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
  url: "https://arxiv.org/abs/2403.13372"
  year: 2024

14
Dockerfile Normal file

@@ -0,0 +1,14 @@
FROM nvcr.io/nvidia/pytorch:24.01-py3
WORKDIR /app
COPY requirements.txt /app/
RUN pip install -r requirements.txt
COPY . /app/
RUN pip install -e .[metrics,bitsandbytes,qwen]
VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
EXPOSE 7860
CMD [ "llamafactory-cli", "webui" ]

14
Makefile Normal file

@@ -0,0 +1,14 @@
.PHONY: quality style test

check_dirs := scripts src tests

quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)

style:
	ruff check $(check_dirs) --fix
	ruff format $(check_dirs)

test:
	pytest tests/

773
README.md

@@ -1,110 +1,288 @@
# LLaMA Efficient Tuning
![# LLaMA Factory](assets/logo.png)
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Efficient-Tuning?style=social)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/stargazers)
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Efficient-Tuning)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Efficient-Tuning)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/commits/main)
[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/pulls)
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
[![Citation](https://img.shields.io/badge/citation-44-green)](#projects-using-llama-factory)
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
[![GitHub Trend](https://trendshift.io/api/badge/repositories/4535)](https://trendshift.io/repositories/4535)
👋 Join our [WeChat](assets/wechat.jpg).
\[ English | [中文](README_zh.md) \]
**Fine-tuning a large language model can be as easy as...**
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/9840a653-7e9c-41c8-ae89-7ace5698baf6
Choose your path:
- **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
- **Local machine**: Please refer to [usage](#getting-started)
## Table of Contents
- [Features](#features)
- [Benchmark](#benchmark)
- [Changelog](#changelog)
- [Supported Models](#supported-models)
- [Supported Training Approaches](#supported-training-approaches)
- [Provided Datasets](#provided-datasets)
- [Requirement](#requirement)
- [Getting Started](#getting-started)
- [Projects using LLaMA Factory](#projects-using-llama-factory)
- [License](#license)
- [Citation](#citation)
- [Acknowledgement](#acknowledgement)
## Features
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
- **Scalable resources**: 32-bit full-tuning, 16-bit freeze-tuning, 16-bit LoRA and 2/4/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8.
- **Advanced algorithms**: GaLore, BAdam, DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and Agent tuning.
- **Practical tricks**: FlashAttention-2, Unsloth, RoPE scaling, NEFTune and rsLoRA.
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.
## Benchmark
Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging the 4-bit quantization technique, LLaMA Factory's QLoRA further improves GPU memory efficiency.
![benchmark](assets/benchmark.svg)
<details><summary>Definitions</summary>
- **Training Speed**: the number of training samples processed per second during training. (bs=4, cutoff_len=1024)
- **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024)
- **GPU Memory**: Peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024)
- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA tuning.
</details>
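For reference, the LoRA side of this benchmark roughly corresponds to a config like the sketch below. The key names mirror the arguments quoted in the definitions above (`lora_rank`, `cutoff_len`, the batch size); the actual benchmark script is not part of this diff, so treat this as an assumption.

```yaml
# hypothetical settings matching the benchmark definitions above
finetuning_type: lora
lora_rank: 32
per_device_train_batch_size: 4
cutoff_len: 1024
```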
## Changelog
[23/08/11] Now we support **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [this example](#dpo-training) to train your models (experimental feature).
[24/06/07] We supported fine-tuning the **[Qwen-2](https://qwenlm.github.io/blog/qwen2/)** series models.
[23/08/03] Now we support training the **Qwen-7B** model in this repo. Try `--model_name_or_path Qwen/Qwen-7B-Chat` and `--lora_target c_attn` arguments to train the Qwen-7B model. Remember to use `--template chatml` argument when you are using the Qwen-7B-Chat model.
[24/06/05] We supported fine-tuning the **[GLM-4-9B/GLM-4-9B-Chat](https://github.com/THUDM/GLM-4)** models.
[23/07/31] Now we support dataset streaming. Try `--streaming` and `--max_steps 100` arguments to stream your dataset.
[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage.
[23/07/29] We release two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/baichuan-13b-sft)) for details.
<details><summary>Full Changelog</summary>
[23/07/19] Now we support training the **LLaMA-2** models in this repo. Try `--model_name_or_path meta-llama/Llama-2-7b-hf` argument to use the LLaMA-2 model. Remember to use `--template llama2` argument when you are using the LLaMA-2-chat model.
[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models; you need to fine-tune them with the `gemma` template for chat completion.
[23/07/18] We developed an all-in-one Web UI for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage.
[23/07/11] Now we support training the **Baichuan-13B** model in this repo. Try `--model_name_or_path baichuan-inc/Baichuan-13B-Base` and `--lora_target W_pack` arguments to train the Baichuan-13B model. Remember to use `--template baichuan` argument when you are using the Baichuan-13B-Chat model.
[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
[23/07/09] Now we release [FastEdit](https://github.com/hiyouga/FastEdit)⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
[23/07/07] Now we support training the **InternLM-7B** model in this repo. Try `--model_name_or_path internlm/internlm-7b` argument to use the InternLM model. Remember to use `--template intern` argument when you are using the InternLM-chat model.
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
[23/07/05] Now we support training the **Falcon-7B/40B** models in this repo. Try `--model_name_or_path tiiuae/falcon-7b` and `--lora_target query_key_value` arguments to use the Falcon model.
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
[23/06/29] We provide a **reproducible example** of training a chat model using instruction-following datasets, see this [Hugging Face Repo](https://huggingface.co/hiyouga/baichuan-7b-sft) for details.
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)**. See [examples](examples/README.md) for usage.
[23/06/22] Now we align the [demo API](src/api_demo.py) with [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format, so you can insert the fine-tuned model into **arbitrary ChatGPT-based applications**.
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory usage compared with FlashAttention-2; more benchmarks can be found on [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
[23/06/15] Now we support training the **Baichuan-7B** model in this repo. Try `--model_name_or_path baichuan-inc/Baichuan-7B` and `--lora_target W_pack` arguments to use the Baichuan-7B model.
[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README.md) for usage.
[23/06/03] Now we support quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). Try `--quantization_bit 4/8` argument to work with quantized models.
[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available at arXiv!
[23/05/31] Now we support training the **BLOOM & BLOOMZ** models in this repo. Try `--model_name_or_path bigscience/bloomz-7b1-mt` and `--lora_target query_key_value` arguments to use the BLOOMZ model.
[24/03/20] We supported **FSDP+QLoRA** that fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README.md) for usage.
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
[24/03/07] We supported gradient low-rank projection (**[GaLore](https://arxiv.org/abs/2403.03507)**) algorithm. See [examples](examples/README.md) for usage.
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `use_dora: true` to activate DoRA training.
[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README.md) for usage.
[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
[24/01/18] We supported **agent tuning** for most models, equipping the model with tool-using abilities by fine-tuning with `dataset: glaive_toolcall_en`.
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try the `use_unsloth: true` argument to activate the unsloth patch. It achieves **170%** speed in our benchmark; check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for Chinese mainland users. See [this tutorial](#download-from-modelscope-hub) for usage.
[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `shift_attn: true` argument to enable shift short attention.
[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [examples](examples/README.md) for usage.
[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try `flash_attn: fa2` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs.
[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try the `rope_scaling: linear` argument in training and the `rope_scaling: dynamic` argument at inference to extrapolate the position embeddings (see the combined sketch after this list).
[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README.md) for usage.
[23/07/31] We supported **dataset streaming**. Try `streaming: true` and `max_steps: 10000` arguments to load your dataset in streaming mode.
[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
[23/06/22] We aligned the [demo API](src/api_demo.py) with [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format, so you can insert the fine-tuned model into **arbitrary ChatGPT-based applications**.
[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README.md) for usage.
</details>
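Several of the tricks listed above are enabled by single config flags. A combined sketch follows, with flag names taken verbatim from the changelog entries; whether every pair of tricks composes cleanly depends on the model and backend, so this is illustrative rather than a recommended recipe.

```yaml
# hypothetical combination of flags named in the changelog above
flash_attn: fa2          # FlashAttention-2
rope_scaling: linear     # RoPE scaling for longer contexts
neftune_noise_alpha: 5   # NEFTune noise injection
use_dora: true           # weight-decomposed LoRA (DoRA)
```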
## Supported Models
| Model | Model size | Default module | Template |
| -------------------------------------------------------- | --------------------------- | ----------------- |----------|
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B | query_key_value | - |
| [Baichuan](https://github.com/baichuan-inc/baichuan-13B) | 7B/13B | W_pack | baichuan |
| [InternLM](https://github.com/InternLM/InternLM) | 7B | q_proj,v_proj | intern |
| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B | c_attn | chatml |
| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | - |
| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) | 6B | query_key_value | chatglm2 |
| Model | Model size | Template |
| -------------------------------------------------------- | -------------------------------- | --------- |
| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | gemma |
| [GLM4](https://huggingface.co/THUDM) | 9B | glm4 |
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 |
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | vicuna |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma](https://huggingface.co/google) | 3B | gemma |
| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | qwen |
| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen |
| [Qwen2 (MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/7B/57B/72B | qwen |
| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
- **Default module** is used for the `--lora_target` argument. Please use `python src/train_bash.py -h` to see all available options.
- For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the corresponding template for the "chat" models.
> [!NOTE]
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
>
> Remember to use the **SAME** template in training and inference.
Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of supported models.
You can also add a custom chat template to [template.py](src/llamafactory/data/template.py).
## Supported Training Approaches
| Approach | Full-parameter | Partial-parameter | LoRA | QLoRA |
| ---------------------- | -------------- | ----------------- | ---- | ----- |
| Pre-Training | ✅ | ✅ | ✅ | ✅ |
| Supervised Fine-Tuning | ✅ | ✅ | ✅ | ✅ |
| Reward Model Training | | | ✅ | ✅ |
| PPO Training | | | ✅ | ✅ |
| DPO Training | ✅ | | ✅ | ✅ |
| Approach | Full-tuning | Freeze-tuning | LoRA | QLoRA |
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
## Provided Datasets
- For pre-training:
- [Wiki Demo (en)](data/wiki_demo.txt)
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
- For supervised fine-tuning:
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [Self-cognition (zh)](data/self_cognition.json)
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
- For reward modelling or DPO training:
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
<details><summary>Pre-training datasets</summary>
Please refer to [data/README.md](data/README.md) for details.
- [Wiki Demo (en)](data/wiki_demo.txt)
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
</details>
<details><summary>Supervised fine-tuning datasets</summary>
- [Identity (en&zh)](data/identity.json)
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
</details>
<details><summary>Preference datasets</summary>
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
</details>
Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands.
@@ -115,320 +293,273 @@ huggingface-cli login
## Requirement
- Python 3.8+ and PyTorch 1.13.1+
- 🤗Transformers, Datasets, Accelerate, PEFT and TRL
- sentencepiece and tiktoken
- jieba, rouge-chinese and nltk (used at evaluation)
- gradio and matplotlib (used in web_demo.py)
- uvicorn, fastapi and sse-starlette (used in api_demo.py)
| Mandatory | Minimum | Recommend |
| ------------ | ------- | --------- |
| python | 3.8 | 3.11 |
| torch | 1.13.1 | 2.3.0 |
| transformers | 4.41.2 | 4.41.2 |
| datasets | 2.16.0 | 2.19.2 |
| accelerate | 0.30.1 | 0.30.1 |
| peft | 0.11.1 | 0.11.1 |
| trl | 0.8.6 | 0.9.4 |
And **powerful GPUs**!
| Optional | Minimum | Recommend |
| ------------ | ------- | --------- |
| CUDA | 11.6 | 12.2 |
| deepspeed | 0.10.0 | 0.14.0 |
| bitsandbytes | 0.39.0 | 0.43.1 |
| vllm | 0.4.3 | 0.4.3 |
| flash-attn | 2.3.0 | 2.5.9 |
### Hardware Requirement
\* *estimated*
| Method | Bits | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
## Getting Started
### Data Preparation (optional)
### Installation
Please refer to `data/example_dataset` for checking the details about the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset.
Note: please update `data/dataset_info.json` to use your custom dataset. About the format of this file, please refer to `data/README.md`.
### Dependence Installation (optional)
> [!IMPORTANT]
> Installation is mandatory.
```bash
git clone https://github.com/hiyouga/LLaMA-Efficient-Tuning.git
conda create -n llama_etuning python=3.10
conda activate llama_etuning
cd LLaMA-Efficient-Tuning
pip install -r requirements.txt
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory
pip install -e '.[torch,metrics]'
```
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you will be required to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.1.
Extra dependencies available: torch, torch_npu, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
> [!TIP]
> Use `pip install --no-deps -e .` to resolve package conflicts.
<details><summary>For Windows users</summary>
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.2, please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
```bash
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
```
### All-in-one Web UI
To enable FlashAttention-2 on the Windows platform, you need to install the precompiled `flash-attn` library, which supports CUDA 12.1 to 12.2. Please download the corresponding version from [flash-attention](https://github.com/bdashore3/flash-attention/releases) based on your requirements.
</details>
<details><summary>For Ascend NPU users</summary>
Join [NPU user group](assets/wechat_npu.jpg).
To install LLaMA Factory on Ascend NPU devices, please specify extra dependencies: `pip install -e '.[torch-npu,metrics]'`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_web.py
```

```bash
# replace the url according to your CANN version and devices
# install CANN Toolkit
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
# install CANN Kernels
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
# set env variables
source /usr/local/Ascend/ascend-toolkit/set_env.sh
```
Currently the web UI only supports training on **a single GPU**.
| Requirement | Minimum | Recommend |
| ------------ | ------- | ----------- |
| CANN | 8.0.RC1 | 8.0.RC1 |
| torch | 2.1.0 | 2.1.0 |
| torch-npu | 2.1.0 | 2.1.0.post3 |
| deepspeed | 0.13.2 | 0.13.2 |
### Pre-Training
Docker image:
- 32GB: [Download page](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
- 64GB: [Download page](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
If you cannot run model inference on NPU devices, try setting `do_sample: false` in the configurations.
</details>
### Data Preparation
Please refer to [data/README.md](data/README.md) for details about the format of dataset files. You can either use datasets on the HuggingFace / ModelScope hub or load a dataset from local disk.
> [!NOTE]
> Please update `data/dataset_info.json` to use your custom dataset.
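As an illustration only, a minimal entry in `data/dataset_info.json` for a local alpaca-style file might look like the sketch below; the authoritative schema lives in `data/README.md`, and `my_dataset` / `my_dataset.json` are placeholder names.

```json
{
  "my_dataset": {
    "file_name": "my_dataset.json"
  }
}
```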
### Quickstart
Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage pt \
--model_name_or_path path_to_your_model \
--do_train \
--dataset wiki_demo \
--template default \
--finetuning_type lora \
--output_dir path_to_pt_checkpoint \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
```

```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```
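The YAML files passed to `llamafactory-cli` bundle what previously required long `train_bash.py` invocations. As a sketch of what `examples/lora_single_gpu/llama3_lora_sft.yaml` plausibly contains (key names mirror the legacy CLI arguments shown elsewhere in this README; the shipped example file may differ):

```yaml
# hypothetical sketch of a LoRA SFT config
stage: sft
do_train: true
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
dataset: identity
template: llama3
finetuning_type: lora
output_dir: saves/llama3-8b/lora/sft
per_device_train_batch_size: 4
gradient_accumulation_steps: 4
learning_rate: 5.0e-5
num_train_epochs: 3.0
```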
### Supervised Fine-Tuning
See [examples/README.md](examples/README.md) for advanced usage (including distributed training).
> [!TIP]
> Use `llamafactory-cli help` to show help information.
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
#### Use local environment
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_train \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
```
Remember to specify `--lora_target W_pack` if you are using Baichuan models.
### Reward Model Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage rm \
--model_name_or_path path_to_your_model \
--do_train \
--dataset comparison_gpt4_en \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_rm_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss \
--fp16
```
### PPO Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage ppo \
--model_name_or_path path_to_your_model \
--do_train \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--reward_model path_to_rm_checkpoint \
--output_dir path_to_ppo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss
```
### DPO Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage dpo \
--model_name_or_path path_to_your_model \
--do_train \
--dataset comparison_gpt4_en \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_dpo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss \
--fp16
```
### Distributed Training
```bash
accelerate config # configure the environment
accelerate launch src/train_bash.py # arguments (same as above)
```
<details><summary>Example configuration for full-tuning with DeepSpeed ZeRO-2</summary>
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 4
  gradient_clipping: 0.5
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

```bash
CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui
```
</details>
### Evaluation (BLEU and ROUGE_CHINESE)
#### Use Docker
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_eval \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_eval_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \
--predict_with_generate
```

```bash
docker build -f ./Dockerfile -t llama-factory:latest .
docker run --gpus=all \
-v ./hf_cache:/root/.cache/huggingface/ \
-v ./data:/app/data \
-v ./output:/app/output \
-p 7860:7860 \
--shm-size 16G \
--name llama_factory \
-d llama-factory:latest
```
We recommend using `--per_device_eval_batch_size=1` and `--max_target_length 128` for 4/8-bit evaluation.
### Predict
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_predict \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_predict_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \
--predict_with_generate
```
#### Use Docker Compose
```bash
docker compose -f ./docker-compose.yml up -d
```
<details><summary>Details about volumes</summary>
- hf_cache: Utilize the Hugging Face cache on the host machine. It can be reassigned if a cache already exists in a different directory.
- data: Place datasets in this directory on the host machine so that they can be selected in the LLaMA Board GUI.
- output: Set the export directory to this location so that the merged result can be accessed directly on the host machine.
</details>
### API Demo
```bash
python src/api_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
Visit `http://localhost:8000/docs` for the API documentation.
### Deploy with OpenAI-style API and vLLM
```bash
CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
```
> [!TIP]
> Visit https://platform.openai.com/docs/api-reference/chat/create for the API reference.
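As a quick smoke test, you can send a request with curl; a sketch assuming the server above is running (the `/v1/chat/completions` route follows the OpenAI convention, and the model field is a placeholder):
```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{"model": "path_to_your_model", "messages": [{"role": "user", "content": "Hello!"}]}'
```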
### CLI Demo
```bash
python src/cli_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
### Web Demo
```bash
python src/web_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
### Download from ModelScope Hub
If you have trouble downloading models and datasets from Hugging Face, you can use ModelScope instead.
```bash
export USE_MODELSCOPE_HUB=1 # use `set USE_MODELSCOPE_HUB=1` on Windows
```
Train the model by specifying a model ID from the ModelScope Hub as `model_name_or_path`. You can find the full list of model IDs on the [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
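For example, a hypothetical SFT run that pulls the base model from ModelScope (the dataset and output path are placeholders):
```bash
export USE_MODELSCOPE_HUB=1
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path LLM-Research/Meta-Llama-3-8B-Instruct \
--do_train \
--dataset alpaca_gpt4_en \
--template llama3 \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint
```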
### Use W&B Logger
To log experimental results with [Weights & Biases](https://wandb.ai), add the following arguments:
```yaml
report_to: wandb
run_name: test_run # optional
```
Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching the training task to log in with your W&B account.
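For instance, a sketch of a W&B-logged run (`--report_to` and `--run_name` are the standard Hugging Face `TrainingArguments` flags; the key is a placeholder):
```bash
export WANDB_API_KEY=your_api_key # placeholder: get yours at https://wandb.ai/authorize
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_train \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint \
--report_to wandb \
--run_name test_run
```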
### Export model
```bash
python src/export_model.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_export
```
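After exporting, the merged weights are self-contained, so downstream commands no longer need `--checkpoint_dir`; a sketch using the CLI demo above:
```bash
python src/cli_demo.py \
--model_name_or_path path_to_export \
--template default
```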
## TODO
- [ ] Supporting flash attention ([torch](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) / [xformers](https://github.com/facebookresearch/xformers) / [flashattn](https://github.com/Dao-AILab/flash-attention)).
- [ ] Implementing multi-query attention for faster inference.
- [ ] Supporting full-parameter RLHF training.
## Projects using LLaMA Factory
If you have a project that should be incorporated, please contact us via email or create a pull request.
<details><summary>Click to show</summary>
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140)
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for Stable Diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B.
</details>
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).
Please follow the model licenses to use the corresponding model weights:
- [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md)
- [LLaMA-2](https://ai.meta.com/llama/license/)
- [BLOOM](https://huggingface.co/spaces/bigscience/license)
- [Falcon](LICENSE)
- [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)
- [InternLM](https://github.com/InternLM/InternLM#open-source-license)
- [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE)
- [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf)
- [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE)
Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
## Citation
If this work is helpful, please kindly cite as:
```bibtex
@Misc{llama-efficient-tuning,
title = {LLaMA Efficient Tuning},
author = {hiyouga},
howpublished = {\url{https://github.com/hiyouga/LLaMA-Efficient-Tuning}},
year = {2023}
}
@article{zheng2024llamafactory,
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
journal={arXiv preprint arXiv:2403.13372},
year={2024},
url={http://arxiv.org/abs/2403.13372}
}
```
## Acknowledgement
This repo is a sibling of [ChatGLM-Efficient-Tuning](https://github.com/hiyouga/ChatGLM-Efficient-Tuning); they share a similar code structure for efficient tuning of large language models.
This repo benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works.
## Star History
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Efficient-Tuning&type=Date)
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)


@@ -1,109 +1,288 @@
# LLaMA Efficient Tuning
![# LLaMA Factory](assets/logo.png)
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Efficient-Tuning?style=social)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/stargazers)
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Efficient-Tuning)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Efficient-Tuning)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/commits/main)
[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/pulls)
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
[![Citation](https://img.shields.io/badge/citation-44-green)](#projects-using-llama-factory)
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
[![GitHub Tread](https://trendshift.io/api/badge/repositories/4535)](https://trendshift.io/repositories/4535)
👋 Join our [WeChat group](assets/wechat.jpg).
\[ [English](README.md) | Chinese \]
**Fine-tuning a large language model can be as easy as this...**
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd-d76c6d0a6594
Choose your way to get started:
- **Colab**: https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
- **Local machine**: please refer to [Getting Started](#getting-started)
## Table of Contents
- [Features](#features)
- [Benchmark](#benchmark)
- [Changelog](#changelog)
- [Supported Models](#supported-models)
- [Supported Training Approaches](#supported-training-approaches)
- [Provided Datasets](#provided-datasets)
- [Requirements](#requirements)
- [Getting Started](#getting-started)
- [Projects using LLaMA Factory](#projects-using-llama-factory)
- [License](#license)
- [Citation](#citation)
- [Acknowledgement](#acknowledgement)
## Features
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
- **Integrated methods**: (continuous) pre-training, multimodal supervised fine-tuning, reward modeling, PPO training, DPO training, KTO training, ORPO training, etc.
- **Scalable resources**: 32-bit full-parameter tuning, 16-bit freeze-tuning, 16-bit LoRA fine-tuning and 2/4/8-bit QLoRA fine-tuning via AQLM/AWQ/GPTQ/LLM.int8.
- **Advanced algorithms**: GaLore, BAdam, DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and agent tuning.
- **Practical tricks**: FlashAttention-2, Unsloth, RoPE scaling, NEFTune and rsLoRA.
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
- **Faster inference**: OpenAI-style API, browser UI and CLI backed by vLLM.
## Benchmark
Compared with ChatGLM's official [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) fine-tuning, LLaMA Factory's LoRA tuning offers a **3.7x** speedup together with a higher Rouge score on the advertising text generation task. Combined with 4-bit quantization, LLaMA Factory's QLoRA further reduces GPU memory consumption.
![benchmark](assets/benchmark.svg)
<details><summary>Definitions</summary>
- **Training Speed**: the number of training samples processed per second. (batch size=4, cutoff length=1024)
- **Rouge Score**: Rouge-2 score on the validation set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (batch size=4, cutoff length=1024)
- **GPU Memory**: peak GPU memory usage in 4-bit quantized training. (batch size=1, cutoff length=1024)
- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA tuning.
</details>
## Changelog
[23/08/11] Now we support **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [this example](#dpo-training) for details. (experimental feature)
[24/06/07] We supported fine-tuning the **[Qwen-2](https://qwenlm.github.io/blog/qwen2/)** series models.
[23/08/03] Now we support training the **Qwen-7B** model. Try the `--model_name_or_path Qwen/Qwen-7B-Chat` and `--lora_target c_attn` arguments. Remember to add `--template chatml` when using the Qwen-7B-Chat model.
[24/06/05] We supported fine-tuning the **[GLM-4-9B/GLM-4-9B-Chat](https://github.com/THUDM/GLM-4)** models.
[23/07/31] Now we support dataset streaming. Try the `--streaming` and `--max_steps 100` arguments to load your dataset in streaming mode.
[24/05/26] We supported the **[SimPO](https://arxiv.org/abs/2405.14734)** preference alignment algorithm. See [examples](examples/README_zh.md) for usage.
[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/baichuan-13b-sft)) for details.
<details><summary>Full changelog</summary>
[23/07/19] Now we support training the **LLaMA-2** model. Try the `--model_name_or_path meta-llama/Llama-2-7b-hf` argument. Remember to add `--template llama2` when using the LLaMA-2-chat model.
[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that PaliGemma is a pre-trained model; you need to fine-tune it with the `gemma` template to give it conversational abilities.
[23/07/18] We developed an all-in-one Web UI for training and evaluation. Try `train_web.py` to fine-tune models in your browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in this development.
[24/05/18] We supported the **[KTO](https://arxiv.org/abs/2402.01306)** preference alignment algorithm. See [examples](examples/README_zh.md) for usage.
[23/07/11] Now we support training the **Baichuan-13B** model. Try the `--model_name_or_path baichuan-inc/Baichuan-13B-Base` and `--lora_target W_pack` arguments. Remember to add `--template baichuan` when using the Baichuan-13B-Chat model.
[24/05/14] We supported training and inference on Ascend NPU devices. See the [installation](#installing-llama-factory) section for details.
[23/07/09] We released [FastEdit](https://github.com/hiyouga/FastEdit)⚡🩹, an easy-to-use package for quickly editing the factual memory of large language models. Follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
[24/04/26] We supported fine-tuning the multimodal model **LLaVA-1.5**. See [examples](examples/README_zh.md) for usage.
[23/07/07] Now we support training the **InternLM-7B** model. Try the `--model_name_or_path internlm/internlm-7b` argument. Remember to add `--template intern` when using the InternLM-chat model.
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3 models fine-tuned with LLaMA Factory have been published on Hugging Face; see [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
[23/07/05] Now we support training the **Falcon-7B/40B** models. Try the `--model_name_or_path tiiuae/falcon-7b` and `--lora_target query_key_value` arguments.
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** training based on [AstraMindAI's repository](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README_zh.md) for usage.
[23/06/29] We provided a **reproducible example** of instruction-model fine-tuning; see this [Hugging Face repo](https://huggingface.co/hiyouga/baichuan-7b-sft) for details.
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)**. See [examples](examples/README_zh.md) for usage.
[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI API](https://platform.openai.com/docs/api-reference/chat) format, so you can plug the fine-tuned model into any ChatGPT-based application.
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). Compared with FlashAttention-2 it delivers **117%** training speed and **50%** memory savings; see [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for more details.
[23/06/15] Now we support training the **Baichuan-7B** model. Try the `--model_name_or_path baichuan-inc/Baichuan-7B` and `--lora_target W_pack` arguments.
[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README_zh.md) for usage.
[23/06/03] Now we support 4-bit LoRA training (aka [QLoRA](https://github.com/artidoro/qlora)). Try the `--quantization_bit 4` argument for 4-bit quantized fine-tuning.
[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available on arXiv!
[23/05/31] Now we support training the **BLOOM & BLOOMZ** models. Try the `--model_name_or_path bigscience/bloomz-7b1-mt` and `--lora_target query_key_value` arguments.
[24/03/20] We supported **FSDP+QLoRA**, which fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README_zh.md) for usage.
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README_zh.md) for usage.
[24/03/07] We supported the gradient low-rank projection (**[GaLore](https://arxiv.org/abs/2403.03507)**) algorithm. See [examples](examples/README_zh.md) for usage.
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for fast concurrent inference. Use `infer_backend: vllm` to enjoy **270%** inference speed.
[24/02/28] We supported **[DoRA](https://arxiv.org/abs/2402.09353)** fine-tuning. Use `use_dora: true` to activate DoRA training.
[24/02/15] We supported the **block expansion** method proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README_zh.md) for usage.
[24/02/05] Fine-tuning the Qwen1.5 (Qwen2 beta) series models is supported in LLaMA-Factory. See this [blog post](https://qwenlm.github.io/zh/blog/qwen1.5/) for details.
[24/01/18] We supported **agent tuning** for most models; fine-tuning with `dataset: glaive_toolcall_zh` equips the model with tool-calling abilities.
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s LoRA training acceleration for the LLaMA, Mistral and Yi models. Use `use_unsloth: true` to activate it; it achieves **170%** training speed, see [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
[23/12/12] We supported fine-tuning the latest mixture-of-experts model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**. See [here](#hardware-requirements) for hardware requirements.
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage.
[23/10/21] We supported the **[NEFTune](https://arxiv.org/abs/2310.05914)** trick. Use `neftune_noise_alpha: 5` to activate NEFTune.
[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Use `shift_attn: true` to enable it.
[23/09/23] We integrated the MMLU, C-Eval and CMMLU benchmarks into this project. See [examples](examples/README_zh.md) for usage.
[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Use `flash_attn: fa2` to enable FlashAttention-2 if you are using RTX 4090, A100 or H100 GPUs.
[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Use `rope_scaling: linear` to train models or `rope_scaling: dynamic` to evaluate models.
[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README_zh.md) for usage.
[23/07/31] We supported **dataset streaming**. Use `streaming: true` and `max_steps: 10000` to load your dataset in streaming mode.
[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
[23/07/18] We developed an **all-in-one Web UI** for training and evaluation. Use `train_web.py` to fine-tune models in your browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in this development.
[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for quickly editing the factual memory of large language models. Follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
[23/06/29] We provided a **reproducible example** of instruction-model fine-tuning; see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI API](https://platform.openai.com/docs/api-reference/chat) format, so you can plug the fine-tuned model into **any ChatGPT-based application**.
[23/06/03] We implemented 4-bit LoRA training (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README_zh.md) for usage.
</details>
## Supported Models
| Model                                                     | Model size                  | Default module    | Template |
| -------------------------------------------------------- | --------------------------- | ----------------- |----------|
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B | query_key_value | - |
| [Baichuan](https://github.com/baichuan-inc/baichuan-13B) | 7B/13B | W_pack | baichuan |
| [InternLM](https://github.com/InternLM/InternLM) | 7B | q_proj,v_proj | intern |
| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B | c_attn | chatml |
| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | - |
| Model                                                     | Model size                       | Template  |
| -------------------------------------------------------- | -------------------------------- | --------- |
| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | gemma |
| [GLM4](https://huggingface.co/THUDM) | 9B | glm4 |
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 |
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | vicuna |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma](https://huggingface.co/google) | 3B | gemma |
| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | qwen |
| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen |
| [Qwen2 (MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/7B/57B/72B | qwen |
| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
- The **default module** is one of the choices for the `--lora_target` argument. Use `python src/train_bash.py -h` to see all available options.
- For all "base" models, the `--template` argument can be `default`, `alpaca`, `vicuna`, etc. But make sure to use the **corresponding template** for the "chat" models.
> [!NOTE]
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna`, etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
>
> Remember to use the **same** template in training and inference.
Please refer to [constants.py](src/llamafactory/extras/constants.py) for the full list of models we support.
You can also add a custom chat template to [template.py](src/llamafactory/data/template.py).
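To make the template rule above concrete, here is a sketch pairing training and inference with the same template (the model ID and dataset are illustrative placeholders):
```bash
# train with the llama3 template...
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
--do_train \
--dataset alpaca_gpt4_zh \
--template llama3 \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint
# ...and chat with exactly the same template
python src/cli_demo.py \
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
--template llama3 \
--finetuning_type lora \
--checkpoint_dir path_to_sft_checkpoint
```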
## Supported Training Approaches
| Approach               | Full-tuning | Freeze-tuning | LoRA | QLoRA |
| ---------------------- | ----------- | ------------- | ---- | ----- |
| Pre-Training           | ✅          | ✅            | ✅   | ✅    |
| Supervised Fine-Tuning | ✅          | ✅            | ✅   | ✅    |
| Reward Modeling        |             |               | ✅   | ✅    |
| PPO Training           |             |               | ✅   | ✅    |
| DPO Training           | ✅          |               | ✅   | ✅    |
| Approach               | Full-tuning        | Freeze-tuning      | LoRA               | QLoRA              |
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
| Pre-Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Reward Modeling        | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| PPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| DPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| KTO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| ORPO Training          | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| SimPO Training         | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
## Provided Datasets
- For pre-training:
- [Wiki Demo (en)](data/wiki_demo.txt)
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
- For supervised fine-tuning:
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [Self-cognition (zh)](data/self_cognition.json)
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
- For reward modeling or DPO training:
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
<details><summary>Pre-training datasets</summary>
Please refer to [data/README.md](data/README_zh.md) for how to use these datasets.
- [Wiki Demo (en)](data/wiki_demo.txt)
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
</details>
<details><summary>Supervised fine-tuning datasets</summary>
- [Identity (en&zh)](data/identity.json)
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
</details>
<details><summary>Preference datasets</summary>
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
</details>
Some datasets require confirmation before use, so we recommend logging in to your Hugging Face account with these commands.
```bash
pip install --upgrade huggingface_hub
huggingface-cli login
```
## Requirements
- Python 3.8+ and PyTorch 1.13.1+
- 🤗Transformers, Datasets, Accelerate, PEFT and TRL
- sentencepiece and tiktoken
- jieba, rouge-chinese and nltk (for evaluation)
- gradio and matplotlib (for the web UI)
- uvicorn, fastapi and sse-starlette (for the API)
| Mandatory    | Minimum | Recommend |
| ------------ | ------- | --------- |
| python       | 3.8     | 3.11      |
| torch        | 1.13.1  | 2.3.0     |
| transformers | 4.41.2  | 4.41.2    |
| datasets     | 2.16.0  | 2.19.2    |
| accelerate   | 0.30.1  | 0.30.1    |
| peft         | 0.11.1  | 0.11.1    |
| trl          | 0.8.6   | 0.9.4     |
And a **powerful GPU**!
| Optional     | Minimum | Recommend |
| ------------ | ------- | --------- |
| CUDA         | 11.6    | 12.2      |
| deepspeed    | 0.10.0  | 0.14.0    |
| bitsandbytes | 0.39.0  | 0.43.1    |
| vllm         | 0.4.3   | 0.4.3     |
| flash-attn   | 2.3.0   | 2.5.9     |
### Hardware Requirements
\* *estimated*
| Method            | Bits | 7B    | 13B   | 30B   | 70B    | 110B   | 8x7B  | 8x22B  |
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
## Getting Started
### Data Preparation (optional)
For the dataset file format, please refer to the contents of the `data/example_dataset` folder. You can build a custom dataset from either a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files.
Note: when using a custom dataset, please update the `data/dataset_info.json` file; its format is described in `data/README.md`.
### Installing LLaMA Factory
> [!IMPORTANT]
> This step is mandatory.
```bash
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory
pip install -e '.[torch,metrics]'
```
Optional extra dependencies: torch, torch_npu, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
> [!TIP]
> Use `pip install --no-deps -e .` to resolve package conflicts.
### Environment Setup (optional)
```bash
git clone https://github.com/hiyouga/LLaMA-Efficient-Tuning.git
conda create -n llama_etuning python=3.10
conda activate llama_etuning
cd LLaMA-Efficient-Tuning
pip install -r requirements.txt
```
<details><summary>Guide for Windows users</summary>
To enable quantized LoRA (QLoRA) on Windows, you need to install a prebuilt `bitsandbytes` wheel, which supports CUDA 11.1 to 12.2. Please choose the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
```bash
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
```
</details>
### One-click Fine-tuning/Testing in the Browser
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_web.py
```
Currently the Web UI only supports training on **a single GPU**.
### Pre-Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage pt \
--model_name_or_path path_to_your_model \
--do_train \
--dataset wiki_demo \
--template default \
--finetuning_type lora \
--output_dir path_to_pt_checkpoint \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
```
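If the corpus is too large to pre-process at once, the dataset streaming mentioned in the changelog can be combined with this command; a sketch (the step count is illustrative):
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage pt \
--model_name_or_path path_to_your_model \
--do_train \
--dataset wiki_demo \
--template default \
--finetuning_type lora \
--output_dir path_to_pt_checkpoint \
--streaming \
--max_steps 10000 \
--learning_rate 5e-5 \
--fp16
```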
### Supervised Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_train \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--output_dir path_to_sft_checkpoint \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
```
Remember to specify `--lora_target W_pack` when using Baichuan models.
### Reward Modeling
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage rm \
--model_name_or_path path_to_your_model \
--do_train \
--dataset comparison_gpt4_zh \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_rm_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss \
--fp16
```
### PPO Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage ppo \
--model_name_or_path path_to_your_model \
--do_train \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--reward_model path_to_rm_checkpoint \
--output_dir path_to_ppo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss
```
### DPO Training
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage dpo \
--model_name_or_path path_to_your_model \
--do_train \
--dataset comparison_gpt4_zh \
--template default \
--finetuning_type lora \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_dpo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 1e-5 \
--num_train_epochs 1.0 \
--plot_loss \
--fp16
```
### Distributed Training on Multiple GPUs
```bash
accelerate config # configure the distributed environment first
accelerate launch src/train_bash.py # same arguments as above
```
<details><summary>Example Accelerate config for full-parameter fine-tuning with DeepSpeed ZeRO-2</summary>
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 4
  gradient_clipping: 0.5
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
</details>
To enable FlashAttention-2 on Windows, you need to install a precompiled `flash-attn` package that supports CUDA 12.1 to 12.2. Please download the version you need from [flash-attention](https://github.com/bdashore3/flash-attention/releases).
### Evaluation (BLEU and Chinese ROUGE scores)
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_eval \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_eval_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \
--predict_with_generate
```
We recommend using `--per_device_eval_batch_size 1` and `--max_target_length 128` when evaluating quantized models.
### Predict
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_your_model \
--do_predict \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_predict_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \
--predict_with_generate
```
<details><summary>Guide for Ascend NPU users</summary>
Join the [NPU user group](assets/wechat_npu.jpg).
To install LLaMA Factory on Ascend NPU devices, specify the extra dependencies with `pip install -e '.[torch-npu,metrics]'`. You also need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**; see the [installation tutorial](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html) or use the following commands:
```bash
# replace the URL with the one matching your CANN version and device type
# install the CANN Toolkit
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
# install the CANN Kernels
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
# set environment variables
source /usr/local/Ascend/ascend-toolkit/set_env.sh
```
| Requirement  | Minimum | Recommend   |
| ------------ | ------- | ----------- |
| CANN         | 8.0.RC1 | 8.0.RC1     |
| torch        | 2.1.0   | 2.1.0       |
| torch-npu    | 2.1.0   | 2.1.0.post3 |
| deepspeed    | 0.13.2  | 0.13.2     |
Docker images:
- 32GB: [download link](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
- 64GB: [download link](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
Use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the computing device.
If the model fails to run inference properly, try setting `do_sample: false`.
</details>
### Data Preparation
For the dataset file format, please refer to [data/README_zh.md](data/README_zh.md). You can use datasets on HuggingFace / ModelScope or load a dataset from local disk.
> [!NOTE]
> Please update `data/dataset_info.json` when using a custom dataset.
### Quickstart
The following three commands run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```
For advanced usage (including multi-GPU fine-tuning), please refer to [examples/README_zh.md](examples/README_zh.md).
> [!TIP]
> Use `llamafactory-cli help` to show the help information.
### API Demo
```bash
python src/api_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
See `http://localhost:8000/docs` for the API documentation.
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
#### Use local environment
```bash
CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui
```
### CLI Demo
```bash
python src/cli_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
#### Use Docker
```bash
docker build -f ./Dockerfile -t llama-factory:latest .
docker run --gpus=all \
-v ./hf_cache:/root/.cache/huggingface/ \
-v ./data:/app/data \
-v ./output:/app/output \
-p 7860:7860 \
--shm-size 16G \
--name llama_factory \
-d llama-factory:latest
```
### Web Demo
```bash
python src/web_demo.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
```
#### Use Docker Compose
```bash
docker compose -f ./docker-compose.yml up -d
```
<details><summary>Details about volumes</summary>
- hf_cache: uses the Hugging Face cache on the host machine; it can be pointed at a different directory if a cache already exists there.
- data: the folder on the host machine where datasets are stored, so they can be selected in the LLaMA Board GUI.
- output: set the export directory to this path so that the merged result can be accessed directly on the host machine.
</details>
### Export the Fine-Tuned Model
```bash
python src/export_model.py \
--model_name_or_path path_to_your_model \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_export
```
### Deploy with OpenAI-style API and vLLM
```bash
CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
```
> [!TIP]
> See https://platform.openai.com/docs/api-reference/chat/create for the API documentation.
## TODO
- [ ] Implement flash attention ([torch](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) / [xformers](https://github.com/facebookresearch/xformers) / [flashattn](https://github.com/Dao-AILab/flash-attention)).
- [ ] Use multi-query attention to speed up inference.
- [ ] Support full-parameter RLHF training.
### Download from ModelScope Hub
If you have trouble downloading models and datasets from Hugging Face, you can use ModelScope instead.
```bash
export USE_MODELSCOPE_HUB=1 # use `set USE_MODELSCOPE_HUB=1` on Windows
```
Set `model_name_or_path` to a model ID to load the corresponding model. You can find all available models on the [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
### Use W&B Logger
To log experimental results with [Weights & Biases](https://wandb.ai), add the following arguments:
```yaml
report_to: wandb
run_name: test_run # optional
```
Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching the training task to log in with your W&B account.
## Projects using LLaMA Factory
If you have a project that should be added to the list below, please contact us via email or create a pull request.
<details><summary>Click to show</summary>
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140)
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for astronomy, fine-tuned from ChatGLM2-6B and Qwen-14B on astronomical data.
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model for the Chinese legal domain, fine-tuned from Baichuan-13B, capable of legal reasoning and knowledge retrieval.
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A Chinese medical large language model, fine-tuned from Baichuan-7B and ChatGLM-6B on Chinese medical data.
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for the Chinese medical domain, fine-tuned from LLaMA2-7B and Baichuan-13B on Chinese medical data.
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI personality large language models, capable of giving any LLM one of 16 personality types depending on the dataset and training method.
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating prompts for Stable Diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A Chinese multimodal medical large language model, fine-tuned from LLaVA-1.5-7B on Chinese multimodal medical data.
</details>
## License
This repository is licensed under the [Apache-2.0 License](LICENSE).
Please follow the model licenses to use the corresponding model weights:
- [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md)
- [LLaMA-2](https://ai.meta.com/llama/license/)
- [BLOOM](https://huggingface.co/spaces/bigscience/license)
- [Falcon](LICENSE)
- [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf)
- [InternLM](https://github.com/InternLM/InternLM#open-source-license)
- [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE)
Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
## Citation
If this work is helpful, please kindly cite as:
```bibtex
@article{zheng2024llamafactory,
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
journal={arXiv preprint arXiv:2403.13372},
year={2024},
url={http://arxiv.org/abs/2403.13372}
}
```
## Acknowledgement
This repo benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works.
## Star History
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)

data/README.md
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
Currently we support datasets in **alpaca** and **sharegpt** format.
```json
"dataset_name": {
"hf_hub_url": "the name of the dataset repository on the HuggingFace hub. (if specified, ignore below 3 arguments)",
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore below 2 arguments)",
"file_name": "the name of the dataset file in the this directory. (required if above are not specified)",
"file_sha1": "the SHA-1 hash value of the dataset file. (optional)",
"columns": {
"prompt": "the name of the column in the datasets containing the prompts. (default: instruction)",
"query": "the name of the column in the datasets containing the queries. (default: input)",
"response": "the name of the column in the datasets containing the responses. (default: output)",
"history": "the name of the column in the datasets containing the history of chat. (default: None)"
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url and file_name)",
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
"subset": "the name of the subset. (optional, default: None)",
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
"num_samples": "the number of samples in the dataset used for training. (optional, default: None)",
"columns (optional)": {
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
"query": "the column name in the dataset containing the queries. (default: input)",
"response": "the column name in the dataset containing the responses. (default: output)",
"history": "the column name in the dataset containing the histories. (default: None)",
"messages": "the column name in the dataset containing the messages. (default: conversations)",
"system": "the column name in the dataset containing the system prompts. (default: None)",
"tools": "the column name in the dataset containing the tool description. (default: None)",
"images": "the column name in the dataset containing the image inputs. (default: None)",
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
},
"tags (optional, used for the sharegpt format)": {
"role_tag": "the key in the message represents the identity. (default: from)",
"content_tag": "the key in the message represents the content. (default: value)",
"user_tag": "the value of the role_tag represents the user. (default: human)",
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
}
}
```
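For instance, a dataset hosted on the Hugging Face hub needs no local file at all. The entry below is a hypothetical illustration; the repository id `my-org/my-sft-data` is a placeholder, not a real dataset:
```json
"my_online_dataset": {
"hf_hub_url": "my-org/my-sft-data",
"formatting": "alpaca",
"num_samples": 10000
}
```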
## Alpaca Format
### Supervised Fine-Tuning Dataset
- [Example dataset](alpaca_en_demo.json)
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the human prompt, i.e. the human prompt will be `instruction\ninput`. The `output` column represents the model response.
The `system` column will be used as the system prompt if specified.
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"system": "system prompt (optional)",
"history": [
["human instruction in the first round (optional)", "model response in the first round (optional)"],
["human instruction in the second round (optional)", "model response in the second round (optional)"]
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"system": "system",
"history": "history"
}
}
```
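To make the concatenation rule concrete, here is a minimal sketch in plain Python (illustrative only, not the project's actual preprocessing code):
```python
def build_human_prompt(example: dict) -> str:
    # Concatenate instruction and input with a newline, per the rule above.
    prompt = example["instruction"]
    if example.get("input"):  # the input column is optional
        prompt += "\n" + example["input"]
    return prompt

example = {"instruction": "Translate to French.", "input": "Good morning.", "output": "Bonjour."}
assert build_human_prompt(example) == "Translate to French.\nGood morning."
```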
### Pre-training Dataset
- [Example dataset](c4_demo.json)
In pre-training, only the `text` column will be used for model learning.
```json
[
{"text": "document"},
{"text": "document"}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "text"
}
}
```
### Preference Dataset
Preference datasets are used for reward modeling, DPO training and ORPO training.
They require a better response in the `chosen` column and a worse response in the `rejected` column.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"chosen": "chosen answer (required)",
"rejected": "rejected answer (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"ranking": true,
"columns": {
"prompt": "instruction",
"query": "input",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### KTO Dataset
- [Example dataset](kto_en_demo.json)
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"kto_tag": "human feedback [true/false] (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"kto_tag": "kto_tag"
}
}
```
### Multimodal Dataset
- [Example dataset](mllm_demo.json)
Multimodal datasets require an `images` column containing the paths to the input images. Currently we only support one image.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"images": [
"image path (required)"
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"images": "images"
}
}
```
## Sharegpt Format
### Supervised Fine-Tuning Dataset
- [Example dataset](glaive_toolcall_en_demo.json)
Compared to the alpaca format, the sharegpt format allows datasets to have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "function_call",
"value": "tool arguments"
},
{
"from": "observation",
"value": "tool result"
},
{
"from": "gpt",
"value": "model response"
}
],
"system": "system prompt (optional)",
"tools": "tool description (optional)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "conversations",
"system": "system",
"tools": "tools"
}
}
```
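The position constraint can be checked mechanically. Below is a minimal sketch (our own helper, not part of the LLaMA Factory API) that validates the role order of a `conversations` list, assuming the default `from`/`value` tags:
```python
# Odd positions (the 1st, 3rd, ... messages, i.e. even 0-based indices) hold user-side roles;
# even positions (the 2nd, 4th, ... messages) hold model-side roles.
USER_SIDE = {"human", "observation"}
MODEL_SIDE = {"gpt", "function_call"}

def has_valid_role_order(conversations: list) -> bool:
    for i, message in enumerate(conversations):
        allowed = USER_SIDE if i % 2 == 0 else MODEL_SIDE
        if message["from"] not in allowed:
            return False
    return True
```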
### Preference Dataset
- [Example dataset](dpo_en_demo.json)
Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "gpt",
"value": "model response"
},
{
"from": "human",
"value": "human instruction"
}
],
"chosen": {
"from": "gpt",
"value": "chosen answer (required)"
},
"rejected": {
"from": "gpt",
"value": "rejected answer (required)"
}
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"ranking": true,
"columns": {
"messages": "conversations",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### OpenAI Format
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
```json
[
{
"messages": [
{
"role": "system",
"content": "system prompt (optional)"
},
{
"role": "user",
"content": "human instruction"
},
{
"role": "assistant",
"content": "model response"
}
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "messages"
},
"tags": {
"role_tag": "role",
"content_tag": "content",
"user_tag": "user",
"assistant_tag": "assistant",
"system_tag": "system"
}
}
```
The KTO datasets and multimodal datasets in sharegpt format are defined in the same way as in the alpaca format.
Pre-training datasets are **incompatible** with the sharegpt format.

data/README_zh.md
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
Currently we support datasets in **alpaca** and **sharegpt** format.
```json
"dataset_name": {
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
"ms_hub_url": "the name of the dataset repository on the ModelScope hub. (if specified, ignore script_url and file_name)",
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
"subset": "the name of the subset. (optional, default: None)",
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
"num_samples": "the number of samples in the dataset used for training. (optional, default: None)",
"columns (optional)": {
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
"query": "the column name in the dataset containing the queries. (default: input)",
"response": "the column name in the dataset containing the responses. (default: output)",
"history": "the column name in the dataset containing the histories. (default: None)",
"messages": "the column name in the dataset containing the messages. (default: conversations)",
"system": "the column name in the dataset containing the system prompts. (default: None)",
"tools": "the column name in the dataset containing the tool description. (default: None)",
"images": "the column name in the dataset containing the image inputs. (default: None)",
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
},
"tags (optional, used for the sharegpt format)": {
"role_tag": "the key in the message that represents the identity of the sender. (default: from)",
"content_tag": "the key in the message that represents the text content. (default: value)",
"user_tag": "the value of the role_tag that represents the user. (default: human)",
"assistant_tag": "the value of the role_tag that represents the assistant. (default: gpt)",
"observation_tag": "the value of the role_tag that represents the tool results. (default: observation)",
"function_tag": "the value of the role_tag that represents the function call. (default: function_call)",
"system_tag": "the value of the role_tag that represents the system prompt. (default: system, can override the system column)"
}
}
```
## Alpaca Format
### Supervised Fine-Tuning Dataset
- [Example dataset](alpaca_zh_demo.json)
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the human prompt, i.e. the human prompt will be `instruction\ninput`. The `output` column represents the model response.
The `system` column will be used as the system prompt if specified.
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"system": "system prompt (optional)",
"history": [
["human instruction in the first round (optional)", "model response in the first round (optional)"],
["human instruction in the second round (optional)", "model response in the second round (optional)"]
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"system": "system",
"history": "history"
}
}
```
### Pre-training Dataset
- [Example dataset](c4_demo.json)
In pre-training, only the `text` column will be used for model learning.
```json
[
{"text": "document"},
{"text": "document"}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "text"
}
}
```
### Preference Dataset
Preference datasets are used for reward modeling, DPO training and ORPO training.
They require a better response in the `chosen` column and a worse response in the `rejected` column.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"chosen": "chosen answer (required)",
"rejected": "rejected answer (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"ranking": true,
"columns": {
"prompt": "instruction",
"query": "input",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### KTO Dataset
- [Example dataset](kto_en_demo.json)
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"kto_tag": "human feedback [true/false] (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"kto_tag": "kto_tag"
}
}
```
### Multimodal Dataset
- [Example dataset](mllm_demo.json)
Multimodal datasets require an `images` column containing the paths to the input images. Currently we only support one image.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"images": [
"image path (required)"
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"images": "images"
}
}
```
## Sharegpt Format
### Supervised Fine-Tuning Dataset
- [Example dataset](glaive_toolcall_zh_demo.json)
Compared to the alpaca format, the sharegpt format allows datasets to have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "function_call",
"value": "tool arguments"
},
{
"from": "observation",
"value": "tool result"
},
{
"from": "gpt",
"value": "model response"
}
],
"system": "system prompt (optional)",
"tools": "tool description (optional)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "conversations",
"system": "system",
"tools": "tools"
}
}
```
### Preference Dataset
- [Example dataset](dpo_zh_demo.json)
Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "gpt",
"value": "model response"
},
{
"from": "human",
"value": "human instruction"
}
],
"chosen": {
"from": "gpt",
"value": "chosen answer (required)"
},
"rejected": {
"from": "gpt",
"value": "rejected answer (required)"
}
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"ranking": true,
"columns": {
"messages": "conversations",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### OpenAI Format
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
```json
[
{
"messages": [
{
"role": "system",
"content": "system prompt (optional)"
},
{
"role": "user",
"content": "human instruction"
},
{
"role": "assistant",
"content": "model response"
}
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "messages"
},
"tags": {
"role_tag": "role",
"content_tag": "content",
"user_tag": "user",
"assistant_tag": "assistant",
"system_tag": "system"
}
}
```
The KTO datasets and multimodal datasets in sharegpt format are defined in the same way as in the alpaca format.
Pre-training datasets are **incompatible** with the sharegpt format.

data/belle_multiturn/belle_multiturn.py
import json
import os
import datasets
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "BELLE multiturn chat dataset."
_CITATION = """\
}
"""
_HOMEPAGE = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M"
_HOMEPAGE = "{}/datasets/BelleGroup/multiturn_chat_0.8M".format(_HF_ENDPOINT)
_LICENSE = "gpl-3.0"
_URL = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)
class BelleMultiturn(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.0")
def _info(self):
features = datasets.Features(
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
)
return datasets.DatasetInfo(
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
file_path = dl_manager.download(_URL)
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
def _generate_examples(self, filepath: str):  # reconstruct multi-turn conversations from the flattened prompt
with open(filepath, "r", encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
conversations = []
prompt = data["instruction"].strip()
response = data["output"].strip()
assist_idx = prompt.rfind("Assistant:")
human_idx = prompt.rfind("Human:")
query = prompt[human_idx + 6 : assist_idx].strip()
prompt = prompt[:human_idx].strip()
conversations.insert(0, {"from": "gpt", "value": response})
conversations.insert(0, {"from": "human", "value": query})
while prompt.rfind("Assistant:") != -1:
assist_idx = prompt.rfind("Assistant:")
human_idx = prompt.rfind("Human:")
if human_idx != -1:
old_query = prompt[human_idx + 6 : assist_idx].strip()
old_resp = prompt[assist_idx + 10 :].strip()
conversations.insert(0, {"from": "gpt", "value": old_resp})
conversations.insert(0, {"from": "human", "value": old_query})
else:
break
prompt = prompt[:human_idx].strip()
yield key, {"conversations": conversations}

data/hh_rlhf_en/hh_rlhf_en.py
import json
import os
from typing import List
import datasets
from typing import Any, Dict, List
_DESCRIPTION = "Human preference data about helpfulness and harmlessness for ChatGLM."
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
_CITATION = ""
_HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf"
_HOMEPAGE = "{}/datasets/Anthropic/hh-rlhf".format(_HF_ENDPOINT)
_LICENSE = "mit"
_URL = "https://huggingface.co/datasets/Anthropic/hh-rlhf/resolve/main/"
_URL = "{}/datasets/Anthropic/hh-rlhf/resolve/main/".format(_HF_ENDPOINT)
_URLS = {
"train": [
_URL + "harmless-base/train.jsonl.gz",
_URL + "helpful-base/train.jsonl.gz",
_URL + "helpful-online/train.jsonl.gz",
_URL + "helpful-rejection-sampled/train.jsonl.gz"
_URL + "helpful-rejection-sampled/train.jsonl.gz",
],
"test": [
_URL + "harmless-base/test.jsonl.gz",
_URL + "helpful-base/test.jsonl.gz",
_URL + "helpful-online/test.jsonl.gz",
_URL + "helpful-rejection-sampled/test.jsonl.gz"
]
_URL + "helpful-rejection-sampled/test.jsonl.gz",
],
}
class HhRlhfEn(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.0")
def _info(self) -> datasets.DatasetInfo:
features = datasets.Features(
{
"instruction": datasets.Value("string"),
"chosen": datasets.Value("string"),
"rejected": datasets.Value("string"),
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
file_path = dl_manager.download_and_extract(_URLS)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_path["train"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
]
def _generate_examples(self, filepaths: List[str]):
key = 0
for filepath in filepaths:
with open(filepath, "r", encoding="utf-8") as f:
rejected = data["rejected"]
assist_idx = rejected.rfind("\n\nAssistant: ")
r_reject = rejected[assist_idx + 13 :].strip()
assist_idx = chosen.rfind("\n\nAssistant: ")
r_accept = chosen[assist_idx + 13 :].strip()
human_idx = chosen.rfind("\n\nHuman: ")
query = chosen[human_idx + 9 : assist_idx].strip()
prompt = chosen[:human_idx]
history = []
assist_idx = prompt.rfind("\n\nAssistant: ")
human_idx = prompt.rfind("\n\nHuman: ")
if human_idx != -1:
old_query = prompt[human_idx + 9 : assist_idx].strip()
old_resp = prompt[assist_idx + 13 :].strip()
history.insert(0, (old_query, old_resp))
else:
break
prompt = prompt[:human_idx]
yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
key += 1

data/ultra_chat/ultra_chat.py
import json
import os
from typing import List
import datasets
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
_CITATION = """\
}
"""
_HOMEPAGE = "https://huggingface.co/datasets/stingning/ultrachat"
_HOMEPAGE = "{}/datasets/stingning/ultrachat".format(_HF_ENDPOINT)
_LICENSE = "cc-by-nc-4.0"
_BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"
_BASE_DATA_URL = "{}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl".format(_HF_ENDPOINT)
class UltraChat(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.0")
def _info(self):
features = datasets.Features(
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
)
return datasets.DatasetInfo(
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]
def _generate_examples(self, filepaths: List[str]):
for filepath in filepaths:
with open(filepath, "r", encoding="utf-8") as f:
for row in f:
try:
data = json.loads(row)
except Exception:
continue
key = data["id"]
content = data["data"]
key: int = data["id"]
content: List[str] = data["data"]
if len(content) % 2 == 1:
content.pop(-1)
if len(content) < 2:
continue
conversations = [
{"from": "human" if i % 2 == 0 else "gpt", "value": content[i]} for i in range(len(content))
]
yield key, {"conversations": conversations}

docker-compose.yml
version: '3.8'
services:
llama-factory:
build:
dockerfile: Dockerfile
context: .
container_name: llama_factory
volumes:
- ./hf_cache:/root/.cache/huggingface/
- ./data:/app/data
- ./output:/app/output
ports:
- "7860:7860"
ipc: host
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: "all"
capabilities: [gpu]
restart: unless-stopped

evaluation/ceval/ceval.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datasets
import pandas as pd
_CITATION = """\
@article{huang2023ceval,
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
journal={arXiv preprint arXiv:2305.08322},
year={2023}
}
"""
_DESCRIPTION = """\
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
"""
_HOMEPAGE = "https://cevalbenchmark.com"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
_URL = "ceval.zip"
task_list = [
"computer_network",
"operating_system",
"computer_architecture",
"college_programming",
"college_physics",
"college_chemistry",
"advanced_mathematics",
"probability_and_statistics",
"discrete_mathematics",
"electrical_engineer",
"metrology_engineer",
"high_school_mathematics",
"high_school_physics",
"high_school_chemistry",
"high_school_biology",
"middle_school_mathematics",
"middle_school_biology",
"middle_school_physics",
"middle_school_chemistry",
"veterinary_medicine",
"college_economics",
"business_administration",
"marxism",
"mao_zedong_thought",
"education_science",
"teacher_qualification",
"high_school_politics",
"high_school_geography",
"middle_school_politics",
"middle_school_geography",
"modern_chinese_history",
"ideological_and_moral_cultivation",
"logic",
"law",
"chinese_language_and_literature",
"art_studies",
"professional_tour_guide",
"legal_professional",
"high_school_chinese",
"high_school_history",
"middle_school_history",
"civil_servant",
"sports_science",
"plant_protection",
"basic_medicine",
"clinical_medicine",
"urban_and_rural_planner",
"accountant",
"fire_engineer",
"environmental_impact_assessment_engineer",
"tax_accountant",
"physician",
]
class CevalConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
class Ceval(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
CevalConfig(
name=task_name,
)
for task_name in task_list
]
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("int32"),
"question": datasets.Value("string"),
"A": datasets.Value("string"),
"B": datasets.Value("string"),
"C": datasets.Value("string"),
"D": datasets.Value("string"),
"answer": datasets.Value("string"),
"explanation": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
task_name = self.config.name
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "test", f"{task_name}_test.csv"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, "val", f"{task_name}_val.csv"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, "dev", f"{task_name}_dev.csv"),
},
),
]
def _generate_examples(self, filepath):
df = pd.read_csv(filepath, encoding="utf-8")
for i, instance in enumerate(df.to_dict(orient="records")):
if "answer" not in instance.keys():
instance["answer"] = ""
if "explanation" not in instance.keys():
instance["explanation"] = ""
yield i, instance

evaluation/cmmlu/cmmlu.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datasets
import pandas as pd
_CITATION = """\
@article{li2023cmmlu,
title={CMMLU: Measuring massive multitask language understanding in Chinese},
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
journal={arXiv preprint arXiv:2306.09212},
year={2023}
}
"""
_DESCRIPTION = """\
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
"""
_HOMEPAGE = "https://github.com/haonan-li/CMMLU"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
_URL = "cmmlu.zip"
task_list = [
"agronomy",
"anatomy",
"ancient_chinese",
"arts",
"astronomy",
"business_ethics",
"chinese_civil_service_exam",
"chinese_driving_rule",
"chinese_food_culture",
"chinese_foreign_policy",
"chinese_history",
"chinese_literature",
"chinese_teacher_qualification",
"clinical_knowledge",
"college_actuarial_science",
"college_education",
"college_engineering_hydrology",
"college_law",
"college_mathematics",
"college_medical_statistics",
"college_medicine",
"computer_science",
"computer_security",
"conceptual_physics",
"construction_project_management",
"economics",
"education",
"electrical_engineering",
"elementary_chinese",
"elementary_commonsense",
"elementary_information_and_technology",
"elementary_mathematics",
"ethnology",
"food_science",
"genetics",
"global_facts",
"high_school_biology",
"high_school_chemistry",
"high_school_geography",
"high_school_mathematics",
"high_school_physics",
"high_school_politics",
"human_sexuality",
"international_law",
"journalism",
"jurisprudence",
"legal_and_moral_basis",
"logical",
"machine_learning",
"management",
"marketing",
"marxist_theory",
"modern_chinese",
"nutrition",
"philosophy",
"professional_accounting",
"professional_law",
"professional_medicine",
"professional_psychology",
"public_relations",
"security_study",
"sociology",
"sports_science",
"traditional_chinese_medicine",
"virology",
"world_history",
"world_religions",
]
class CMMLUConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(version=datasets.Version("1.0.1"), **kwargs)
class CMMLU(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
CMMLUConfig(
name=task_name,
)
for task_name in task_list
]
def _info(self):
features = datasets.Features(
{
"question": datasets.Value("string"),
"A": datasets.Value("string"),
"B": datasets.Value("string"),
"C": datasets.Value("string"),
"D": datasets.Value("string"),
"answer": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
task_name = self.config.name
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
},
),
]
def _generate_examples(self, filepath):
df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
for i, instance in enumerate(df.to_dict(orient="records")):
question = instance.pop("Question", "")
answer = instance.pop("Answer", "")
instance["question"] = question
instance["answer"] = answer
yield i, instance

evaluation/mmlu/mmlu.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datasets
import pandas as pd
_CITATION = """\
@article{hendryckstest2021,
title={Measuring Massive Multitask Language Understanding},
author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
journal={Proceedings of the International Conference on Learning Representations (ICLR)},
year={2021}
}
"""
_DESCRIPTION = """\
Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
"""
_HOMEPAGE = "https://github.com/hendrycks/test"
_LICENSE = "MIT"
_URL = "mmlu.zip"
task_list = [
"high_school_european_history",
"business_ethics",
"clinical_knowledge",
"medical_genetics",
"high_school_us_history",
"high_school_physics",
"high_school_world_history",
"virology",
"high_school_microeconomics",
"econometrics",
"college_computer_science",
"high_school_biology",
"abstract_algebra",
"professional_accounting",
"philosophy",
"professional_medicine",
"nutrition",
"global_facts",
"machine_learning",
"security_studies",
"public_relations",
"professional_psychology",
"prehistory",
"anatomy",
"human_sexuality",
"college_medicine",
"high_school_government_and_politics",
"college_chemistry",
"logical_fallacies",
"high_school_geography",
"elementary_mathematics",
"human_aging",
"college_mathematics",
"high_school_psychology",
"formal_logic",
"high_school_statistics",
"international_law",
"high_school_mathematics",
"high_school_computer_science",
"conceptual_physics",
"miscellaneous",
"high_school_chemistry",
"marketing",
"professional_law",
"management",
"college_physics",
"jurisprudence",
"world_religions",
"sociology",
"us_foreign_policy",
"high_school_macroeconomics",
"computer_security",
"moral_scenarios",
"moral_disputes",
"electrical_engineering",
"astronomy",
"college_biology",
]
class MMLUConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
class MMLU(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
MMLUConfig(
name=task_name,
)
for task_name in task_list
]
def _info(self):
features = datasets.Features(
{
"question": datasets.Value("string"),
"A": datasets.Value("string"),
"B": datasets.Value("string"),
"C": datasets.Value("string"),
"D": datasets.Value("string"),
"answer": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
task_name = self.config.name
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, "data", "test", f"{task_name}_test.csv"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, "data", "val", f"{task_name}_val.csv"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, "data", "dev", f"{task_name}_dev.csv"),
},
),
]
def _generate_examples(self, filepath):
df = pd.read_csv(filepath, header=None)
df.columns = ["question", "A", "B", "C", "D", "answer"]
for i, instance in enumerate(df.to_dict(orient="records")):
yield i, instance

examples/README.md
We provide diverse examples about fine-tuning LLMs.
Make sure to execute these commands in the `LLaMA-Factory` directory.
## Table of Contents
- [LoRA Fine-Tuning on A Single GPU](#lora-fine-tuning-on-a-single-gpu)
- [QLoRA Fine-Tuning on a Single GPU](#qlora-fine-tuning-on-a-single-gpu)
- [LoRA Fine-Tuning on Multiple GPUs](#lora-fine-tuning-on-multiple-gpus)
- [LoRA Fine-Tuning on Multiple NPUs](#lora-fine-tuning-on-multiple-npus)
- [Full-Parameter Fine-Tuning on Multiple GPUs](#full-parameter-fine-tuning-on-multiple-gpus)
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
- [Extras](#extras)
## Examples
### LoRA Fine-Tuning on A Single GPU
#### (Continuous) Pre-Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
```
#### Supervised Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
```
#### Multimodal Supervised Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
```
#### Reward Modeling
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
```
#### PPO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
```
#### DPO/ORPO/SimPO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
```
#### KTO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_kto.yaml
```
#### Preprocess Dataset
It is useful for large datasets; use `tokenized_path` in the config to load the preprocessed dataset.
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
```
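Subsequent runs that specify the same `tokenized_path` can then skip tokenization. A minimal sketch of the relevant dataset keys, with a placeholder path (the remaining sections of the config are omitted):
```yaml
### dataset
dataset: identity,alpaca_en_demo
template: llama3
tokenized_path: saves/llama3-8b/dataset/sft  # saved on the preprocessing run, loaded afterwards
```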
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
```
#### Batch Predicting and Computing BLEU and ROUGE Scores
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
```
### QLoRA Fine-Tuning on a Single GPU
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
```
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
```
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
```
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
```
### LoRA Fine-Tuning on Multiple GPUs
#### Supervised Fine-Tuning on Single Node
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
```
#### Supervised Fine-Tuning on Multiple Nodes
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
```
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft_ds.yaml
```
### LoRA Fine-Tuning on Multiple NPUs
#### Supervised Fine-Tuning with DeepSpeed ZeRO-0
```bash
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_npu/llama3_lora_sft_ds.yaml
```
### Full-Parameter Fine-Tuning on Multiple GPUs
#### Supervised Fine-Tuning on Single Node
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
```
#### Supervised Fine-Tuning on Multiple Nodes
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
```
#### Batch Predicting and Computing BLEU and ROUGE Scores
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/full_multi_gpu/llama3_full_predict.yaml
```
### Merging LoRA Adapters and Quantization
#### Merge LoRA Adapters
Note: DO NOT use a quantized model or the `quantization_bit` argument when merging LoRA adapters.
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```
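For reference, a merge config of this kind pairs the base model with a LoRA adapter and an export section. The sketch below uses placeholder paths; the keys mirror those used by `llamafactory-cli export`:
```yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft  # placeholder adapter path
template: llama3
finetuning_type: lora

### export
export_dir: models/llama3_lora_sft
export_size: 2          # shard size in GB
export_device: cpu      # merge on CPU to avoid occupying GPU memory
export_legacy_format: false
```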
#### Quantizing Model using AutoGPTQ
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
```
### Inferring LoRA Fine-Tuned Models
Use `CUDA_VISIBLE_DEVICES=0,1` to infer models on multiple devices.
#### Use CLI
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
```
#### Use Web UI
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
```
#### Launch OpenAI-style API
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/inference/llama3_lora_sft.yaml
```
### Extras
#### Full-Parameter Fine-Tuning using GaLore
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
```
#### Full-Parameter Fine-Tuning using BAdam
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
```
#### LoRA+ Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
```
#### Mixture-of-Depths Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
```
#### LLaMA-Pro Fine-Tuning
```bash
bash examples/extras/llama_pro/expand.sh
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
```
#### FSDP+QLoRA Fine-Tuning
```bash
bash examples/extras/fsdp_qlora/single_node.sh
```

examples/README_zh.md
We provide diverse examples about fine-tuning LLMs.
Make sure to execute these commands in the `LLaMA-Factory` directory.
## Table of Contents
- [LoRA Fine-Tuning on A Single GPU](#lora-fine-tuning-on-a-single-gpu)
- [QLoRA Fine-Tuning on a Single GPU](#qlora-fine-tuning-on-a-single-gpu)
- [LoRA Fine-Tuning on Multiple GPUs](#lora-fine-tuning-on-multiple-gpus)
- [LoRA Fine-Tuning on Multiple NPUs](#lora-fine-tuning-on-multiple-npus)
- [Full-Parameter Fine-Tuning on Multiple GPUs](#full-parameter-fine-tuning-on-multiple-gpus)
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
- [Extras](#extras)
## Examples
### LoRA Fine-Tuning on A Single GPU
#### (Continuous) Pre-Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
```
#### Supervised Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
```
#### Multimodal Supervised Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
```
#### Reward Modeling
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
```
#### PPO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
```
#### DPO/ORPO/SimPO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
```
#### KTO Training
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_kto.yaml
```
#### Preprocess Dataset
It is useful for large datasets; use `tokenized_path` in the config to load the preprocessed dataset.
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
```
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
```
#### Batch Predicting and Computing BLEU and ROUGE Scores
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
```
### QLoRA Fine-Tuning on a Single GPU
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
```
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
```
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
```
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
```
### LoRA Fine-Tuning on Multiple GPUs
#### Supervised Fine-Tuning on Single Node
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
```
#### Supervised Fine-Tuning on Multiple Nodes
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft.yaml
```
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_gpu/llama3_lora_sft_ds.yaml
```
### LoRA Fine-Tuning on Multiple NPUs
#### Supervised Fine-Tuning with DeepSpeed ZeRO-0
```bash
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/lora_multi_npu/llama3_lora_sft_ds.yaml
```
### Full-Parameter Fine-Tuning on Multiple GPUs
#### Supervised Fine-Tuning on Single Node
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
```
#### Supervised Fine-Tuning on Multiple Nodes
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
CUDA_VISIBLE_DEVICES=0,1,2,3 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/full_multi_gpu/llama3_full_sft.yaml
```
#### Batch Predicting and Computing BLEU and ROUGE Scores
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 llamafactory-cli train examples/full_multi_gpu/llama3_full_predict.yaml
```
### Merging LoRA Adapters and Quantization
#### Merge LoRA Adapters
Note: DO NOT use a quantized model or the `quantization_bit` argument when merging LoRA adapters.
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```
#### Quantizing Model using AutoGPTQ
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
```
### Inferring LoRA Fine-Tuned Models
Use `CUDA_VISIBLE_DEVICES=0,1` to infer models on multiple devices.
#### Use CLI
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
```
#### Use Web UI
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
```
#### Launch OpenAI-style API
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/inference/llama3_lora_sft.yaml
```
### Extras
#### Full-Parameter Fine-Tuning using GaLore
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
```
#### Full-Parameter Fine-Tuning using BAdam
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
```
#### LoRA+ Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
```
#### Mixture-of-Depths Fine-Tuning
```bash
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
```
#### LLaMA-Pro Fine-Tuning
```bash
bash examples/extras/llama_pro/expand.sh
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
```
#### FSDP+QLoRA Fine-Tuning
```bash
bash examples/extras/fsdp_qlora/single_node.sh
```

examples/accelerate/fsdp_config.yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch: BACKWARD_PRE
fsdp_forward_prefetch: false
fsdp_cpu_ram_efficient_loading: true
fsdp_offload_params: true # offload may affect training speed
fsdp_sharding_strategy: FULL_SHARD
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_sync_module_states: true
fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: fp16 # or bf16
num_machines: 1 # the number of nodes
num_processes: 2 # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false

examples/extras/badam/llama3_full_sft.yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: full
use_badam: true
badam_switch_mode: ascending
badam_switch_interval: 50
badam_verbose: 2
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500

examples/extras/fsdp_qlora/llama3_lora_sft.yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### ddp
ddp_timeout: 180000000
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500

examples/extras/fsdp_qlora/single_node.sh
#!/bin/bash
# DO NOT use GPTQ/AWQ model in FSDP+QLoRA
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
--config_file examples/accelerate/fsdp_config.yaml \
src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml

examples/extras/galore/llama3_full_sft.yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: full
use_galore: true
galore_layerwise: true
galore_target: mlp,self_attn
galore_rank: 128
galore_scale: 2.0
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 1
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500

examples/extras/llama_pro/expand.sh

@@ -0,0 +1,6 @@
#!/bin/bash
python scripts/llama_pro.py \
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
--output_dir models/llama3-8b-instruct-pro \
--num_expand 8

examples/extras/llama_pro/llama3_freeze_sft.yaml

@@ -0,0 +1,40 @@
### model
model_name_or_path: models/llama3-8b-instruct-pro
### method
stage: sft
do_train: true
finetuning_type: freeze
freeze_trainable_layers: 8
freeze_trainable_modules: all
use_llama_pro: true
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b-instruct-pro/freeze/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,39 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
loraplus_lr_ratio: 16.0
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500

examples/extras/mod/llama3_full_sft.yaml

@@ -0,0 +1,39 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: full
mixture_of_depths: convert
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b-mod/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
optim: paged_adamw_8bit
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,23 @@
### model
model_name_or_path: saves/llama3-8b/full/sft
### method
stage: sft
do_predict: true
finetuning_type: full
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/full/predict
overwrite_output_dir: true
### eval
per_device_eval_batch_size: 1
predict_with_generate: true


@@ -0,0 +1,41 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: full
### ddp
ddp_timeout: 180000000
deepspeed: examples/deepspeed/ds_z3_config.json
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,2 @@
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3


@@ -0,0 +1,4 @@
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora


@@ -0,0 +1,4 @@
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: vllm
vllm_enforce_eager: true


@@ -0,0 +1,41 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### ddp
ddp_timeout: 180000000
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,42 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### ddp
ddp_timeout: 180000000
deepspeed: examples/deepspeed/ds_z3_config.json
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,42 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### ddp
ddp_timeout: 180000000
deepspeed: examples/deepspeed/ds_z0_config.json
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,40 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: dpo
do_train: true
finetuning_type: lora
lora_target: all
pref_beta: 0.1
pref_loss: sigmoid # [sigmoid (dpo), orpo, simpo]
### dataset
dataset: dpo_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/dpo
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-6
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,19 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
### method
finetuning_type: lora
### dataset
task: mmlu
split: test
template: fewshot
lang: en
n_shot: 5
### output
save_dir: saves/llama3-8b/lora/eval
### eval
batch_size: 4


@@ -0,0 +1,38 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: kto
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: kto_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/kto
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-6
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,38 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
reward_model: saves/llama3-8b/lora/reward
### method
stage: ppo
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/ppo
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### generate
max_new_tokens: 512
top_k: 0
top_p: 0.9


@@ -0,0 +1,24 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
### method
stage: sft
do_predict: true
finetuning_type: lora
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/predict
overwrite_output_dir: true
### eval
per_device_eval_batch_size: 1
predict_with_generate: true


@@ -0,0 +1,37 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: pt
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: c4_demo
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,38 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: rm
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: dpo_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/reward
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,38 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,21 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
tokenized_path: saves/llama3-8b/dataset/sft
### output
output_dir: saves/llama3-8b/lora/sft
overwrite_output_dir: true


@@ -0,0 +1,39 @@
### model
model_name_or_path: llava-hf/llava-1.5-7b-hf
visual_inputs: true
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: mllm_demo
template: vicuna
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llava1_5-7b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,11 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
### export
export_dir: models/llama3_gptq
export_quantization_bit: 4
export_quantization_dataset: data/c4_demo.json
export_size: 2
export_device: cpu
export_legacy_format: false


@@ -0,0 +1,13 @@
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora
### export
export_dir: models/llama3_lora_sft
export_size: 2
export_device: cpu
export_legacy_format: false


@@ -0,0 +1,38 @@
### model
model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,38 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,39 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -0,0 +1,38 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500


@@ -1,3 +1,33 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[tool.ruff]
target-version = "py38"
line-length = 119
indent-width = 4
[tool.ruff.lint]
ignore = ["C408", "C901", "E501", "E731", "E741", "W605"]
select = ["C", "E", "F", "I", "W"]
[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["llamafactory"]
known-third-party = [
"accelerate",
"datasets",
"gradio",
"numpy",
"peft",
"torch",
"transformers",
"trl"
]
[tool.ruff.format]
quote-style = "double"
indent-style = "space"
docstring-code-format = true
skip-magic-trailing-comma = false
line-ending = "auto"


@@ -1,18 +1,19 @@
 torch>=1.13.1
-transformers>=4.29.1
-datasets>=2.12.0
-accelerate>=0.21.0
-peft>=0.4.0
-trl>=0.5.0
+transformers>=4.41.2
+datasets>=2.16.0
+accelerate>=0.30.1
+peft>=0.11.1
+trl>=0.8.6
+gradio>=4.0.0
 scipy
 einops
 sentencepiece
 tiktoken
 jieba
 rouge-chinese
 nltk
-gradio>=3.36.0
 protobuf
 uvicorn
-pydantic==1.10.11
-fastapi==0.95.1
+pydantic
+fastapi
 sse-starlette
-matplotlib
+matplotlib>=3.7.0
 fire
 packaging
+pyyaml

scripts/cal_flops.py

@@ -0,0 +1,31 @@
# coding=utf-8
# Calculates the FLOPs of pre-trained models.
# Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512
# Inspired by: https://www.deepspeed.ai/tutorials/flops-profiler/
import fire
import torch
from deepspeed.accelerator import get_accelerator # type: ignore
from deepspeed.profiling.flops_profiler import get_model_profile # type: ignore
from llamafactory.chat import ChatModel
def calculate_flops(
model_name_or_path: str,
batch_size: int = 1,
seq_length: int = 256,
flash_attn: str = "auto",
):
with get_accelerator().device(0):
chat_model = ChatModel(dict(model_name_or_path=model_name_or_path, template="empty", flash_attn=flash_attn))
fake_input = torch.ones((batch_size, seq_length), dtype=torch.long, device=chat_model.model.device)
input_dict = {"input_ids": fake_input, "labels": fake_input.clone()}
flops, macs, params = get_model_profile(chat_model.model, kwargs=input_dict, print_profile=True, detailed=True)
print("FLOPs:", flops)
print("MACs:", macs)
print("Params:", params)
if __name__ == "__main__":
fire.Fire(calculate_flops)

scripts/cal_lr.py

@@ -0,0 +1,76 @@
# coding=utf-8
# Calculates the optimal learning rate for 7B/13B models using LLaMA's hyper-parameters.
# Usage: python cal_lr.py --model_name_or_path path_to_model --dataset alpaca_en --cutoff_len 1024 --batch_size 16
# Inspired by: https://github.com/imoneoi/openchat/blob/master/ochat/training_deepspeed/train.py
import math
from typing import Literal
import fire
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq
from llamafactory.data import get_dataset
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.hparams import get_train_args
from llamafactory.model import load_tokenizer
BASE_LR = 3e-4 # 1.5e-4 for 30B-70B models
BASE_BS = 4_000_000 # from llama paper
def calculate_lr(
model_name_or_path: str,
batch_size: int, # total batch size, namely (batch size * gradient accumulation * world size)
stage: Literal["pt", "sft"] = "sft",
dataset: str = "alpaca_en",
dataset_dir: str = "data",
template: str = "default",
cutoff_len: int = 1024, # i.e. maximum input length during training
    is_mistral: bool = False,  # mistral models use a smaller learning rate
):
model_args, data_args, training_args, _, _ = get_train_args(
dict(
stage=stage,
model_name_or_path=model_name_or_path,
dataset=dataset,
dataset_dir=dataset_dir,
template=template,
cutoff_len=cutoff_len,
output_dir="dummy_dir",
overwrite_cache=True,
)
)
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)
if stage == "pt":
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
elif stage == "sft":
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX)
else:
raise NotImplementedError
dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
valid_tokens, total_tokens = 0, 0
for batch in tqdm(dataloader):
valid_tokens += torch.sum(batch["labels"] != IGNORE_INDEX).item()
total_tokens += torch.numel(batch["labels"])
batch_max_len = cutoff_len * batch_size # max tokens in a batch
valid_ratio = valid_tokens / total_tokens
batch_valid_len = batch_max_len * valid_ratio
lr = BASE_LR * math.sqrt(batch_valid_len / BASE_BS) # lr ~ sqrt(batch_size)
lr = lr / 6.0 if is_mistral else lr
print(
"Optimal learning rate is {:.2e} for valid ratio% {:.2f} and effective batch size {:.2f}".format(
lr, valid_ratio * 100, batch_valid_len
)
)
if __name__ == "__main__":
fire.Fire(calculate_lr)
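
As a quick sanity check on the square-root scaling rule above, here is a worked example with made-up numbers (not part of the script): 16 sequences of 1,024 tokens at a 60% valid-token ratio give an effective valid batch of about 9,830 tokens, which scales the base rate down to roughly 1.5e-5.

```python
import math

BASE_LR, BASE_BS = 3e-4, 4_000_000  # the constants defined in cal_lr.py
batch_valid_len = 16 * 1024 * 0.6   # hypothetical: 16 seqs of 1024 tokens, 60% valid
print(BASE_LR * math.sqrt(batch_valid_len / BASE_BS))  # ~1.49e-05
```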

scripts/cal_ppl.py

@@ -0,0 +1,116 @@
# coding=utf-8
# Calculates the perplexity of a pre-trained model on a given dataset.
# Usage: python cal_ppl.py --model_name_or_path path_to_model --save_name ppl.json
import json
from dataclasses import dataclass
from typing import Any, Dict, Literal, Optional, Sequence
import fire
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq
from llamafactory.data import get_dataset
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.hparams import get_train_args
from llamafactory.model import load_model, load_tokenizer
@dataclass
class PairwiseDataCollatorWithPadding(DataCollatorForSeq2Seq):
r"""
Data collator for pairwise data.
"""
train_on_prompt: bool = False
def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
r"""
Pads batched data to the longest sequence in the batch.
We generate 2 * n examples where the first n examples represent chosen examples and
the last n examples represent rejected examples.
"""
chosen_features = []
for feature in features:
prompt_len, answer_len = len(feature["prompt_ids"]), len(feature["chosen_ids"])
input_ids = feature["prompt_ids"] + feature["chosen_ids"]
attention_mask = [1] * (prompt_len + answer_len)
labels = input_ids if self.train_on_prompt else [IGNORE_INDEX] * prompt_len + feature["chosen_ids"]
chosen_features.append({"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels})
return super().__call__(chosen_features)
def cal_ppl(
model_name_or_path: str,
save_name: str,
batch_size: int = 4,
stage: Literal["pt", "sft", "rm"] = "sft",
dataset: str = "alpaca_en",
dataset_dir: str = "data",
template: str = "default",
cutoff_len: int = 1024,
max_samples: Optional[int] = None,
train_on_prompt: bool = False,
):
model_args, data_args, training_args, finetuning_args, _ = get_train_args(
dict(
stage=stage,
model_name_or_path=model_name_or_path,
dataset=dataset,
dataset_dir=dataset_dir,
template=template,
cutoff_len=cutoff_len,
max_samples=max_samples,
train_on_prompt=train_on_prompt,
output_dir="dummy_dir",
overwrite_cache=True,
)
)
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)
model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False)
if stage == "pt":
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
elif stage == "sft":
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX)
elif stage == "rm":
data_collator = PairwiseDataCollatorWithPadding(
tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX, train_on_prompt=train_on_prompt
)
else:
raise NotImplementedError
dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
criterion = torch.nn.CrossEntropyLoss(reduction="none")
total_ppl = 0
perplexities = []
batch: Dict[str, "torch.Tensor"]
with torch.no_grad():
for batch in tqdm(dataloader):
batch = batch.to(model.device)
outputs = model(**batch)
shift_logits: "torch.Tensor" = outputs["logits"][..., :-1, :]
shift_labels: "torch.Tensor" = batch["labels"][..., 1:]
loss_mask = shift_labels != IGNORE_INDEX
flatten_logits = shift_logits.contiguous().view(shift_labels.size(0) * shift_labels.size(1), -1)
flatten_labels = shift_labels.contiguous().view(-1)
token_logps: "torch.Tensor" = criterion(flatten_logits, flatten_labels)
token_logps = token_logps.contiguous().view(shift_logits.size(0), -1)
sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
total_ppl += sentence_logps.exp().sum().item()
perplexities.extend(sentence_logps.exp().tolist())
with open(save_name, "w", encoding="utf-8") as f:
json.dump(perplexities, f, indent=2)
print("Average perplexity is {:.2f}".format(total_ppl / len(perplexities)))
print("Perplexities have been saved at {}.".format(save_name))
if __name__ == "__main__":
fire.Fire(cal_ppl)
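
The sentence-level perplexity computed above is the exponential of the mean cross-entropy over non-masked tokens; a minimal self-contained sketch of that reduction with made-up losses:

```python
import torch

token_nll = torch.tensor([2.0, 1.0, 3.0])  # hypothetical per-token cross-entropy losses
loss_mask = torch.tensor([1.0, 1.0, 0.0])  # third label equals IGNORE_INDEX, so it is masked out
ppl = torch.exp((token_nll * loss_mask).sum() / loss_mask.sum())
print(ppl.item())  # exp(1.5) ~= 4.48
```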

scripts/length_cdf.py

@@ -0,0 +1,51 @@
# coding=utf-8
# Calculates the distribution of the input lengths in the dataset.
# Usage: python length_cdf.py --model_name_or_path path_to_model --dataset alpaca_en --template default
from collections import defaultdict
import fire
from tqdm import tqdm
from llamafactory.data import get_dataset
from llamafactory.hparams import get_train_args
from llamafactory.model import load_tokenizer
def length_cdf(
model_name_or_path: str,
dataset: str = "alpaca_en",
dataset_dir: str = "data",
template: str = "default",
interval: int = 1000,
):
model_args, data_args, training_args, _, _ = get_train_args(
dict(
stage="sft",
model_name_or_path=model_name_or_path,
dataset=dataset,
dataset_dir=dataset_dir,
template=template,
cutoff_len=1_000_000,
output_dir="dummy_dir",
overwrite_cache=True,
)
)
tokenizer_module = load_tokenizer(model_args)
trainset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
total_num = len(trainset)
length_dict = defaultdict(int)
for sample in tqdm(trainset["input_ids"]):
length_dict[len(sample) // interval * interval] += 1
length_tuples = list(length_dict.items())
length_tuples.sort()
count_accu, prob_accu = 0, 0
for length, count in length_tuples:
count_accu += count
prob_accu += count / total_num * 100
print("{:d} ({:.2f}%) samples have length < {}.".format(count_accu, prob_accu, length + interval))
if __name__ == "__main__":
fire.Fire(length_cdf)
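
The histogram key `len(sample) // interval * interval` floors each length to the start of its bucket; for example, with the default interval of 1000:

```python
interval = 1000
assert 2375 // interval * interval == 2000  # a 2375-token sample falls in the 2000 bucket
```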

scripts/llama_pro.py

@@ -0,0 +1,114 @@
# coding=utf-8
# Performs block expansion for LLaMA, Mistral, Qwen1.5 or Yi models.
# Usage: python llama_pro.py --model_name_or_path meta-llama/Llama-2-7b-hf --output_dir llama2_pro --num_expand 8
# Inspired by: https://github.com/TencentARC/LLaMA-Pro/blob/main/scripts/block_expansion.py
import json
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Optional
import fire
import torch
from safetensors.torch import save_file
from tqdm import tqdm
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from transformers.modeling_utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
shard_checkpoint,
)
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel
def change_name(name: str, old_index: int, new_index: int) -> str:
return name.replace(".{:d}.".format(old_index), ".{:d}.".format(new_index))
def block_expansion(
model_name_or_path: str,
output_dir: str,
num_expand: int,
shard_size: Optional[str] = "2GB",
save_safetensors: Optional[bool] = False,
):
config: "PretrainedConfig" = AutoConfig.from_pretrained(model_name_or_path)
num_layers = getattr(config, "num_hidden_layers")
setattr(config, "num_hidden_layers", num_layers + num_expand)
config.save_pretrained(output_dir)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
tokenizer.save_pretrained(output_dir)
config: "PretrainedConfig" = AutoConfig.from_pretrained(model_name_or_path) # load the original one
if save_safetensors:
setattr(config, "tie_word_embeddings", False) # safetensors does not allow shared weights
model: "PreTrainedModel" = AutoModelForCausalLM.from_pretrained(
model_name_or_path,
config=config,
torch_dtype="auto",
trust_remote_code=True,
low_cpu_mem_usage=True,
)
state_dict = model.state_dict()
if num_layers % num_expand != 0:
raise ValueError("`num_layers` {} should be divisible by `num_expand` {}.".format(num_layers, num_expand))
split = num_layers // num_expand
layer_cnt = 0
output_state_dict = OrderedDict()
for i in range(num_layers):
for key, value in state_dict.items():
if ".{:d}.".format(i) in key:
output_state_dict[change_name(key, i, layer_cnt)] = value
print("Add layer {} copied from layer {}".format(layer_cnt, i))
layer_cnt += 1
if (i + 1) % split == 0:
for key, value in state_dict.items():
if ".{:d}.".format(i) in key:
if "down_proj" in key or "o_proj" in key:
output_state_dict[change_name(key, i, layer_cnt)] = torch.zeros_like(value)
else:
output_state_dict[change_name(key, i, layer_cnt)] = torch.clone(value)
print("Add layer {} expanded from layer {}".format(layer_cnt, i))
layer_cnt += 1
for key, value in state_dict.items():
if key not in output_state_dict:
output_state_dict[key] = value
weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME
shards, index = shard_checkpoint(output_state_dict, max_shard_size=shard_size, weights_name=weights_name)
for shard_file, shard in tqdm(shards.items(), desc="Save weights"):
if save_safetensors:
save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"})
else:
torch.save(shard, os.path.join(output_dir, shard_file))
if index is None:
print("Model weights saved in {}".format(os.path.join(output_dir, weights_name)))
else:
index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME
with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f:
json.dump(index, f, indent=2, sort_keys=True)
print("Model weights saved in {}".format(output_dir))
print("Fine-tune this model with:")
print("model_name_or_path: {}".format(output_dir))
print("finetuning_type: freeze")
print("freeze_trainable_layers: {}".format(num_expand))
print("use_llama_pro: true")
if __name__ == "__main__":
fire.Fire(block_expansion)
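
Zero-initializing `down_proj` and `o_proj` makes every inserted block an identity function at the start of training: its residual branch outputs zeros, so the skip connection passes activations through unchanged. A minimal sketch of that property with toy shapes:

```python
import torch

x = torch.randn(4, 8)                    # hypothetical hidden states
w_zero = torch.zeros(8, 8)               # zero-initialized o_proj / down_proj weight
branch_out = torch.randn(4, 8) @ w_zero  # whatever the block computes projects to zero
assert torch.equal(x + branch_out, x)    # the residual connection leaves x unchanged
```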

scripts/llamafy_baichuan2.py

@@ -0,0 +1,92 @@
# coding=utf-8
# Converts the Baichuan2-7B model into the same format as LLaMA2-7B.
# Usage: python llamafy_baichuan2.py --input_dir input --output_dir output
# Inspired by: https://huggingface.co/fireballoon/baichuan-llama-7b/blob/main/convert_baichuan_to_llama.py
# Converted model: https://huggingface.co/hiyouga/Baichuan2-7B-Base-LLaMAfied
import json
import os
from collections import OrderedDict
from typing import Any, Dict, Optional
import fire
import torch
from safetensors.torch import save_file
from tqdm import tqdm
from transformers.modeling_utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
shard_checkpoint,
)
CONFIG_NAME = "config.json"
def save_weight(input_dir: str, output_dir: str, shard_size: str, save_safetensors: bool):
baichuan2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
for filepath in tqdm(os.listdir(input_dir), desc="Load weights"):
if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".bin"):
shard_weight = torch.load(os.path.join(input_dir, filepath), map_location="cpu")
baichuan2_state_dict.update(shard_weight)
llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
for key, value in tqdm(baichuan2_state_dict.items(), desc="Convert format"):
if "W_pack" in key:
proj_size = value.size(0) // 3
llama2_state_dict[key.replace("W_pack", "q_proj")] = value[:proj_size, :]
llama2_state_dict[key.replace("W_pack", "k_proj")] = value[proj_size : 2 * proj_size, :]
llama2_state_dict[key.replace("W_pack", "v_proj")] = value[2 * proj_size :, :]
elif "lm_head" in key:
llama2_state_dict[key] = torch.nn.functional.normalize(value)
else:
llama2_state_dict[key] = value
weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME
shards, index = shard_checkpoint(llama2_state_dict, max_shard_size=shard_size, weights_name=weights_name)
for shard_file, shard in tqdm(shards.items(), desc="Save weights"):
if save_safetensors:
save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"})
else:
torch.save(shard, os.path.join(output_dir, shard_file))
if index is None:
print("Model weights saved in {}".format(os.path.join(output_dir, WEIGHTS_NAME)))
else:
index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME
with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f:
json.dump(index, f, indent=2, sort_keys=True)
print("Model weights saved in {}".format(output_dir))
def save_config(input_dir: str, output_dir: str):
with open(os.path.join(input_dir, CONFIG_NAME), "r", encoding="utf-8") as f:
llama2_config_dict: Dict[str, Any] = json.load(f)
llama2_config_dict["architectures"] = ["LlamaForCausalLM"]
llama2_config_dict.pop("auto_map", None)
llama2_config_dict.pop("tokenizer_class", None)
llama2_config_dict["model_type"] = "llama"
with open(os.path.join(output_dir, CONFIG_NAME), "w", encoding="utf-8") as f:
json.dump(llama2_config_dict, f, indent=2)
print("Model config saved in {}".format(os.path.join(output_dir, CONFIG_NAME)))
def llamafy_baichuan2(
input_dir: str, output_dir: str, shard_size: Optional[str] = "2GB", save_safetensors: Optional[bool] = False
):
try:
os.makedirs(output_dir, exist_ok=False)
except Exception as e:
raise print("Output dir already exists", e)
save_weight(input_dir, output_dir, shard_size, save_safetensors)
save_config(input_dir, output_dir)
if __name__ == "__main__":
fire.Fire(llamafy_baichuan2)
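
The `W_pack` branch above splits Baichuan2's fused QKV weight into three equal row blocks; the slicing is equivalent to a `chunk` along dim 0 (toy dimensions for illustration):

```python
import torch

hidden_size = 8
w_pack = torch.randn(3 * hidden_size, hidden_size)  # toy stand-in for the fused QKV weight
q_proj, k_proj, v_proj = w_pack.chunk(3, dim=0)     # same result as the row slices in save_weight
assert torch.equal(q_proj, w_pack[:hidden_size, :])
```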

scripts/llamafy_qwen.py

@@ -0,0 +1,144 @@
# coding=utf-8
# Converts the Qwen models into the same format as LLaMA2.
# Usage: python llamafy_qwen.py --input_dir input --output_dir output
# Converted model: https://huggingface.co/hiyouga/Qwen-14B-Chat-LLaMAfied
import json
import os
from collections import OrderedDict
from typing import Any, Dict, Optional
import fire
import torch
from safetensors import safe_open
from safetensors.torch import save_file
from tqdm import tqdm
from transformers.modeling_utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
shard_checkpoint,
)
from transformers.utils import check_min_version
try:
check_min_version("4.34.0")
except Exception:
raise ValueError("Please upgrade `transformers` to 4.34.0")
CONFIG_NAME = "config.json"
def save_weight(input_dir: str, output_dir: str, shard_size: str, save_safetensors: bool) -> str:
qwen_state_dict: Dict[str, torch.Tensor] = OrderedDict()
for filepath in tqdm(os.listdir(input_dir), desc="Load weights"):
if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".safetensors"):
with safe_open(os.path.join(input_dir, filepath), framework="pt", device="cpu") as f:
for key in f.keys():
qwen_state_dict[key] = f.get_tensor(key)
llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
torch_dtype = None
for key, value in tqdm(qwen_state_dict.items(), desc="Convert format"):
if torch_dtype is None:
torch_dtype = value.dtype
if "wte" in key:
llama2_state_dict["model.embed_tokens.weight"] = value
elif "ln_f" in key:
llama2_state_dict["model.norm.weight"] = value
else:
key = key.replace("transformer.h", "model.layers")
if "attn.c_attn" in key:
proj_size = value.size(0) // 3
llama2_state_dict[key.replace("attn.c_attn", "self_attn.q_proj")] = value[:proj_size, ...]
llama2_state_dict[key.replace("attn.c_attn", "self_attn.k_proj")] = value[
proj_size : 2 * proj_size, ...
]
llama2_state_dict[key.replace("attn.c_attn", "self_attn.v_proj")] = value[2 * proj_size :, ...]
elif "attn.c_proj" in key:
llama2_state_dict[key.replace("attn.c_proj", "self_attn.o_proj")] = value
llama2_state_dict[key.replace("attn.c_proj.weight", "self_attn.o_proj.bias")] = torch.zeros_like(
value[:, 0]
).squeeze()
elif "ln_1" in key:
llama2_state_dict[key.replace("ln_1", "input_layernorm")] = value
elif "ln_2" in key:
llama2_state_dict[key.replace("ln_2", "post_attention_layernorm")] = value
elif "mlp.w1" in key:
llama2_state_dict[key.replace("mlp.w1", "mlp.up_proj")] = value
elif "mlp.w2" in key:
llama2_state_dict[key.replace("mlp.w2", "mlp.gate_proj")] = value
elif "mlp.c_proj" in key:
llama2_state_dict[key.replace("mlp.c_proj", "mlp.down_proj")] = value
elif "lm_head" in key:
llama2_state_dict[key] = value
else:
raise KeyError("Unable to process key {}".format(key))
weights_name = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME
shards, index = shard_checkpoint(llama2_state_dict, max_shard_size=shard_size, weights_name=weights_name)
for shard_file, shard in tqdm(shards.items(), desc="Save weights"):
if save_safetensors:
save_file(shard, os.path.join(output_dir, shard_file), metadata={"format": "pt"})
else:
torch.save(shard, os.path.join(output_dir, shard_file))
if index is None:
print("Model weights saved in {}".format(os.path.join(output_dir, weights_name)))
else:
index_name = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME
with open(os.path.join(output_dir, index_name), "w", encoding="utf-8") as f:
json.dump(index, f, indent=2, sort_keys=True)
print("Model weights saved in {}".format(output_dir))
return str(torch_dtype).replace("torch.", "")
def save_config(input_dir: str, output_dir: str, torch_dtype: str):
with open(os.path.join(input_dir, CONFIG_NAME), "r", encoding="utf-8") as f:
qwen_config_dict: Dict[str, Any] = json.load(f)
llama2_config_dict: Dict[str, Any] = OrderedDict()
llama2_config_dict["architectures"] = ["LlamaForCausalLM"]
llama2_config_dict["hidden_act"] = "silu"
llama2_config_dict["hidden_size"] = qwen_config_dict["hidden_size"]
llama2_config_dict["initializer_range"] = qwen_config_dict["initializer_range"]
llama2_config_dict["intermediate_size"] = qwen_config_dict["intermediate_size"] // 2
llama2_config_dict["max_position_embeddings"] = qwen_config_dict["max_position_embeddings"]
llama2_config_dict["model_type"] = "llama"
llama2_config_dict["num_attention_heads"] = qwen_config_dict["num_attention_heads"]
llama2_config_dict["num_hidden_layers"] = qwen_config_dict["num_hidden_layers"]
llama2_config_dict["num_key_value_heads"] = qwen_config_dict["hidden_size"] // qwen_config_dict["kv_channels"]
llama2_config_dict["pretraining_tp"] = 1
llama2_config_dict["rms_norm_eps"] = qwen_config_dict["layer_norm_epsilon"]
llama2_config_dict["rope_scaling"] = None
llama2_config_dict["tie_word_embeddings"] = qwen_config_dict["tie_word_embeddings"]
llama2_config_dict["torch_dtype"] = torch_dtype
llama2_config_dict["transformers_version"] = "4.34.0"
llama2_config_dict["use_cache"] = True
llama2_config_dict["vocab_size"] = qwen_config_dict["vocab_size"]
llama2_config_dict["attention_bias"] = True
with open(os.path.join(output_dir, CONFIG_NAME), "w", encoding="utf-8") as f:
json.dump(llama2_config_dict, f, indent=2)
print("Model config saved in {}".format(os.path.join(output_dir, CONFIG_NAME)))
def llamafy_qwen(
input_dir: str, output_dir: str, shard_size: Optional[str] = "2GB", save_safetensors: Optional[bool] = False
):
try:
os.makedirs(output_dir, exist_ok=False)
except Exception as e:
raise print("Output dir already exists", e)
torch_dtype = save_weight(input_dir, output_dir, shard_size, save_safetensors)
save_config(input_dir, output_dir, torch_dtype)
if __name__ == "__main__":
fire.Fire(llamafy_qwen)

scripts/loftq_init.py

@@ -0,0 +1,82 @@
# coding=utf-8
# Initializes LoRA weights with LoRA-fine-tuning-aware Quantization (LoftQ)
# Usage: python loftq_init.py --model_name_or_path path_to_model --save_dir output_dir
# Inspired by: https://github.com/huggingface/peft/blob/main/examples/loftq_finetuning/quantize_save_load.py
import os
from typing import TYPE_CHECKING, Optional
import fire
import torch
import torch.nn as nn
from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer
if TYPE_CHECKING:
from transformers import PreTrainedModel
class Shell(nn.Module):
def __init__(self, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
super().__init__()
self.weight = nn.Parameter(weight, requires_grad=False)
if bias is not None:
self.bias = nn.Parameter(bias, requires_grad=False)
def unwrap_model(model: nn.Module, pattern=".base_layer") -> None:
for name in {k.split(pattern)[0] for k, _ in model.named_modules() if pattern in k}:
parent_name = ".".join(name.split(".")[:-1])
child_name = name.split(".")[-1]
parent_module = model.get_submodule(parent_name)
child_module = getattr(parent_module, child_name)
base_layer = getattr(child_module, "base_layer")
weight = getattr(base_layer, "weight", None)
bias = getattr(base_layer, "bias", None)
setattr(parent_module, child_name, Shell(weight, bias))
print("Model unwrapped.")
def quantize_loftq(
model_name_or_path: str,
save_dir: str,
loftq_bits: Optional[int] = 4,
loftq_iter: Optional[int] = 1,
lora_alpha: Optional[int] = None,
lora_rank: Optional[int] = 16,
lora_target: Optional[str] = "q_proj,v_proj",
save_safetensors: Optional[bool] = False,
):
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
loftq_config = LoftQConfig(loftq_bits=loftq_bits, loftq_iter=loftq_iter)
lora_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=True,
r=lora_rank,
lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
lora_dropout=0.1,
target_modules=[name.strip() for name in lora_target.split(",")],
init_lora_weights="loftq",
loftq_config=loftq_config,
)
# Init LoftQ model
lora_model = get_peft_model(model, lora_config)
base_model: "PreTrainedModel" = lora_model.get_base_model()
# Save LoftQ model
setattr(lora_model.base_model.peft_config["default"], "base_model_name_or_path", save_dir)
setattr(lora_model.base_model.peft_config["default"], "init_lora_weights", True)
lora_model.save_pretrained(os.path.join(save_dir, "adapters"), safe_serialization=save_safetensors)
# Save base model
unwrap_model(base_model)
base_model.save_pretrained(save_dir, safe_serialization=save_safetensors)
tokenizer.save_pretrained(save_dir)
if __name__ == "__main__":
fire.Fire(quantize_loftq)
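
The script leaves two artifacts behind: the unwrapped base model in `save_dir` and the LoftQ-initialized adapters in `save_dir/adapters`. A sketch of how they might be reloaded with standard transformers/peft calls (paths assume the script's defaults):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model = AutoModelForCausalLM.from_pretrained("save_dir", torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained("save_dir")
model = PeftModel.from_pretrained(base_model, "save_dir/adapters", is_trainable=True)
```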

scripts/test_toolcall.py

@@ -0,0 +1,64 @@
import json
import os
from typing import Sequence
from openai import OpenAI
from transformers.utils.versions import require_version
require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")
def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
grade_to_score = {"A": 4, "B": 3, "C": 2}
total_score, total_hour = 0, 0
for grade, hour in zip(grades, hours):
total_score += grade_to_score[grade] * hour
total_hour += hour
return round(total_score / total_hour, 2)
def main():
client = OpenAI(
api_key="{}".format(os.environ.get("API_KEY", "0")),
base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
)
tools = [
{
"type": "function",
"function": {
"name": "calculate_gpa",
"description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
"parameters": {
"type": "object",
"properties": {
"grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
"hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
},
"required": ["grades", "hours"],
},
},
}
]
tool_map = {"calculate_gpa": calculate_gpa}
messages = []
messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."})
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
if result.choices[0].message.tool_calls is None:
raise ValueError("Cannot retrieve function call from the response.")
messages.append(result.choices[0].message)
tool_call = result.choices[0].message.tool_calls[0].function
print(tool_call)
# Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
name, arguments = tool_call.name, json.loads(tool_call.arguments)
tool_result = tool_map[name](**arguments)
messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
print(result.choices[0].message.content)
# Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42.
if __name__ == "__main__":
main()

setup.py

@@ -1,13 +1,14 @@
 import os
 import re
-from setuptools import setup, find_packages
+from setuptools import find_packages, setup


 def get_version():
-    with open(os.path.join("src", "llmtuner", "__init__.py"), "r", encoding="utf-8") as f:
+    with open(os.path.join("src", "llamafactory", "extras", "env.py"), "r", encoding="utf-8") as f:
         file_content = f.read()
-        pattern = r"{0}\W*=\W*\"([^\"]+)\"".format("__version__")
-        version, = re.findall(pattern, file_content)
+        pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
+        (version,) = re.findall(pattern, file_content)
         return version
@@ -18,25 +19,44 @@ def get_requires():
     return lines


-def main():
+extra_require = {
+    "torch": ["torch>=1.13.1"],
+    "torch-npu": ["torch==2.1.0", "torch-npu==2.1.0.post3", "decorator"],
+    "metrics": ["nltk", "jieba", "rouge-chinese"],
+    "deepspeed": ["deepspeed>=0.10.0,<=0.14.0"],
+    "bitsandbytes": ["bitsandbytes>=0.39.0"],
+    "vllm": ["vllm>=0.4.3"],
+    "galore": ["galore-torch"],
+    "badam": ["badam"],
+    "gptq": ["optimum>=1.16.0", "auto-gptq>=0.5.0"],
+    "awq": ["autoawq"],
+    "aqlm": ["aqlm[gpu]>=1.1.0"],
+    "qwen": ["transformers_stream_generator"],
+    "modelscope": ["modelscope"],
+    "dev": ["ruff", "pytest"],
+}
+
+
+def main():
     setup(
-        name="llmtuner",
+        name="llamafactory",
         version=get_version(),
         author="hiyouga",
         author_email="hiyouga" "@" "buaa.edu.cn",
-        description="Easy-to-use fine-tuning framework using PEFT",
+        description="Easy-to-use LLM fine-tuning framework",
         long_description=open("README.md", "r", encoding="utf-8").read(),
         long_description_content_type="text/markdown",
         keywords=["LLaMA", "BLOOM", "Falcon", "LLM", "ChatGPT", "transformer", "pytorch", "deep learning"],
         license="Apache 2.0 License",
-        url="https://github.com/hiyouga/LLaMA-Efficient-Tuning",
+        url="https://github.com/hiyouga/LLaMA-Factory",
         package_dir={"": "src"},
         packages=find_packages("src"),
         python_requires=">=3.8.0",
         install_requires=get_requires(),
         extras_require=extra_require,
+        entry_points={"console_scripts": ["llamafactory-cli = llamafactory.cli:main"]},
         classifiers=[
-            "Development Status :: 3 - Alpha",
+            "Development Status :: 4 - Beta",
             "Intended Audience :: Developers",
             "Intended Audience :: Education",
             "Intended Audience :: Science/Research",
@@ -46,8 +66,9 @@ def main():
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
],
)

src/api.py

@@ -0,0 +1,19 @@
import os
import uvicorn
from llamafactory.api.app import create_app
from llamafactory.chat import ChatModel
def main():
chat_model = ChatModel()
app = create_app(chat_model)
api_host = os.environ.get("API_HOST", "0.0.0.0")
api_port = int(os.environ.get("API_PORT", "8000"))
print("Visit http://localhost:{}/docs for API document.".format(api_port))
uvicorn.run(app, host=api_host, port=api_port)
if __name__ == "__main__":
main()

src/api_demo.py

@@ -1,14 +0,0 @@
import uvicorn
from llmtuner import ChatModel, create_app
def main():
chat_model = ChatModel()
app = create_app(chat_model)
uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
print("Visit http://localhost:8000/docs for API document.")
if __name__ == "__main__":
main()

src/cli_demo.py

@@ -1,38 +0,0 @@
from llmtuner import ChatModel
def main():
chat_model = ChatModel()
history = []
print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.")
while True:
try:
query = input("\nUser: ")
except UnicodeDecodeError:
print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.")
continue
except Exception:
raise
if query.strip() == "exit":
break
if query.strip() == "clear":
history = []
print("History has been removed.")
continue
print("Assistant: ", end="", flush=True)
response = ""
for new_text in chat_model.stream_chat(query, history):
print(new_text, end="", flush=True)
response += new_text
print()
history = history + [(query, response)]
if __name__ == "__main__":
main()

src/export_model.py

@@ -1,9 +0,0 @@
from llmtuner import export_model
def main():
export_model()
if __name__ == "__main__":
main()

src/llamafactory/__init__.py

@@ -0,0 +1,6 @@
# Level: api, webui > chat, eval, train > data, model > hparams > extras
from .cli import VERSION
__version__ = VERSION

src/llamafactory/api/app.py

@@ -0,0 +1,108 @@
import os
from contextlib import asynccontextmanager
from typing import Optional
from typing_extensions import Annotated
from ..chat import ChatModel
from ..extras.misc import torch_gc
from ..extras.packages import is_fastapi_available, is_starlette_available, is_uvicorn_available
from .chat import (
create_chat_completion_response,
create_score_evaluation_response,
create_stream_chat_completion_response,
)
from .protocol import (
ChatCompletionRequest,
ChatCompletionResponse,
ModelCard,
ModelList,
ScoreEvaluationRequest,
ScoreEvaluationResponse,
)
if is_fastapi_available():
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer
if is_starlette_available():
from sse_starlette import EventSourceResponse
if is_uvicorn_available():
import uvicorn
@asynccontextmanager
async def lifespan(app: "FastAPI"):  # frees GPU memory when the app shuts down
yield
torch_gc()
def create_app(chat_model: "ChatModel") -> "FastAPI":
app = FastAPI(lifespan=lifespan)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
api_key = os.environ.get("API_KEY")
security = HTTPBearer(auto_error=False)
async def verify_api_key(auth: Annotated[Optional[HTTPAuthorizationCredentials], Depends(security)]):
if api_key and (auth is None or auth.credentials != api_key):
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key.")
@app.get(
"/v1/models",
response_model=ModelList,
status_code=status.HTTP_200_OK,
dependencies=[Depends(verify_api_key)],
)
async def list_models():
model_card = ModelCard(id="gpt-3.5-turbo")
return ModelList(data=[model_card])
@app.post(
"/v1/chat/completions",
response_model=ChatCompletionResponse,
status_code=status.HTTP_200_OK,
dependencies=[Depends(verify_api_key)],
)
async def create_chat_completion(request: ChatCompletionRequest):
if not chat_model.engine.can_generate:
raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")
if request.stream:
generate = create_stream_chat_completion_response(request, chat_model)
return EventSourceResponse(generate, media_type="text/event-stream")
else:
return await create_chat_completion_response(request, chat_model)
@app.post(
"/v1/score/evaluation",
response_model=ScoreEvaluationResponse,
status_code=status.HTTP_200_OK,
dependencies=[Depends(verify_api_key)],
)
async def create_score_evaluation(request: ScoreEvaluationRequest):
if chat_model.engine.can_generate:
raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")
return await create_score_evaluation_response(request, chat_model)
return app
def run_api() -> None:
chat_model = ChatModel()
app = create_app(chat_model)
api_host = os.environ.get("API_HOST", "0.0.0.0")
api_port = int(os.environ.get("API_PORT", "8000"))
print("Visit http://localhost:{}/docs for API document.".format(api_port))
uvicorn.run(app, host=api_host, port=api_port)

src/llamafactory/api/chat.py

@@ -0,0 +1,219 @@
import base64
import io
import json
import os
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, Tuple
from ..data import Role as DataRole
from ..extras.logging import get_logger
from ..extras.packages import is_fastapi_available, is_pillow_available, is_requests_available
from .common import dictify, jsonify
from .protocol import (
ChatCompletionMessage,
ChatCompletionResponse,
ChatCompletionResponseChoice,
ChatCompletionResponseUsage,
ChatCompletionStreamResponse,
ChatCompletionStreamResponseChoice,
Finish,
Function,
FunctionCall,
Role,
ScoreEvaluationResponse,
)
if is_fastapi_available():
from fastapi import HTTPException, status
if is_pillow_available():
from PIL import Image
if is_requests_available():
import requests
if TYPE_CHECKING:
from numpy.typing import NDArray
from ..chat import ChatModel
from .protocol import ChatCompletionRequest, ScoreEvaluationRequest
logger = get_logger(__name__)
ROLE_MAPPING = {
Role.USER: DataRole.USER.value,
Role.ASSISTANT: DataRole.ASSISTANT.value,
Role.SYSTEM: DataRole.SYSTEM.value,
Role.FUNCTION: DataRole.FUNCTION.value,
Role.TOOL: DataRole.OBSERVATION.value,
}
def _process_request(
request: "ChatCompletionRequest",
) -> Tuple[List[Dict[str, str]], Optional[str], Optional[str], Optional["NDArray"]]:
logger.info("==== request ====\n{}".format(json.dumps(dictify(request), indent=2, ensure_ascii=False)))
if len(request.messages) == 0:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length")
if request.messages[0].role == Role.SYSTEM:
system = request.messages.pop(0).content
else:
system = None
if len(request.messages) % 2 == 0:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...")
input_messages = []
image = None
for i, message in enumerate(request.messages):
if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")
elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")
if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls):
name = message.tool_calls[0].function.name
arguments = message.tool_calls[0].function.arguments
content = json.dumps({"name": name, "argument": arguments}, ensure_ascii=False)
input_messages.append({"role": ROLE_MAPPING[Role.FUNCTION], "content": content})
elif isinstance(message.content, list):
for input_item in message.content:
if input_item.type == "text":
input_messages.append({"role": ROLE_MAPPING[message.role], "content": input_item.text})
else:
image_url = input_item.image_url.url
if image_url.startswith("data:image"): # base64 image
image_data = base64.b64decode(image_url.split(",", maxsplit=1)[1])
image_path = io.BytesIO(image_data)
elif os.path.isfile(image_url): # local file
image_path = open(image_url, "rb")
else: # web uri
image_path = requests.get(image_url, stream=True).raw
image = Image.open(image_path).convert("RGB")
else:
input_messages.append({"role": ROLE_MAPPING[message.role], "content": message.content})
tool_list = request.tools
if isinstance(tool_list, list) and len(tool_list):
try:
tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False)
except Exception:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools")
else:
tools = None
return input_messages, system, tools, image
def _create_stream_chat_completion_chunk(
completion_id: str,
model: str,
delta: "ChatCompletionMessage",
index: Optional[int] = 0,
finish_reason: Optional["Finish"] = None,
) -> str:
choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason)
chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data])
return jsonify(chunk)
async def create_chat_completion_response(
request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> "ChatCompletionResponse":
completion_id = "chatcmpl-{}".format(uuid.uuid4().hex)
input_messages, system, tools, image = _process_request(request)
responses = await chat_model.achat(
input_messages,
system,
tools,
image,
do_sample=request.do_sample,
temperature=request.temperature,
top_p=request.top_p,
max_new_tokens=request.max_tokens,
num_return_sequences=request.n,
stop=request.stop,
)
prompt_length, response_length = 0, 0
choices = []
for i, response in enumerate(responses):
if tools:
result = chat_model.engine.template.format_tools.extract(response.response_text)
else:
result = response.response_text
if isinstance(result, tuple):
name, arguments = result
function = Function(name=name, arguments=arguments)
tool_call = FunctionCall(id="call_{}".format(uuid.uuid4().hex), function=function)
response_message = ChatCompletionMessage(role=Role.ASSISTANT, tool_calls=[tool_call])
finish_reason = Finish.TOOL
else:
response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result)
finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH
choices.append(ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason))
prompt_length = response.prompt_length
response_length += response.response_length
usage = ChatCompletionResponseUsage(
prompt_tokens=prompt_length,
completion_tokens=response_length,
total_tokens=prompt_length + response_length,
)
return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage)
async def create_stream_chat_completion_response(
request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> AsyncGenerator[str, None]:
completion_id = "chatcmpl-{}".format(uuid.uuid4().hex)
input_messages, system, tools, image = _process_request(request)
if tools:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.")
if request.n > 1:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream multiple responses.")
yield _create_stream_chat_completion_chunk(
completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(role=Role.ASSISTANT, content="")
)
async for new_token in chat_model.astream_chat(
input_messages,
system,
tools,
image,
do_sample=request.do_sample,
temperature=request.temperature,
top_p=request.top_p,
max_new_tokens=request.max_tokens,
stop=request.stop,
):
if len(new_token) != 0:
yield _create_stream_chat_completion_chunk(
completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token)
)
yield _create_stream_chat_completion_chunk(
completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP
)
yield "[DONE]"
async def create_score_evaluation_response(
request: "ScoreEvaluationRequest", chat_model: "ChatModel"
) -> "ScoreEvaluationResponse":
if len(request.messages) == 0:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")
scores = await chat_model.aget_scores(request.messages, max_length=request.max_length)
return ScoreEvaluationResponse(model=request.model, scores=scores)
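For reference, a minimal client-side sketch against the OpenAI-style endpoint these handlers back, assuming the route is mounted at /v1/chat/completions as in OpenAI's API; the host, port, and model name below are illustrative, not part of this diff:

import requests

payload = {
    "model": "llama3",  # placeholder model name
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.7,
    "max_tokens": 128,
}
# assumes the API server was started locally, e.g. on port 8000
resp = requests.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=60)
print(resp.json()["choices"][0]["message"]["content"])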


@@ -0,0 +1,20 @@
import json
from typing import TYPE_CHECKING, Any, Dict
if TYPE_CHECKING:
from pydantic import BaseModel
def dictify(data: "BaseModel") -> Dict[str, Any]:
try: # pydantic v2
return data.model_dump(exclude_unset=True)
except AttributeError: # pydantic v1
return data.dict(exclude_unset=True)
def jsonify(data: "BaseModel") -> str:
try: # pydantic v2
return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
except AttributeError: # pydantic v1
return data.json(exclude_unset=True, ensure_ascii=False)
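A quick usage sketch for these helpers; they exist so the code runs under both pydantic v1 and v2, and exclude_unset=True keeps never-assigned optional fields out of the serialized output. The model below is illustrative:

from pydantic import BaseModel

class Greeting(BaseModel):  # illustrative model for demonstration
    text: str
    lang: str = "en"

msg = Greeting(text="hi")
print(dictify(msg))  # {'text': 'hi'} -- 'lang' was never set, so it is excluded
print(jsonify(msg))  # {"text": "hi"}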


@@ -0,0 +1,139 @@
import time
from enum import Enum, unique
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Field
from typing_extensions import Literal
@unique
class Role(str, Enum):
USER = "user"
ASSISTANT = "assistant"
SYSTEM = "system"
FUNCTION = "function"
TOOL = "tool"
@unique
class Finish(str, Enum):
STOP = "stop"
LENGTH = "length"
TOOL = "tool_calls"
class ModelCard(BaseModel):
id: str
object: Literal["model"] = "model"
created: int = Field(default_factory=lambda: int(time.time()))
owned_by: Literal["owner"] = "owner"
class ModelList(BaseModel):
object: Literal["list"] = "list"
data: List[ModelCard] = []
class Function(BaseModel):
name: str
arguments: str
class FunctionDefinition(BaseModel):
name: str
description: str
parameters: Dict[str, Any]
class FunctionAvailable(BaseModel):
type: Literal["function", "code_interpreter"] = "function"
function: Optional[FunctionDefinition] = None
class FunctionCall(BaseModel):
id: str
type: Literal["function"] = "function"
function: Function
class ImageURL(BaseModel):
url: str
class MultimodalInputItem(BaseModel):
type: Literal["text", "image_url"]
text: Optional[str] = None
image_url: Optional[ImageURL] = None
class ChatMessage(BaseModel):
role: Role
content: Optional[Union[str, List[MultimodalInputItem]]] = None
tool_calls: Optional[List[FunctionCall]] = None
class ChatCompletionMessage(BaseModel):
role: Optional[Role] = None
content: Optional[str] = None
tool_calls: Optional[List[FunctionCall]] = None
class ChatCompletionRequest(BaseModel):
model: str
messages: List[ChatMessage]
tools: Optional[List[FunctionAvailable]] = None
do_sample: bool = True
temperature: Optional[float] = None
top_p: Optional[float] = None
n: int = 1
max_tokens: Optional[int] = None
stop: Optional[Union[str, List[str]]] = None
stream: bool = False
class ChatCompletionResponseChoice(BaseModel):
index: int
message: ChatCompletionMessage
finish_reason: Finish
class ChatCompletionStreamResponseChoice(BaseModel):
index: int
delta: ChatCompletionMessage
finish_reason: Optional[Finish] = None
class ChatCompletionResponseUsage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class ChatCompletionResponse(BaseModel):
id: str
object: Literal["chat.completion"] = "chat.completion"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: List[ChatCompletionResponseChoice]
usage: ChatCompletionResponseUsage
class ChatCompletionStreamResponse(BaseModel):
id: str
object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: List[ChatCompletionStreamResponseChoice]
class ScoreEvaluationRequest(BaseModel):
model: str
messages: List[str]
max_length: Optional[int] = None
class ScoreEvaluationResponse(BaseModel):
id: str
object: Literal["score.evaluation"] = "score.evaluation"
model: str
scores: List[float]
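These models mirror the OpenAI chat wire format; a sketch of how an incoming JSON body maps onto them (the payload is illustrative):

payload = {
    "model": "llama3",  # placeholder
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi"},
    ],
}
request = ChatCompletionRequest(**payload)
assert request.messages[1].role == Role.USER
assert request.n == 1 and request.stream is False  # defaults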


@@ -0,0 +1,5 @@
from .base_engine import BaseEngine
from .chat_model import ChatModel
__all__ = ["BaseEngine", "ChatModel"]


@@ -0,0 +1,69 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Literal, Optional, Sequence, Union
if TYPE_CHECKING:
from numpy.typing import NDArray
from transformers import PreTrainedModel, PreTrainedTokenizer
from vllm import AsyncLLMEngine
from ..data import Template
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
@dataclass
class Response:
response_text: str
response_length: int
prompt_length: int
finish_reason: Literal["stop", "length"]
class BaseEngine(ABC):
model: Union["PreTrainedModel", "AsyncLLMEngine"]
tokenizer: "PreTrainedTokenizer"
can_generate: bool
template: "Template"
generating_args: Dict[str, Any]
@abstractmethod
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None: ...
@abstractmethod
async def start(
self,
) -> None: ...
@abstractmethod
async def chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]: ...
@abstractmethod
async def stream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]: ...
@abstractmethod
async def get_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]: ...


@@ -0,0 +1,140 @@
import asyncio
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence
from ..extras.misc import torch_gc
from ..hparams import get_infer_args
from .hf_engine import HuggingfaceEngine
from .vllm_engine import VllmEngine
if TYPE_CHECKING:
from numpy.typing import NDArray
from .base_engine import BaseEngine, Response
def _start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
class ChatModel:
def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
model_args, data_args, finetuning_args, generating_args = get_infer_args(args)
if model_args.infer_backend == "huggingface":
self.engine: "BaseEngine" = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
elif model_args.infer_backend == "vllm":
self.engine: "BaseEngine" = VllmEngine(model_args, data_args, finetuning_args, generating_args)
else:
raise NotImplementedError("Unknown backend: {}".format(model_args.infer_backend))
self._loop = asyncio.new_event_loop()
self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
self._thread.start()
asyncio.run_coroutine_threadsafe(self.engine.start(), self._loop)
def chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]:
task = asyncio.run_coroutine_threadsafe(self.achat(messages, system, tools, image, **input_kwargs), self._loop)
return task.result()
async def achat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]:
return await self.engine.chat(messages, system, tools, image, **input_kwargs)
def stream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> Generator[str, None, None]:
generator = self.astream_chat(messages, system, tools, image, **input_kwargs)
while True:
try:
task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop)
yield task.result()
except StopAsyncIteration:
break
async def astream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
async for new_token in self.engine.stream_chat(messages, system, tools, image, **input_kwargs):
yield new_token
def get_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]:
task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop)
return task.result()
async def aget_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]:
return await self.engine.get_scores(batch_input, **input_kwargs)
def run_chat() -> None:
try:
import platform
if platform.system() != "Windows":
import readline # noqa: F401
except ImportError:
print("Install `readline` for a better experience.")
chat_model = ChatModel()
messages = []
print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.")
while True:
try:
query = input("\nUser: ")
except UnicodeDecodeError:
print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.")
continue
except Exception:
raise
if query.strip() == "exit":
break
if query.strip() == "clear":
messages = []
torch_gc()
print("History has been removed.")
continue
messages.append({"role": "user", "content": query})
print("Assistant: ", end="", flush=True)
response = ""
for new_text in chat_model.stream_chat(messages):
print(new_text, end="", flush=True)
response += new_text
print()
messages.append({"role": "assistant", "content": response})


@@ -0,0 +1,324 @@
import asyncio
import concurrent.futures
import os
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from transformers import GenerationConfig, TextIteratorStreamer
from ..data import get_template_and_fix_tokenizer
from ..extras.logging import get_logger
from ..extras.misc import get_logits_processor
from ..model import load_model, load_tokenizer
from .base_engine import BaseEngine, Response
if TYPE_CHECKING:
from numpy.typing import NDArray
from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
from transformers.image_processing_utils import BaseImageProcessor
from trl import PreTrainedModelWrapper
from ..data import Template
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
logger = get_logger(__name__)
class HuggingfaceEngine(BaseEngine):
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
self.can_generate = finetuning_args.stage == "sft"
tokenizer_module = load_tokenizer(model_args)
self.tokenizer = tokenizer_module["tokenizer"]
self.processor = tokenizer_module["processor"]
self.tokenizer.padding_side = "left" if self.can_generate else "right"
self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template)
self.model = load_model(
self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate)
) # must be loaded after fixing the tokenizer to resize the vocab
self.generating_args = generating_args.to_dict()
@staticmethod
def _process_args(
model: "PreTrainedModel",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
template: "Template",
generating_args: Dict[str, Any],
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
input_kwargs: Optional[Dict[str, Any]] = {},
) -> Tuple[Dict[str, Any], int]:
if (
processor is not None
and image is not None
and not hasattr(processor, "image_seq_length")
and template.image_token not in messages[0]["content"]
): # llava-like models
messages[0]["content"] = template.image_token + messages[0]["content"]
paired_messages = messages + [{"role": "assistant", "content": ""}]
system = system or generating_args["default_system"]
pixel_values = None
prompt_ids, _ = template.encode_oneturn(
tokenizer=tokenizer, messages=paired_messages, system=system, tools=tools
)
if processor is not None and image is not None: # add image features
image_processor: "BaseImageProcessor" = getattr(processor, "image_processor")
batch_feature = image_processor(image, return_tensors="pt")
pixel_values = batch_feature.to(model.device)["pixel_values"] # shape (B, C, H, W)
if hasattr(processor, "image_seq_length"): # paligemma models
image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids
prompt_length = len(prompt_ids)
inputs = torch.tensor([prompt_ids], device=model.device)
attention_mask = torch.ones_like(inputs, dtype=torch.bool)
do_sample: Optional[bool] = input_kwargs.pop("do_sample", None)
temperature: Optional[float] = input_kwargs.pop("temperature", None)
top_p: Optional[float] = input_kwargs.pop("top_p", None)
top_k: Optional[float] = input_kwargs.pop("top_k", None)
num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None)
max_length: Optional[int] = input_kwargs.pop("max_length", None)
max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None)
if stop is not None:
logger.warning("Stop parameter is not supported in Huggingface engine yet.")
generating_args = generating_args.copy()
generating_args.update(
dict(
do_sample=do_sample if do_sample is not None else generating_args["do_sample"],
temperature=temperature if temperature is not None else generating_args["temperature"],
top_p=top_p if top_p is not None else generating_args["top_p"],
top_k=top_k if top_k is not None else generating_args["top_k"],
num_return_sequences=num_return_sequences,
repetition_penalty=repetition_penalty
if repetition_penalty is not None
else generating_args["repetition_penalty"],
length_penalty=length_penalty if length_penalty is not None else generating_args["length_penalty"],
eos_token_id=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids,
pad_token_id=tokenizer.pad_token_id,
)
)
if isinstance(num_return_sequences, int) and num_return_sequences > 1: # do_sample needs temperature > 0
generating_args["do_sample"] = True
generating_args["temperature"] = generating_args["temperature"] or 1.0
if not generating_args["temperature"]:
generating_args["do_sample"] = False
if not generating_args["do_sample"]:
generating_args.pop("temperature", None)
generating_args.pop("top_p", None)
if max_length:
generating_args.pop("max_new_tokens", None)
generating_args["max_length"] = max_length
if max_new_tokens:
generating_args.pop("max_length", None)
generating_args["max_new_tokens"] = max_new_tokens
gen_kwargs = dict(
inputs=inputs,
attention_mask=attention_mask,
generation_config=GenerationConfig(**generating_args),
logits_processor=get_logits_processor(),
)
if pixel_values is not None:
gen_kwargs["pixel_values"] = pixel_values
return gen_kwargs, prompt_length
@staticmethod
@torch.inference_mode()
def _chat(
model: "PreTrainedModel",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
template: "Template",
generating_args: Dict[str, Any],
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
input_kwargs: Optional[Dict[str, Any]] = {},
) -> List["Response"]:
gen_kwargs, prompt_length = HuggingfaceEngine._process_args(
model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs
)
generate_output = model.generate(**gen_kwargs)
response_ids = generate_output[:, prompt_length:]
response = tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
results = []
for i in range(len(response)):
eos_index = (response_ids[i] == tokenizer.eos_token_id).nonzero()
response_length = (eos_index[0].item() + 1) if len(eos_index) else len(response_ids[i])
results.append(
Response(
response_text=response[i],
response_length=response_length,
prompt_length=prompt_length,
finish_reason="stop" if len(eos_index) else "length",
)
)
return results
@staticmethod
@torch.inference_mode()
def _stream_chat(
model: "PreTrainedModel",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
template: "Template",
generating_args: Dict[str, Any],
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
input_kwargs: Optional[Dict[str, Any]] = {},
) -> Callable[[], str]:
gen_kwargs, _ = HuggingfaceEngine._process_args(
model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs
)
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
gen_kwargs["streamer"] = streamer
thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
thread.start()
def stream():
try:
return streamer.__next__()
except StopIteration:
raise StopAsyncIteration()
return stream
@staticmethod
@torch.inference_mode()
def _get_scores(
model: "PreTrainedModelWrapper",
tokenizer: "PreTrainedTokenizer",
batch_input: List[str],
input_kwargs: Optional[Dict[str, Any]] = {},
) -> List[float]:
max_length = input_kwargs.pop("max_length", None)
device = getattr(model.pretrained_model, "device", "cuda")
inputs = tokenizer(
batch_input,
padding=True,
truncation=True,
max_length=max_length or getattr(model.config, "max_position_embeddings", 1024),
return_tensors="pt",
add_special_tokens=True,
).to(device)
input_ids: torch.Tensor = inputs["input_ids"]
_, _, values = model(**inputs, output_hidden_states=True, return_dict=True)
if getattr(model.config, "model_type", None) == "chatglm":
values = torch.transpose(values, 0, 1)
scores = []
for i in range(input_ids.size(0)):
end_indexes = (input_ids[i] != tokenizer.pad_token_id).nonzero()
end_index = end_indexes[-1].item() if len(end_indexes) else 0
scores.append(values[i, end_index].nan_to_num().item())
return scores
async def start(self) -> None:
self._semaphore = asyncio.Semaphore(int(os.environ.get("MAX_CONCURRENT", 1)))
async def chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]:
if not self.can_generate:
raise ValueError("The current model does not support `chat`.")
loop = asyncio.get_running_loop()
input_args = (
self.model,
self.tokenizer,
self.processor,
self.template,
self.generating_args,
messages,
system,
tools,
image,
input_kwargs,
)
async with self._semaphore:
with concurrent.futures.ThreadPoolExecutor() as pool:
return await loop.run_in_executor(pool, self._chat, *input_args)
async def stream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
if not self.can_generate:
raise ValueError("The current model does not support `stream_chat`.")
loop = asyncio.get_running_loop()
input_args = (
self.model,
self.tokenizer,
self.processor,
self.template,
self.generating_args,
messages,
system,
tools,
image,
input_kwargs,
)
async with self._semaphore:
with concurrent.futures.ThreadPoolExecutor() as pool:
stream = self._stream_chat(*input_args)
while True:
try:
yield await loop.run_in_executor(pool, stream)
except StopAsyncIteration:
break
async def get_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]:
if self.can_generate:
raise ValueError("Cannot get scores using an auto-regressive model.")
loop = asyncio.get_running_loop()
input_args = (self.model, self.tokenizer, batch_input, input_kwargs)
async with self._semaphore:
with concurrent.futures.ThreadPoolExecutor() as pool:
return await loop.run_in_executor(pool, self._get_scores, *input_args)
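Because each generate call runs in a worker thread gated by the MAX_CONCURRENT semaphore, callers can safely fan out requests; a sketch assuming an already-constructed engine:

import asyncio

async def fan_out(engine: "HuggingfaceEngine", prompts):
    await engine.start()  # creates the semaphore
    tasks = [engine.chat([{"role": "user", "content": p}]) for p in prompts]
    return await asyncio.gather(*tasks)

# results = asyncio.run(fan_out(engine, ["Hi there", "Hello"]))  # illustrative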


@@ -0,0 +1,214 @@
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, AsyncIterator, Dict, List, Optional, Sequence, Union
from ..data import get_template_and_fix_tokenizer
from ..extras.logging import get_logger
from ..extras.misc import get_device_count
from ..extras.packages import is_vllm_available
from ..model import load_config, load_tokenizer
from ..model.model_utils.visual import LlavaMultiModalProjectorForYiVLForVLLM
from .base_engine import BaseEngine, Response
if is_vllm_available():
from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams
from vllm.lora.request import LoRARequest
from vllm.sequence import MultiModalData
if TYPE_CHECKING:
from numpy.typing import NDArray
from transformers.image_processing_utils import BaseImageProcessor
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
logger = get_logger(__name__)
class VllmEngine(BaseEngine):
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
config = load_config(model_args) # may download model from ms hub
self.can_generate = finetuning_args.stage == "sft"
tokenizer_module = load_tokenizer(model_args)
self.tokenizer = tokenizer_module["tokenizer"]
self.processor = tokenizer_module["processor"]
self.tokenizer.padding_side = "left"
self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template)
self.generating_args = generating_args.to_dict()
engine_args = {
"model": model_args.model_name_or_path,
"trust_remote_code": True,
"download_dir": model_args.cache_dir,
"dtype": model_args.vllm_dtype,
"max_model_len": model_args.vllm_maxlen,
"tensor_parallel_size": get_device_count() or 1,
"gpu_memory_utilization": model_args.vllm_gpu_util,
"disable_log_stats": True,
"disable_log_requests": True,
"enforce_eager": model_args.vllm_enforce_eager,
"enable_lora": model_args.adapter_name_or_path is not None,
"max_lora_rank": model_args.vllm_max_lora_rank,
}
if model_args.visual_inputs:
image_size = config.vision_config.image_size
patch_size = config.vision_config.patch_size
self.image_feature_size = (image_size // patch_size) ** 2
engine_args["image_input_type"] = "pixel_values"
engine_args["image_token_id"] = self.tokenizer.convert_tokens_to_ids(self.template.image_token)
engine_args["image_input_shape"] = "1,3,{},{}".format(image_size, image_size)
engine_args["image_feature_size"] = self.image_feature_size
if getattr(config, "is_yi_vl_derived_model", None):
import vllm.model_executor.models.llava
logger.info("Detected Yi-VL model, applying projector patch.")
vllm.model_executor.models.llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVLForVLLM
self.model = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**engine_args))
if model_args.adapter_name_or_path is not None:
self.lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
else:
self.lora_request = None
async def _generate(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncIterator["RequestOutput"]:
request_id = "chatcmpl-{}".format(uuid.uuid4().hex)
if (
self.processor is not None
and image is not None
and not hasattr(self.processor, "image_seq_length")
and self.template.image_token not in messages[0]["content"]
): # llava-like models (TODO: paligemma models)
messages[0]["content"] = self.template.image_token * self.image_feature_size + messages[0]["content"]
paired_messages = messages + [{"role": "assistant", "content": ""}]
system = system or self.generating_args["default_system"]
prompt_ids, _ = self.template.encode_oneturn(
tokenizer=self.tokenizer, messages=paired_messages, system=system, tools=tools
)
if self.processor is not None and image is not None: # add image features
image_processor: "BaseImageProcessor" = getattr(self.processor, "image_processor")
pixel_values = image_processor(image, return_tensors="pt")["pixel_values"]
multi_modal_data = MultiModalData(type=MultiModalData.Type.IMAGE, data=pixel_values)
else:
multi_modal_data = None
prompt_length = len(prompt_ids)
use_beam_search: bool = self.generating_args["num_beams"] > 1
temperature: Optional[float] = input_kwargs.pop("temperature", None)
top_p: Optional[float] = input_kwargs.pop("top_p", None)
top_k: Optional[float] = input_kwargs.pop("top_k", None)
num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None)
max_length: Optional[int] = input_kwargs.pop("max_length", None)
max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None)
if "max_new_tokens" in self.generating_args:
max_tokens = self.generating_args["max_new_tokens"]
elif "max_length" in self.generating_args:
if self.generating_args["max_length"] > prompt_length:
max_tokens = self.generating_args["max_length"] - prompt_length
else:
max_tokens = 1
if max_length:
max_tokens = max_length - prompt_length if max_length > prompt_length else 1
if max_new_tokens:
max_tokens = max_new_tokens
sampling_params = SamplingParams(
n=num_return_sequences,
repetition_penalty=(
repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"]
)
or 1.0, # repetition_penalty must be > 0
temperature=temperature if temperature is not None else self.generating_args["temperature"],
top_p=(top_p if top_p is not None else self.generating_args["top_p"]) or 1.0, # top_p must be > 0
top_k=top_k if top_k is not None else self.generating_args["top_k"],
use_beam_search=use_beam_search,
length_penalty=length_penalty if length_penalty is not None else self.generating_args["length_penalty"],
stop=stop,
stop_token_ids=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
max_tokens=max_tokens,
skip_special_tokens=True,
)
result_generator = self.model.generate(
inputs={"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data},
sampling_params=sampling_params,
request_id=request_id,
lora_request=self.lora_request,
)
return result_generator
async def start(self) -> None:
pass
async def chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]:
final_output = None
generator = await self._generate(messages, system, tools, image, **input_kwargs)
async for request_output in generator:
final_output = request_output
results = []
for output in final_output.outputs:
results.append(
Response(
response_text=output.text,
response_length=len(output.token_ids),
prompt_length=len(final_output.prompt_token_ids),
finish_reason=output.finish_reason,
)
)
return results
async def stream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
generated_text = ""
generator = await self._generate(messages, system, tools, image, **input_kwargs)
async for result in generator:
delta_text = result.outputs[0].text[len(generated_text) :]
generated_text = result.outputs[0].text
yield delta_text
async def get_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]:
raise NotImplementedError("vLLM engine does not support get_scores.")

src/llamafactory/cli.py

@@ -0,0 +1,106 @@
import os
import random
import subprocess
import sys
from enum import Enum, unique
from . import launcher
from .api.app import run_api
from .chat.chat_model import run_chat
from .eval.evaluator import run_eval
from .extras.env import VERSION, print_env
from .extras.logging import get_logger
from .extras.misc import get_device_count
from .train.tuner import export_model, run_exp
from .webui.interface import run_web_demo, run_web_ui
USAGE = (
"-" * 70
+ "\n"
+ "| Usage: |\n"
+ "| llamafactory-cli api -h: launch an OpenAI-style API server |\n"
+ "| llamafactory-cli chat -h: launch a chat interface in CLI |\n"
+ "| llamafactory-cli eval -h: evaluate models |\n"
+ "| llamafactory-cli export -h: merge LoRA adapters and export model |\n"
+ "| llamafactory-cli train -h: train models |\n"
+ "| llamafactory-cli webchat -h: launch a chat interface in Web UI |\n"
+ "| llamafactory-cli webui: launch LlamaBoard |\n"
+ "| llamafactory-cli version: show version info |\n"
+ "-" * 70
)
WELCOME = (
"-" * 58
+ "\n"
+ "| Welcome to LLaMA Factory, version {}".format(VERSION)
+ " " * (21 - len(VERSION))
+ "|\n|"
+ " " * 56
+ "|\n"
+ "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n"
+ "-" * 58
)
logger = get_logger(__name__)
@unique
class Command(str, Enum):
API = "api"
CHAT = "chat"
ENV = "env"
EVAL = "eval"
EXPORT = "export"
TRAIN = "train"
WEBDEMO = "webchat"
WEBUI = "webui"
VER = "version"
HELP = "help"
def main():
command = sys.argv.pop(1) if len(sys.argv) > 1 else Command.HELP
if command == Command.API:
run_api()
elif command == Command.CHAT:
run_chat()
elif command == Command.ENV:
print_env()
elif command == Command.EVAL:
run_eval()
elif command == Command.EXPORT:
export_model()
elif command == Command.TRAIN:
force_torchrun = os.environ.get("FORCE_TORCHRUN", "0").lower() in ["true", "1"]
if force_torchrun or get_device_count() > 1:
master_addr = os.environ.get("MASTER_ADDR", "127.0.0.1")
master_port = os.environ.get("MASTER_PORT", str(random.randint(20001, 29999)))
logger.info("Initializing distributed tasks at: {}:{}".format(master_addr, master_port))
subprocess.run(
(
"torchrun --nnodes {nnodes} --node_rank {node_rank} --nproc_per_node {nproc_per_node} "
"--master_addr {master_addr} --master_port {master_port} {file_name} {args}"
).format(
nnodes=os.environ.get("NNODES", "1"),
node_rank=os.environ.get("RANK", "0"),
nproc_per_node=os.environ.get("NPROC_PER_NODE", str(get_device_count())),
master_addr=master_addr,
master_port=master_port,
file_name=launcher.__file__,
args=" ".join(sys.argv[1:]),
),
shell=True,
)
else:
run_exp()
elif command == Command.WEBDEMO:
run_web_demo()
elif command == Command.WEBUI:
run_web_ui()
elif command == Command.VER:
print(WELCOME)
elif command == Command.HELP:
print(USAGE)
else:
raise NotImplementedError("Unknown command: {}".format(command))


@@ -0,0 +1,16 @@
from .collator import KTODataCollatorWithPadding, PairwiseDataCollatorWithPadding
from .data_utils import Role, split_dataset
from .loader import get_dataset
from .template import TEMPLATES, Template, get_template_and_fix_tokenizer
__all__ = [
"KTODataCollatorWithPadding",
"PairwiseDataCollatorWithPadding",
"Role",
"split_dataset",
"get_dataset",
"TEMPLATES",
"Template",
"get_template_and_fix_tokenizer",
]


@@ -0,0 +1,221 @@
import os
from functools import partial
from typing import TYPE_CHECKING, Any, Dict, List, Union
from datasets import Features
from ..extras.logging import get_logger
from .data_utils import Role
if TYPE_CHECKING:
from datasets import Dataset, IterableDataset
from ..hparams import DataArguments
from .parser import DatasetAttr
logger = get_logger(__name__)
def _convert_images(images: List[Any], dataset_attr: "DatasetAttr", data_args: "DataArguments") -> List[Any]:
r"""
Optionally concatenates the image path with the dataset directory when loading from local disk.
"""
outputs = []
if dataset_attr.load_from in ["script", "file"]:
for image in images:
if isinstance(image, str) and os.path.isfile(os.path.join(data_args.dataset_dir, image)):
outputs.append(os.path.join(data_args.dataset_dir, image))
else:
outputs.append(image)
return outputs
def convert_alpaca(
examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr", data_args: "DataArguments"
) -> Dict[str, List[Any]]:
r"""
Converts alpaca format dataset to the standard format.
"""
outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []}
convert_images = partial(_convert_images, dataset_attr=dataset_attr, data_args=data_args)
for i in range(len(examples[dataset_attr.prompt])):
prompt = []
if dataset_attr.history and isinstance(examples[dataset_attr.history][i], list):
for old_prompt, old_response in examples[dataset_attr.history][i]:
prompt.append({"role": Role.USER.value, "content": old_prompt})
prompt.append({"role": Role.ASSISTANT.value, "content": old_response})
content = []
if dataset_attr.prompt and examples[dataset_attr.prompt][i]:
content.append(examples[dataset_attr.prompt][i])
if dataset_attr.query and examples[dataset_attr.query][i]:
content.append(examples[dataset_attr.query][i])
prompt.append({"role": Role.USER.value, "content": "\n".join(content)}) # "prompt\nquery"
if dataset_attr.kto_tag and isinstance(examples[dataset_attr.kto_tag][i], bool): # kto example
response = [{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.response][i]}]
if examples[dataset_attr.kto_tag][i]:
response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
else:
response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
elif (
dataset_attr.ranking
and isinstance(examples[dataset_attr.chosen][i], str)
and isinstance(examples[dataset_attr.rejected][i], str)
): # pairwise example
response = [
{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.chosen][i]},
{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.rejected][i]},
]
elif dataset_attr.response and isinstance(examples[dataset_attr.response][i], str): # normal example
response = [{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.response][i]}]
else: # unsupervised
response = []
outputs["prompt"].append(prompt)
outputs["response"].append(response)
outputs["system"].append(examples[dataset_attr.system][i] if dataset_attr.system else "")
outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "")
outputs["images"].append(convert_images(examples[dataset_attr.images][i]) if dataset_attr.images else [])
return outputs
def convert_sharegpt(
examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr", data_args: "DataArguments"
) -> Dict[str, List[Any]]:
r"""
Converts sharegpt format dataset to the standard format.
"""
outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []}
convert_images = partial(_convert_images, dataset_attr=dataset_attr, data_args=data_args)
tag_mapping = {
dataset_attr.user_tag: Role.USER.value,
dataset_attr.assistant_tag: Role.ASSISTANT.value,
dataset_attr.observation_tag: Role.OBSERVATION.value,
dataset_attr.function_tag: Role.FUNCTION.value,
dataset_attr.system_tag: Role.SYSTEM.value,
}
odd_tags = (dataset_attr.user_tag, dataset_attr.observation_tag)
even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag)
accept_tags = (odd_tags, even_tags)
for i, messages in enumerate(examples[dataset_attr.messages]):
if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag:
system = messages[0][dataset_attr.content_tag]
messages = messages[1:]
else:
system = examples[dataset_attr.system][i] if dataset_attr.system else ""
if len(messages) == 0:
continue
aligned_messages = []
broken_data = False
for turn_idx, message in enumerate(messages):
if message[dataset_attr.role_tag] not in accept_tags[turn_idx % 2]:
logger.warning("Invalid role tag in {}.".format(messages))
broken_data = True
aligned_messages.append(
{"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]}
)
if (not dataset_attr.ranking and len(aligned_messages) % 2 != 0) or (
dataset_attr.ranking and len(aligned_messages) % 2 == 0
):
logger.warning("Invalid message count in {}.".format(messages))
broken_data = True
if dataset_attr.kto_tag and isinstance(examples[dataset_attr.kto_tag][i], bool): # kto example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
if examples[dataset_attr.kto_tag][i]:
response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
else:
response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
elif (
dataset_attr.ranking
and isinstance(examples[dataset_attr.chosen][i], dict)
and isinstance(examples[dataset_attr.rejected][i], dict)
): # pairwise example
chosen = examples[dataset_attr.chosen][i]
rejected = examples[dataset_attr.rejected][i]
if (
chosen[dataset_attr.role_tag] not in accept_tags[-1]
or rejected[dataset_attr.role_tag] not in accept_tags[-1]
):
logger.warning("Invalid role tag in {}.".format([chosen, rejected]))
broken_data = True
prompt = aligned_messages
response = [
{"role": tag_mapping[chosen[dataset_attr.role_tag]], "content": chosen[dataset_attr.content_tag]},
{"role": tag_mapping[rejected[dataset_attr.role_tag]], "content": rejected[dataset_attr.content_tag]},
]
else: # normal example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
if broken_data:
logger.warning("Skipping this abnormal example.")
continue
outputs["prompt"].append(prompt)
outputs["response"].append(response)
outputs["system"].append(system)
outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "")
outputs["images"].append(convert_images(examples[dataset_attr.images][i]) if dataset_attr.images else [])
return outputs
def align_dataset(
dataset: Union["Dataset", "IterableDataset"], dataset_attr: "DatasetAttr", data_args: "DataArguments"
) -> Union["Dataset", "IterableDataset"]:
r"""
Aligned dataset:
prompt: [{"role": "user", "content": "..."}] * (2T - 1)
response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset)
system: "..."
tools: "...",
images: [],
"""
if dataset_attr.formatting == "alpaca":
convert_func = partial(convert_alpaca, dataset_attr=dataset_attr, data_args=data_args)
else:
convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr, data_args=data_args)
column_names = list(next(iter(dataset)).keys())
features = Features.from_dict(
{
"prompt": [
{"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}}
],
"response": [
{"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}}
],
"system": {"dtype": "string", "_type": "Value"},
"tools": {"dtype": "string", "_type": "Value"},
"images": [{"_type": "Image"}],
}
)
kwargs = {}
if not data_args.streaming:
kwargs = dict(
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=(not data_args.overwrite_cache),
desc="Converting format of dataset",
)
return dataset.map(
convert_func,
batched=True,
remove_columns=column_names,
features=features,
**kwargs,
)
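A concrete before/after for convert_alpaca, assuming a DatasetAttr with prompt="instruction", query="input", and response="output" (illustrative column names):

examples = {
    "instruction": ["Translate to French"],
    "input": ["Good morning"],
    "output": ["Bonjour"],
}
# convert_alpaca(examples, dataset_attr, data_args) would then produce:
# {
#     "prompt":   [[{"role": "user", "content": "Translate to French\nGood morning"}]],
#     "response": [[{"role": "assistant", "content": "Bonjour"}]],
#     "system":   [""],
#     "tools":    [""],
#     "images":   [[]],
# }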


@@ -0,0 +1,81 @@
from dataclasses import dataclass
from typing import Any, Dict, Sequence
import torch
from transformers import DataCollatorForSeq2Seq
@dataclass
class PairwiseDataCollatorWithPadding(DataCollatorForSeq2Seq):
r"""
Data collator for pairwise data.
"""
def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
r"""
Pads batched data to the longest sequence in the batch.
We generate 2 * n examples where the first n examples represent chosen examples and
the last n examples represent rejected examples.
"""
concatenated_features = []
for key in ("chosen", "rejected"):
for feature in features:
target_feature = {
"input_ids": feature["{}_input_ids".format(key)],
"attention_mask": feature["{}_attention_mask".format(key)],
"labels": feature["{}_labels".format(key)],
}
if "pixel_values" in feature:
target_feature["pixel_values"] = feature["pixel_values"]
if "{}_token_type_ids".format(key) in feature:
target_feature["token_type_ids"] = feature["{}_token_type_ids".format(key)]
concatenated_features.append(target_feature)
return super().__call__(concatenated_features)
@dataclass
class KTODataCollatorWithPadding(DataCollatorForSeq2Seq):
r"""
Data collator for KTO data.
"""
def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
target_features = []
kl_features = []
kto_tags = []
for feature in features:
target_feature = {
"input_ids": feature["input_ids"],
"attention_mask": feature["attention_mask"],
"labels": feature["labels"],
}
kl_feature = {
"input_ids": feature["kl_input_ids"],
"attention_mask": feature["kl_attention_mask"],
"labels": feature["kl_labels"],
}
if "pixel_values" in feature:
target_feature["pixel_values"] = feature["pixel_values"]
if "token_type_ids" in feature:
target_feature["token_type_ids"] = feature["token_type_ids"]
kl_feature["token_type_ids"] = feature["kl_token_type_ids"]
target_features.append(target_feature)
kl_features.append(kl_feature)
kto_tags.append(feature["kto_tags"])
batch = super().__call__(target_features)
kl_batch = super().__call__(kl_features)
batch["kl_input_ids"] = kl_batch["input_ids"]
batch["kl_attention_mask"] = kl_batch["attention_mask"]
batch["kl_labels"] = kl_batch["labels"]
if "token_type_ids" in batch:
batch["kl_token_type_ids"] = kl_batch["token_type_ids"]
batch["kto_tags"] = torch.tensor(kto_tags)
return batch
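To make the 2 * n layout concrete, a sketch of what PairwiseDataCollatorWithPadding consumes and emits; the tokenizer choice and token ids below are illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # any tokenizer with a pad token works
tokenizer.pad_token = tokenizer.eos_token
collator = PairwiseDataCollatorWithPadding(tokenizer=tokenizer, label_pad_token_id=-100)
features = [{
    "chosen_input_ids": [1, 2, 3], "chosen_attention_mask": [1, 1, 1], "chosen_labels": [1, 2, 3],
    "rejected_input_ids": [4, 5], "rejected_attention_mask": [1, 1], "rejected_labels": [4, 5],
}]
batch = collator(features)
# batch["input_ids"] has shape (2, 3): row 0 is the chosen sequence,
# row 1 is the rejected sequence padded to the same length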

Some files were not shown because too many files have changed in this diff.