Compare commits
1170 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e2299e261b | ||
|
|
8a44dce326 | ||
|
|
6d9233833b | ||
|
|
d019603835 | ||
|
|
478e8194d9 | ||
|
|
1890d3dafe | ||
|
|
522a3e8493 | ||
|
|
18968405d0 | ||
|
|
71a1c1321a | ||
|
|
cf58a6d860 | ||
|
|
9adc0a2c3f | ||
|
|
16419b2834 | ||
|
|
82a2bac866 | ||
|
|
151ef48b40 | ||
|
|
a255c3a476 | ||
|
|
f4ec4fa6ad | ||
|
|
2635794727 | ||
|
|
d2f845d70d | ||
|
|
bb8aba5abf | ||
|
|
9f16c50155 | ||
|
|
25bb9f5ad9 | ||
|
|
7b985f55db | ||
|
|
fd0357a26d | ||
|
|
31f9daa362 | ||
|
|
15ea576246 | ||
|
|
19a6916d80 | ||
|
|
585c475f71 | ||
|
|
e62dae37fe | ||
|
|
11672f760d | ||
|
|
b9f84900ee | ||
|
|
5f65558088 | ||
|
|
0f54a78144 | ||
|
|
2986bef530 | ||
|
|
065f7fb5da | ||
|
|
c1d5073bd3 | ||
|
|
ee46011b34 | ||
|
|
d55f420206 | ||
|
|
fcf75633a0 | ||
|
|
e77ced045d | ||
|
|
331f53381f | ||
|
|
1d675a287d | ||
|
|
be33ef67fb | ||
|
|
f5cd17881e | ||
|
|
c09b648934 | ||
|
|
f2fd9d1b25 | ||
|
|
167342af8a | ||
|
|
76f9bd1820 | ||
|
|
a893505924 | ||
|
|
ed25e051a9 | ||
|
|
5e5fc337f9 | ||
|
|
58e9ca8aa0 | ||
|
|
a4c4b8496f | ||
|
|
38c9641777 | ||
|
|
8b8fdb3a85 | ||
|
|
290057069e | ||
|
|
46203856fc | ||
|
|
80b89978d9 | ||
|
|
5a221d91f9 | ||
|
|
3a3f4072e5 | ||
|
|
0c0cdc26bc | ||
|
|
2581cc844b | ||
|
|
d58fcd094e | ||
|
|
86063e27ea | ||
|
|
88eafd865b | ||
|
|
3f7bd98bfa | ||
|
|
b72c4bd118 | ||
|
|
808ff89a2d | ||
|
|
6d7f1299bd | ||
|
|
0420a608ca | ||
|
|
2047eab723 | ||
|
|
e11b40c344 | ||
|
|
b869506a57 | ||
|
|
72d5b06b08 | ||
|
|
94726bdc8d | ||
|
|
4d1791e905 | ||
|
|
528e06ccaa | ||
|
|
fec641ec82 | ||
|
|
8f401e37f8 | ||
|
|
9feb78e7b4 | ||
|
|
c2022431aa | ||
|
|
0817c24c04 | ||
|
|
cfb926fb84 | ||
|
|
34746d6151 | ||
|
|
5bb447b118 | ||
|
|
a28261a866 | ||
|
|
800de98dc8 | ||
|
|
222423bcef | ||
|
|
e71737351f | ||
|
|
4f298894da | ||
|
|
a8fae3869d | ||
|
|
db9b977e4f | ||
|
|
87d685b59f | ||
|
|
e4046bdd1f | ||
|
|
5baa3add8c | ||
|
|
332f637592 | ||
|
|
31daa6570b | ||
|
|
33525a34b6 | ||
|
|
3607caa2ad | ||
|
|
0fc2e19279 | ||
|
|
ef994600db | ||
|
|
7638f1070e | ||
|
|
c2120432db | ||
|
|
66184762e8 | ||
|
|
41a9e231cb | ||
|
|
1bb06e06df | ||
|
|
381f7120e6 | ||
|
|
f7857c83e1 | ||
|
|
d0da6f40b0 | ||
|
|
28d145a066 | ||
|
|
ae32c148d1 | ||
|
|
2a05941b14 | ||
|
|
11c38b9173 | ||
|
|
73c1c15b62 | ||
|
|
7f58bf984f | ||
|
|
ec552372ba | ||
|
|
17d32fb5c7 | ||
|
|
4b61610b12 | ||
|
|
07798e4aad | ||
|
|
6d6acd0213 | ||
|
|
a789e0f263 | ||
|
|
f9ee00b6b6 | ||
|
|
31bfdb08cd | ||
|
|
12c83e00fc | ||
|
|
9dc7b6c7ac | ||
|
|
627548bf7f | ||
|
|
dc65ecdf09 | ||
|
|
e577990eb2 | ||
|
|
1f3b729a4b | ||
|
|
0aa7ac210f | ||
|
|
40382f1387 | ||
|
|
75b3819e43 | ||
|
|
e63c2df0b1 | ||
|
|
25d4889789 | ||
|
|
8c0a721c4c | ||
|
|
9e972bc9ec | ||
|
|
1675712a4c | ||
|
|
e0c9012f7f | ||
|
|
a25024bd0c | ||
|
|
867980196e | ||
|
|
4e25d037c8 | ||
|
|
6ba6926221 | ||
|
|
b6b53b61f7 | ||
|
|
647c51a772 | ||
|
|
3b843ac9d4 | ||
|
|
0ef1f981da | ||
|
|
944a2aec4d | ||
|
|
4f31ad997c | ||
|
|
8683582300 | ||
|
|
5ccc607222 | ||
|
|
d8bd46f1bf | ||
|
|
8c2a712247 | ||
|
|
53e41bf2c7 | ||
|
|
0eeae9061c | ||
|
|
08729dbefc | ||
|
|
2c120aa0df | ||
|
|
cca6286b6f | ||
|
|
8516054e4d | ||
|
|
d1a8cd67d2 | ||
|
|
8a5b4bdfd4 | ||
|
|
3bceef02ee | ||
|
|
166a830938 | ||
|
|
18767fe026 | ||
|
|
18a1a4b9da | ||
|
|
6015fe700e | ||
|
|
369dae8dd3 | ||
|
|
2aaf3697d7 | ||
|
|
5504b5254c | ||
|
|
b2e4f11602 | ||
|
|
e3f95abca7 | ||
|
|
2f44f70c2c | ||
|
|
f8f05a883b | ||
|
|
5f473e2696 | ||
|
|
88b1874c04 | ||
|
|
58bc6943dc | ||
|
|
2dedf7b401 | ||
|
|
5769a553d2 | ||
|
|
552816e04b | ||
|
|
b5fa1044b8 | ||
|
|
3c55976a0e | ||
|
|
4611f67fae | ||
|
|
a5346041bb | ||
|
|
df42e438c1 | ||
|
|
7dbfd7dff6 | ||
|
|
a897d46049 | ||
|
|
adff887659 | ||
|
|
eba78f2159 | ||
|
|
ec05c8cdb4 | ||
|
|
0a869c4ed4 | ||
|
|
f792eaf8d4 | ||
|
|
8a41c96761 | ||
|
|
e5d9d8c55d | ||
|
|
3e44c8fe3a | ||
|
|
925e421bde | ||
|
|
bbb636bdba | ||
|
|
a30bdbb1c0 | ||
|
|
95b7e10a06 | ||
|
|
0385c60177 | ||
|
|
44895ebe36 | ||
|
|
44dfbf9dbd | ||
|
|
0a465fc3ca | ||
|
|
01eeae50b5 | ||
|
|
7eeeffdb8a | ||
|
|
eca06531c3 | ||
|
|
d90b40b60f | ||
|
|
1898c1e9a6 | ||
|
|
8d2f8b0dd8 | ||
|
|
df42281256 | ||
|
|
896cf476d5 | ||
|
|
37961d5f06 | ||
|
|
bb047bc844 | ||
|
|
448adedf6a | ||
|
|
469c7cd462 | ||
|
|
ebf6a07681 | ||
|
|
53f0fff513 | ||
|
|
ab7567693d | ||
|
|
1b8aab0723 | ||
|
|
30ebe61914 | ||
|
|
6f1c8dacea | ||
|
|
8881237475 | ||
|
|
584755be4b | ||
|
|
3d3324be5c | ||
|
|
4196d5b4d6 | ||
|
|
101c95ce65 | ||
|
|
19ebc0e7a2 | ||
|
|
1ce15b5d9e | ||
|
|
d670d62a66 | ||
|
|
6522467ddb | ||
|
|
aacd9642f5 | ||
|
|
4446c92517 | ||
|
|
8c65548b10 | ||
|
|
fb22651faf | ||
|
|
cfff136b2a | ||
|
|
bac2c64f87 | ||
|
|
be1ec97c8e | ||
|
|
bbd432415d | ||
|
|
1fef702382 | ||
|
|
39865d8a1f | ||
|
|
c7b27bd70b | ||
|
|
86e4fab0d5 | ||
|
|
ff3e40e4a5 | ||
|
|
ea830cad0c | ||
|
|
225e270fd5 | ||
|
|
c1768cfb14 | ||
|
|
53edd62f8b | ||
|
|
41a7e128b6 | ||
|
|
6b8c41c3ac | ||
|
|
2f09c34980 | ||
|
|
76dc69ce36 | ||
|
|
6c9d05539a | ||
|
|
b6bc17f730 | ||
|
|
c07ba8ccc0 | ||
|
|
ed86f621a0 | ||
|
|
c6a3175bbf | ||
|
|
452291417d | ||
|
|
ab9db8b7c7 | ||
|
|
877e2ea791 | ||
|
|
6ea42d5b63 | ||
|
|
31c117e696 | ||
|
|
04f057334f | ||
|
|
99a54d06ca | ||
|
|
8332c85f37 | ||
|
|
fcf1a3df62 | ||
|
|
f4f52ae67d | ||
|
|
0b08d5882a | ||
|
|
62eeafaba6 | ||
|
|
5a52e41399 | ||
|
|
e8083f8f3f | ||
|
|
338b3a03f0 | ||
|
|
c8b01b41ac | ||
|
|
6d08a418ed | ||
|
|
e3066d1489 | ||
|
|
487e3f2507 | ||
|
|
b82a53cad8 | ||
|
|
5bec82ca9d | ||
|
|
57354fc990 | ||
|
|
89f240805c | ||
|
|
27bbea886c | ||
|
|
3ec3dda33a | ||
|
|
ae9f338bf7 | ||
|
|
bf44f76dc7 | ||
|
|
c18581f0a4 | ||
|
|
9f6c5c4798 | ||
|
|
7bc03ac986 | ||
|
|
85d7e4f4ab | ||
|
|
bf69747f40 | ||
|
|
f1146bf7b6 | ||
|
|
9efd1fec90 | ||
|
|
3b91839a55 | ||
|
|
bc4421eeef | ||
|
|
5003820a6a | ||
|
|
cd2485f28d | ||
|
|
918a367378 | ||
|
|
3d35aeca72 | ||
|
|
53b1e5fd1d | ||
|
|
b852c895cf | ||
|
|
aaa7ed8712 | ||
|
|
205aca5b03 | ||
|
|
87b1f851f1 | ||
|
|
fca814b30d | ||
|
|
a20c2b6ecf | ||
|
|
fee94e1c54 | ||
|
|
047a596542 | ||
|
|
3d45606984 | ||
|
|
310c107d56 | ||
|
|
089e4d9e96 | ||
|
|
ae56c3cf49 | ||
|
|
0a0288a286 | ||
|
|
25da686758 | ||
|
|
e2da3cc9fa | ||
|
|
c42e5cf401 | ||
|
|
9943cd1c96 | ||
|
|
1e6f96508a | ||
|
|
d401974f69 | ||
|
|
09b2dbe859 | ||
|
|
7f8ef8c132 | ||
|
|
fcb6283a72 | ||
|
|
0027f46ccc | ||
|
|
967a27695e | ||
|
|
3ce8a326c6 | ||
|
|
91b56b7baf | ||
|
|
e2fa961302 | ||
|
|
87d6d7dc61 | ||
|
|
00019e2ca4 | ||
|
|
b104739d63 | ||
|
|
6ef0d13e42 | ||
|
|
b238d1aa04 | ||
|
|
aa497d5d96 | ||
|
|
fecf04b2f4 | ||
|
|
3f157e2f6f | ||
|
|
c7c558562e | ||
|
|
c2ea5fb618 | ||
|
|
fa9c32bb8d | ||
|
|
c610deb5a2 | ||
|
|
2bb3255e74 | ||
|
|
b28b74c71e | ||
|
|
1ed921bff7 | ||
|
|
80f634cc95 | ||
|
|
a3eb5e200c | ||
|
|
2d02c0e22d | ||
|
|
093eda2ad6 | ||
|
|
dbaf621f57 | ||
|
|
ceb701c2d4 | ||
|
|
29ad3783f5 | ||
|
|
fa2386e73c | ||
|
|
e0045e8386 | ||
|
|
b94c941196 | ||
|
|
ba66ac084f | ||
|
|
83479c9ef0 | ||
|
|
df8ac15ef0 | ||
|
|
8cea5cd967 | ||
|
|
a2d7d6a518 | ||
|
|
a63e624eca | ||
|
|
8596c321ce | ||
|
|
54cd799aa0 | ||
|
|
8185eb1890 | ||
|
|
03213984ec | ||
|
|
aeeee9d4b5 | ||
|
|
c8a1fb99bf | ||
|
|
f0181a41ff | ||
|
|
f6b06d0c6f | ||
|
|
1047217f78 | ||
|
|
16a9a44849 | ||
|
|
58fb24ce41 | ||
|
|
a9afffa246 | ||
|
|
1fdd053022 | ||
|
|
0a833968a0 | ||
|
|
58b681de78 | ||
|
|
22d5fc5f4c | ||
|
|
cc0119f698 | ||
|
|
580cedebde | ||
|
|
43bd1b070c | ||
|
|
42aa9c65be | ||
|
|
b0b87fa33f | ||
|
|
22912eba1a | ||
|
|
e2748fa967 | ||
|
|
248d5daaff | ||
|
|
8f5921692e | ||
|
|
e880eb8844 | ||
|
|
dc076c4e52 | ||
|
|
8306e93ef3 | ||
|
|
6a2cd129c0 | ||
|
|
30d7f6a22e | ||
|
|
5440ebbae6 | ||
|
|
22dbe694e9 | ||
|
|
64ac6ca396 | ||
|
|
377d37fa7f | ||
|
|
55296744a8 | ||
|
|
d0889012c2 | ||
|
|
3a8b2890eb | ||
|
|
5b2284a51d | ||
|
|
4807d8a4ef | ||
|
|
c6e1313977 | ||
|
|
66819fd3ee | ||
|
|
bd85e370be | ||
|
|
cc097174cc | ||
|
|
7d135bbdb8 | ||
|
|
4845a76535 | ||
|
|
67645c0db8 | ||
|
|
f463b3f038 | ||
|
|
01defc2779 | ||
|
|
c9e77ab352 | ||
|
|
c3de160d1c | ||
|
|
3693d7b571 | ||
|
|
a63144c28f | ||
|
|
2b3b0473cd | ||
|
|
9d929897ce | ||
|
|
313a5e1494 | ||
|
|
74dd25224a | ||
|
|
c7efc7f2ed | ||
|
|
c71c78da50 | ||
|
|
f4897da009 | ||
|
|
a6951db970 | ||
|
|
9d27aaa38f | ||
|
|
3b19b6f31b | ||
|
|
5b15ca0b0b | ||
|
|
aad79127e6 | ||
|
|
c42dcab32b | ||
|
|
be519c84d9 | ||
|
|
b2dc6dc59a | ||
|
|
9df626dc18 | ||
|
|
8d4b9200a1 | ||
|
|
7806df46ba | ||
|
|
bba026a212 | ||
|
|
6e111eb29f | ||
|
|
2b69ae0eb2 | ||
|
|
13d73574ef | ||
|
|
bc264807ae | ||
|
|
f9815dd20a | ||
|
|
1f58943b32 | ||
|
|
6476507429 | ||
|
|
35862d19ec | ||
|
|
1272cb00df | ||
|
|
e9ac26db4c | ||
|
|
20ee1d2e19 | ||
|
|
cbc1dd0c88 | ||
|
|
870bbabbc4 | ||
|
|
8fd84c375e | ||
|
|
32b5364051 | ||
|
|
cf72aec098 | ||
|
|
87849d12d2 | ||
|
|
a19512436f | ||
|
|
6c89d93aea | ||
|
|
345f40a660 | ||
|
|
8b9a814653 | ||
|
|
05fabf9095 | ||
|
|
95eede911a | ||
|
|
7bc7f7d673 | ||
|
|
054fdbe186 | ||
|
|
f0f80819a0 | ||
|
|
e702678252 | ||
|
|
553579986a | ||
|
|
622cb04f27 | ||
|
|
f3ba11a432 | ||
|
|
8b1f53bca5 | ||
|
|
ac25fef80e | ||
|
|
15f819d273 | ||
|
|
f2d1c43d28 | ||
|
|
464acc7d6c | ||
|
|
a96c5da737 | ||
|
|
28d09b81c9 | ||
|
|
a769d0e3d4 | ||
|
|
1b98b5e65c | ||
|
|
3cc5408da7 | ||
|
|
689f5c4554 | ||
|
|
ab5d042cd3 | ||
|
|
4d43317aa1 | ||
|
|
ed3b0c5b40 | ||
|
|
67a97794ee | ||
|
|
2c7c93cb9b | ||
|
|
4d4fe08d14 | ||
|
|
85a919b6f7 | ||
|
|
fe2abe20fc | ||
|
|
12444720db | ||
|
|
510faf5805 | ||
|
|
722e01c8ab | ||
|
|
6050e6cff9 | ||
|
|
c8abbe4fc3 | ||
|
|
f2881c9d4a | ||
|
|
1ded3abdf1 | ||
|
|
e641f1215a | ||
|
|
ca736bcab7 | ||
|
|
bddb2646bd | ||
|
|
e4c57f54f8 | ||
|
|
6de82ca843 | ||
|
|
b2c02df555 | ||
|
|
ca86d6361e | ||
|
|
b6fb00e046 | ||
|
|
86c84972c8 | ||
|
|
9390927875 | ||
|
|
c4a585f232 | ||
|
|
300feb3245 | ||
|
|
cacafb0038 | ||
|
|
6509114259 | ||
|
|
7d4cb79822 | ||
|
|
b867e164fe | ||
|
|
26bbfc084d | ||
|
|
c376eed31d | ||
|
|
7c595abc38 | ||
|
|
c428ab68d8 | ||
|
|
968b9f1852 | ||
|
|
018266c66e | ||
|
|
111c644bf1 | ||
|
|
ed5c641e8b | ||
|
|
de72d1f0e7 | ||
|
|
8bfb856923 | ||
|
|
8fdbaab95d | ||
|
|
a01668bbe8 | ||
|
|
3385616a37 | ||
|
|
1f0d89328d | ||
|
|
a7feab45d5 | ||
|
|
f34322afd7 | ||
|
|
3815fa40b7 | ||
|
|
c43050b3fa | ||
|
|
3e152872ad | ||
|
|
ae6ad55758 | ||
|
|
0118a2fc04 | ||
|
|
4dd81976f4 | ||
|
|
2b4da8baf6 | ||
|
|
7d1b4071e8 | ||
|
|
8fc5377f50 | ||
|
|
e5812f261d | ||
|
|
f7e85cd7de | ||
|
|
749395420b | ||
|
|
7d536d1d75 | ||
|
|
7fd0d2fc2f | ||
|
|
ec696bbcdd | ||
|
|
df24345d65 | ||
|
|
386dd26097 | ||
|
|
514f976cc1 | ||
|
|
66b870fd08 | ||
|
|
24d3c7e378 | ||
|
|
484128b641 | ||
|
|
588ea95732 | ||
|
|
800567cde7 | ||
|
|
7a3ba5a25d | ||
|
|
dfff411e1a | ||
|
|
e20baa4218 | ||
|
|
d1ab9b501a | ||
|
|
3cbc9109ea | ||
|
|
3259397f89 | ||
|
|
eb5af3d90b | ||
|
|
b6810b209a | ||
|
|
158e0e1f63 | ||
|
|
294a103ead | ||
|
|
7f71276ad8 | ||
|
|
93d4570a59 | ||
|
|
527ba2eb2e | ||
|
|
3021b31cf3 | ||
|
|
9f2427907e | ||
|
|
570ce100c1 | ||
|
|
27547355e6 | ||
|
|
c5ef52a67a | ||
|
|
b48b47d519 | ||
|
|
9bdba2f6a8 | ||
|
|
d6ce902d80 | ||
|
|
ce6dcf3600 | ||
|
|
e7f92d16d8 | ||
|
|
abd26f5f67 | ||
|
|
4d35ace75e | ||
|
|
72222d1598 | ||
|
|
26d914b8fc | ||
|
|
7b01c0676c | ||
|
|
571a9b8669 | ||
|
|
ed35eb1e9e | ||
|
|
d291e0d60d | ||
|
|
1874d579c5 | ||
|
|
c692339020 | ||
|
|
2c1eef34cb | ||
|
|
af178cbcd1 | ||
|
|
5d85be31ca | ||
|
|
372b71c847 | ||
|
|
41a9c415e1 | ||
|
|
915e32a5f8 | ||
|
|
f4dd429cbf | ||
|
|
7435cde2ef | ||
|
|
7056087e92 | ||
|
|
fed7ae5661 | ||
|
|
5019c6148b | ||
|
|
2e1396cd6b | ||
|
|
b5e9df5df8 | ||
|
|
3622856994 | ||
|
|
7367c6ec21 | ||
|
|
6579ec8c4c | ||
|
|
a7fbae47d5 | ||
|
|
f203a9d78e | ||
|
|
bae73e676c | ||
|
|
806e1061d4 | ||
|
|
f920091667 | ||
|
|
801979f779 | ||
|
|
df2d32e7aa | ||
|
|
60cf12727b | ||
|
|
7621526d22 | ||
|
|
559b84dceb | ||
|
|
7e4c5d4bb3 | ||
|
|
2a4ed6610e | ||
|
|
1d8e9c7897 | ||
|
|
43654028eb | ||
|
|
2f6fc27c8b | ||
|
|
d789b667d7 | ||
|
|
66a1abac6a | ||
|
|
665db18661 | ||
|
|
30d97ca879 | ||
|
|
c62a6ca59d | ||
|
|
77c2c7076b | ||
|
|
7466fd4387 | ||
|
|
c1369a1ec9 | ||
|
|
d677fe053d | ||
|
|
7c6785d3df | ||
|
|
77341ee3c4 | ||
|
|
5b4b60cfb5 | ||
|
|
0f3d54d8a0 | ||
|
|
7272792f65 | ||
|
|
4cc8e16595 | ||
|
|
ca5a759f94 | ||
|
|
be51e56a2e | ||
|
|
3a9171e275 | ||
|
|
bd0f3b4050 | ||
|
|
206a8364d4 | ||
|
|
097d031066 | ||
|
|
2674b42b59 | ||
|
|
edf2e51bbc | ||
|
|
47877acc2a | ||
|
|
d111a324bc | ||
|
|
388f0a6e05 | ||
|
|
8c13c02c55 | ||
|
|
a101fde917 | ||
|
|
1f4373b6e5 | ||
|
|
525747b472 | ||
|
|
472f12c985 | ||
|
|
b681f24f43 | ||
|
|
fd02b089b6 | ||
|
|
57d4c4a4f8 | ||
|
|
3595d26846 | ||
|
|
22a79c169d | ||
|
|
75dfe259cf | ||
|
|
2e257d6af0 | ||
|
|
e734222373 | ||
|
|
6a351b9912 | ||
|
|
cfc04aa162 | ||
|
|
943c795318 | ||
|
|
7fb61bad04 | ||
|
|
47efcdb1dd | ||
|
|
59cbce1a46 | ||
|
|
7e755e9cac | ||
|
|
9d1e2c3c1f | ||
|
|
5af32ce705 | ||
|
|
4e8861e653 | ||
|
|
d4d7ffb17c | ||
|
|
46f834ec75 | ||
|
|
6ec64a7e56 | ||
|
|
d71446e387 | ||
|
|
eada49e56b | ||
|
|
8f42d7df56 | ||
|
|
33a90b9026 | ||
|
|
710902b0d0 | ||
|
|
7b4f5d3b21 | ||
|
|
13093963b1 | ||
|
|
2e477e7458 | ||
|
|
4b6252151e | ||
|
|
f3765d1996 | ||
|
|
1f5cdd66b7 | ||
|
|
5b0ddbb835 | ||
|
|
4f92b56f06 | ||
|
|
a1f6ff92be | ||
|
|
ef98e91618 | ||
|
|
9fdf800750 | ||
|
|
32c698e4c2 | ||
|
|
75e80fa820 | ||
|
|
f8329bc632 | ||
|
|
9f74d36ba4 | ||
|
|
fc2435f135 | ||
|
|
0636519ba3 | ||
|
|
573bf03a6f | ||
|
|
9e529be4e7 | ||
|
|
7af4ffa6cc | ||
|
|
5b67ccd1c6 | ||
|
|
5166dbbcd3 | ||
|
|
21adb09730 | ||
|
|
28b5f656db | ||
|
|
68ee2d512f | ||
|
|
a5f7e0efc6 | ||
|
|
211038584a | ||
|
|
ff5ba97970 | ||
|
|
27f2c3cae1 | ||
|
|
48f0819327 | ||
|
|
5c6d88e91c | ||
|
|
0a04d9470f | ||
|
|
f0408c0dde | ||
|
|
a041f4a111 | ||
|
|
cdf9dae53e | ||
|
|
1917f431f5 | ||
|
|
a770afbff2 | ||
|
|
b1a5bf025b | ||
|
|
adff3e5050 | ||
|
|
0e88c5754f | ||
|
|
3fff875f99 | ||
|
|
e2d9ab3591 | ||
|
|
3db5cf44ea | ||
|
|
994b9089e9 | ||
|
|
4c1513a845 | ||
|
|
86e009b504 | ||
|
|
c1e1918db1 | ||
|
|
341225a405 | ||
|
|
8c93921952 | ||
|
|
45367105fc | ||
|
|
df71359069 | ||
|
|
a03d14a9a6 | ||
|
|
41d7ca395e | ||
|
|
757573bec1 | ||
|
|
16d655b119 | ||
|
|
f6483de197 | ||
|
|
da34411bf2 | ||
|
|
1891b64072 | ||
|
|
a14069acf8 | ||
|
|
0ea708c226 | ||
|
|
cb474c7b11 | ||
|
|
e4d11a117b | ||
|
|
68365045b4 | ||
|
|
502555b65d | ||
|
|
0bc52c0aae | ||
|
|
6bf2663b8e | ||
|
|
d337de668e | ||
|
|
ec372f91e9 | ||
|
|
20b1bd8c54 | ||
|
|
ee17741591 | ||
|
|
93a6925ec5 | ||
|
|
47405a8e8a | ||
|
|
54ba30c47f | ||
|
|
b92214f78b | ||
|
|
71e4404c0d | ||
|
|
5ab997d484 | ||
|
|
6e7048831b | ||
|
|
97cd932c19 | ||
|
|
dfc7a7d5cd | ||
|
|
27e13a8371 | ||
|
|
bf6ad1fbed | ||
|
|
bc71380b59 | ||
|
|
137c87ff60 | ||
|
|
485b8dc18b | ||
|
|
875f9078d1 | ||
|
|
d3bfcbd3af | ||
|
|
e36db692e7 | ||
|
|
460a40756c | ||
|
|
18057e14ef | ||
|
|
025c8fe302 | ||
|
|
446129ca7a | ||
|
|
834c4e8ad9 | ||
|
|
11d961cf3c | ||
|
|
00b93d8b2f | ||
|
|
281fd5bb89 | ||
|
|
cb10050cb9 | ||
|
|
2935c4cddb | ||
|
|
0d6ec70c6f | ||
|
|
74777b4ded | ||
|
|
5f2bd04799 | ||
|
|
9a1a5f9778 | ||
|
|
edc8aefa59 | ||
|
|
ee1c786a12 | ||
|
|
a3e4f2b716 | ||
|
|
6685f1fb9e | ||
|
|
c89ff328f6 | ||
|
|
c6f1bc65c0 | ||
|
|
0f43c61229 | ||
|
|
8567dab167 | ||
|
|
0517d7bee5 | ||
|
|
5bc0b9b31c | ||
|
|
3d219b91b9 | ||
|
|
a90c6306f8 | ||
|
|
60558388ec | ||
|
|
b29a7f8cd6 | ||
|
|
a1501591e8 | ||
|
|
1408aa078d | ||
|
|
5acaa476d6 | ||
|
|
8ac4f87c91 | ||
|
|
14d3001824 | ||
|
|
1ac9389ddc | ||
|
|
0b0e27c2f1 | ||
|
|
fd1199cce4 | ||
|
|
3c9eda8265 | ||
|
|
6622cdb43f | ||
|
|
49c28a7dab | ||
|
|
a42671c2d7 | ||
|
|
f17ab6ad92 | ||
|
|
ca548af2a2 | ||
|
|
579997688f | ||
|
|
e6ba7ef3e6 | ||
|
|
20fdf177e8 | ||
|
|
f0b01803ea | ||
|
|
f5c4841ff2 | ||
|
|
1e01283d81 | ||
|
|
2196448c21 | ||
|
|
96a81ce89d | ||
|
|
a715490c2a | ||
|
|
973cf8e980 | ||
|
|
4357e42391 | ||
|
|
884b49e662 | ||
|
|
38c94d2e9c | ||
|
|
67d2eb6b2a | ||
|
|
b670fb57db | ||
|
|
188b4be64d | ||
|
|
889c042ecd | ||
|
|
3c4f8eaa55 | ||
|
|
6a75d57060 | ||
|
|
fda2cf677b | ||
|
|
cfdf5a5a78 | ||
|
|
a1437c15f7 | ||
|
|
42e7489713 | ||
|
|
024760f866 | ||
|
|
46f0189e88 | ||
|
|
edc7498111 | ||
|
|
9103fdf866 | ||
|
|
95bf795de4 | ||
|
|
bf99223a80 | ||
|
|
9caf9b6f91 | ||
|
|
727c7b0dc6 | ||
|
|
13d184b280 | ||
|
|
12a91774b0 | ||
|
|
88018000ac | ||
|
|
f6eda1c35d | ||
|
|
a2ebdbc112 | ||
|
|
e930a42083 | ||
|
|
4b123f49cb | ||
|
|
556eca918d | ||
|
|
31fcd03f3c | ||
|
|
89d9dd5aa5 | ||
|
|
d1aad72826 | ||
|
|
8e5b4bddf4 | ||
|
|
5a7cb9af4e | ||
|
|
d1cda4ec68 | ||
|
|
8aaf1185a5 | ||
|
|
b46bd07119 | ||
|
|
08fa707085 | ||
|
|
72ba29d81a | ||
|
|
cf2dc4c444 | ||
|
|
d82d86e16d | ||
|
|
bde31d8600 | ||
|
|
e115d55585 | ||
|
|
daea86e047 | ||
|
|
a4f69d8914 | ||
|
|
98f382fda3 | ||
|
|
cd899734f3 | ||
|
|
f51b435bcf | ||
|
|
0f82a55305 | ||
|
|
9fd7a410bb | ||
|
|
98fb3d015a | ||
|
|
bfb2ad7c79 | ||
|
|
135bfbf7c1 | ||
|
|
c6b17ebc20 | ||
|
|
b55eb30474 | ||
|
|
cec2f1fc00 | ||
|
|
8367ec03a7 | ||
|
|
37013f8068 | ||
|
|
8360544d65 | ||
|
|
b5cdef43a1 | ||
|
|
2e5d521ed8 | ||
|
|
dbe35d52d1 | ||
|
|
8bcdb6f52c | ||
|
|
5cfcb8262e | ||
|
|
0b331a318b | ||
|
|
5d6cf55208 | ||
|
|
9a1ec19845 | ||
|
|
a79e93f335 | ||
|
|
abcb94a738 | ||
|
|
a4f2d5aa6f | ||
|
|
6b738d1c89 | ||
|
|
f4c518b370 | ||
|
|
d475dd3809 | ||
|
|
5675c47a01 | ||
|
|
16e950454e | ||
|
|
2926265a14 | ||
|
|
af2607de1a | ||
|
|
826d7808b4 | ||
|
|
4c89aca243 | ||
|
|
43a065bb07 | ||
|
|
4513a2cc75 | ||
|
|
f29c1ac6e5 | ||
|
|
05abe47c8b | ||
|
|
6c185a2c57 | ||
|
|
af2cb33bb2 | ||
|
|
f16a4a8264 | ||
|
|
b232552d42 | ||
|
|
0edccc11a5 | ||
|
|
b2f5c0e0db | ||
|
|
5f5d4c1923 | ||
|
|
a7d7f79855 | ||
|
|
f0bff18324 | ||
|
|
b631bdc5b7 | ||
|
|
c65f7e9bd5 | ||
|
|
3e0fa4a8da | ||
|
|
fa3150548e | ||
|
|
235ed85b0f | ||
|
|
1ca639a777 | ||
|
|
e36a994fe6 | ||
|
|
19ffcfea76 | ||
|
|
85f3a09c83 | ||
|
|
60b9a9c1fa | ||
|
|
984e38575c | ||
|
|
665df5d733 | ||
|
|
4bc0bea0e9 | ||
|
|
5cfa342f01 | ||
|
|
c106cc24e4 | ||
|
|
372da52d4a | ||
|
|
c7479751e8 | ||
|
|
870a54ac84 | ||
|
|
12fcfc2b72 | ||
|
|
875270b851 | ||
|
|
43fab306b6 | ||
|
|
77242f4169 | ||
|
|
95ae30f678 | ||
|
|
7408e778ca | ||
|
|
ba303fd1aa | ||
|
|
60d9896a70 | ||
|
|
485a80d294 | ||
|
|
63bfe9967e | ||
|
|
a720b82e63 | ||
|
|
d3b0048d8c | ||
|
|
9a0aca42a5 | ||
|
|
5e802b0645 | ||
|
|
dd7a1dbfae | ||
|
|
ca67b7a568 | ||
|
|
76cd879c84 | ||
|
|
e0c049e590 | ||
|
|
727943f078 | ||
|
|
8393b08666 | ||
|
|
9049f72d2f | ||
|
|
32f45c9e91 | ||
|
|
05f3a3c944 | ||
|
|
f91fe10985 | ||
|
|
14f7bfc545 | ||
|
|
7f90b0cd20 | ||
|
|
308abfec6c | ||
|
|
bb88536166 | ||
|
|
d2df3f2d6e | ||
|
|
2abfad9c1f | ||
|
|
2af932d969 | ||
|
|
c29fa61a9c | ||
|
|
a30931fe0f | ||
|
|
3ff9b87012 | ||
|
|
f4f315fd11 | ||
|
|
530165d9a5 | ||
|
|
dbd1458adf | ||
|
|
dedefecd2b | ||
|
|
46f441dd37 | ||
|
|
49b58fd6af | ||
|
|
103a507b39 | ||
|
|
0a75224f62 | ||
|
|
04d7629abf | ||
|
|
1b6786a21f | ||
|
|
5080f2314c | ||
|
|
41beb7f0a3 | ||
|
|
799873aa14 | ||
|
|
fe2c7eaa93 | ||
|
|
6392d45ea7 | ||
|
|
c60ea675d7 | ||
|
|
16c7c92396 | ||
|
|
c7ab302c69 | ||
|
|
7598b37543 | ||
|
|
cc9717e2f2 | ||
|
|
08f2f99f4b | ||
|
|
77bf3d66c7 | ||
|
|
f14f67f803 | ||
|
|
820b6e7b32 | ||
|
|
27aece94cf | ||
|
|
3f2508be92 | ||
|
|
fce11bb386 | ||
|
|
2723438531 | ||
|
|
f330b73682 | ||
|
|
0f1e592326 | ||
|
|
4d7dd0330d | ||
|
|
ea2ca2777f | ||
|
|
4b2b92fd9a | ||
|
|
784088db3f | ||
|
|
0ecf0d51e3 | ||
|
|
bc04ca464a | ||
|
|
44829df762 | ||
|
|
94ddfa66c0 | ||
|
|
8db8ed5a41 | ||
|
|
041ecd0de1 | ||
|
|
d812249db7 | ||
|
|
88528f1a87 | ||
|
|
82533114a7 | ||
|
|
6d9fbb3fa9 | ||
|
|
9953ae3d03 | ||
|
|
c0c387e4db | ||
|
|
ae60ea15da | ||
|
|
72cd1123a8 | ||
|
|
1364190a66 | ||
|
|
6d17c59090 | ||
|
|
e0f2c0b5dc | ||
|
|
073e34855d | ||
|
|
ff9ba70bb8 | ||
|
|
adbebb0e3f | ||
|
|
3f6b3eed98 | ||
|
|
f45e81e186 | ||
|
|
ba648fd003 | ||
|
|
b0e5a76f4c | ||
|
|
8692796c9b | ||
|
|
d0edcde4ea | ||
|
|
8c4c2e580c | ||
|
|
07f33e7641 | ||
|
|
1998c641af | ||
|
|
be1e5f9d62 | ||
|
|
fdeec6db52 | ||
|
|
a4d335b42f | ||
|
|
fcb134e144 | ||
|
|
a47e24222a | ||
|
|
b96b995620 | ||
|
|
c231706aa5 | ||
|
|
35b5117a59 | ||
|
|
80f716bc10 | ||
|
|
ca95e98ca0 | ||
|
|
d5559461c1 | ||
|
|
f4acd81e2f | ||
|
|
31feb6e26c | ||
|
|
7d5c0a069c | ||
|
|
937f49ec3d | ||
|
|
abc2a73a33 | ||
|
|
5e1bf7572c | ||
|
|
8fdb32d0a3 | ||
|
|
c709d5f7db | ||
|
|
f5b2749ec2 | ||
|
|
ee5853c565 | ||
|
|
6ec6df8a5f | ||
|
|
fc95800840 | ||
|
|
765715af21 | ||
|
|
639a7f6796 | ||
|
|
35379c7c0e | ||
|
|
d992f5353f | ||
|
|
875eef45f3 | ||
|
|
556a4aa972 | ||
|
|
8dc1969111 | ||
|
|
b74c229498 | ||
|
|
3dbca466fd | ||
|
|
ce6f7fdb82 | ||
|
|
7528bc1bc0 | ||
|
|
9dd5f7d642 | ||
|
|
99ecb0daaf | ||
|
|
39d8d7995a | ||
|
|
2ac2cde03e | ||
|
|
aa6c3766de | ||
|
|
f4f5d7e3ce | ||
|
|
efbf6018d3 | ||
|
|
1090bb8bf3 | ||
|
|
26bc79f971 | ||
|
|
4c1f015eca | ||
|
|
0655a183d3 | ||
|
|
7754024e9b | ||
|
|
b4913569a8 | ||
|
|
eae9f09ca8 | ||
|
|
8264e5ceaa | ||
|
|
b76f319e45 | ||
|
|
82d744716a | ||
|
|
1a3764ab8f | ||
|
|
d2ede9d393 | ||
|
|
5690f513fc | ||
|
|
123a845209 | ||
|
|
b1b7d735b3 | ||
|
|
230c69f7ce | ||
|
|
bfc43558ef | ||
|
|
f2ae2cc04d | ||
|
|
6e9c03f958 | ||
|
|
2696f614a7 | ||
|
|
070b944895 | ||
|
|
f5f091d390 | ||
|
|
14ab14a0e6 | ||
|
|
4f7c850115 | ||
|
|
391eca66cf | ||
|
|
a67199246d | ||
|
|
5f67fdaac9 | ||
|
|
05e6fe4287 | ||
|
|
91cc571e6e | ||
|
|
890926e60c | ||
|
|
87aa332583 | ||
|
|
f90c4ca672 | ||
|
|
a922e85a5c | ||
|
|
9a65820592 | ||
|
|
f4e16ae373 | ||
|
|
e2cfd34da0 | ||
|
|
668dea9706 | ||
|
|
084be442f2 | ||
|
|
29cb4a1327 | ||
|
|
81a61134b8 | ||
|
|
cb1a49aa02 | ||
|
|
351b4efc6c | ||
|
|
9b551309de | ||
|
|
9fed4a2ef4 | ||
|
|
bceac4f554 | ||
|
|
ae3a88d3a7 | ||
|
|
9138a7a5ba | ||
|
|
9912b43fcc | ||
|
|
5ac37555a4 | ||
|
|
34bdc730a6 | ||
|
|
e45a9d70fc | ||
|
|
232b36059c | ||
|
|
d9fbd675d5 | ||
|
|
0206e7b9de | ||
|
|
a886544d3d | ||
|
|
8c9b929bb0 | ||
|
|
1bb1ae834e | ||
|
|
0d9e364a90 | ||
|
|
3b28c003dd | ||
|
|
48ff9fb150 | ||
|
|
c43bc74fe6 | ||
|
|
eaf9cc2195 | ||
|
|
4bd276f58f | ||
|
|
f8cf0d5e5d | ||
|
|
79bc60db33 | ||
|
|
dc7c54067e | ||
|
|
932f0d5c20 | ||
|
|
9670f5e41a | ||
|
|
97a23e1cbe | ||
|
|
11fcd055ec | ||
|
|
b0d9966663 | ||
|
|
5c51ab7e1f | ||
|
|
26f293d587 | ||
|
|
a3b52fd380 | ||
|
|
27d8706d6d | ||
|
|
bf59383783 | ||
|
|
1078611259 | ||
|
|
e6fc0ac8fe | ||
|
|
554ca3d8dc | ||
|
|
86dfdf956d | ||
|
|
c0e4475485 | ||
|
|
2b65f8bd5c | ||
|
|
09e78272c2 | ||
|
|
cccce564bd | ||
|
|
4adec327de | ||
|
|
1f093334d1 | ||
|
|
e0e8507108 | ||
|
|
f5962f8128 | ||
|
|
b31d808655 | ||
|
|
247cda4b68 | ||
|
|
e30975e9a2 | ||
|
|
de9f1583c2 | ||
|
|
ab48653e63 | ||
|
|
6d7a1e3f8f | ||
|
|
e093dad7cb | ||
|
|
b103a121f0 | ||
|
|
3578abc7a4 | ||
|
|
17d398f419 | ||
|
|
3453a8eebb | ||
|
|
77a089c35c | ||
|
|
516d83c946 | ||
|
|
fd02c9f973 | ||
|
|
351e80a656 | ||
|
|
4f04e2ed93 | ||
|
|
a810d1b98e | ||
|
|
fbe963a96a | ||
|
|
d13b8bee8a | ||
|
|
0aa072a155 | ||
|
|
57dde7c3bc | ||
|
|
6b9003f781 | ||
|
|
9c1c59e481 | ||
|
|
31daec2749 | ||
|
|
2bff90719b | ||
|
|
e4570e28a8 | ||
|
|
d84a730daa | ||
|
|
0fd1a05cec | ||
|
|
6373d307ec | ||
|
|
a32c3a50fc | ||
|
|
66b5634ebf | ||
|
|
92b3697e2c | ||
|
|
969e605c7e | ||
|
|
a3320f26cf | ||
|
|
45329d9e3c | ||
|
|
6481321470 | ||
|
|
efcf5e050d | ||
|
|
dfa686b617 | ||
|
|
fe638cf11f | ||
|
|
7cdc16abdf |
@@ -4,8 +4,12 @@
|
|||||||
.venv
|
.venv
|
||||||
cache
|
cache
|
||||||
data
|
data
|
||||||
examples
|
docker
|
||||||
|
saves
|
||||||
|
hf_cache
|
||||||
|
ms_cache
|
||||||
|
om_cache
|
||||||
|
output
|
||||||
.dockerignore
|
.dockerignore
|
||||||
.gitattributes
|
.gitattributes
|
||||||
.gitignore
|
.gitignore
|
||||||
Dockerfile
|
|
||||||
|
|||||||
40
.env.local
Normal file
40
.env.local
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Note: actually we do not support .env, just for reference
|
||||||
|
# api
|
||||||
|
API_HOST=
|
||||||
|
API_PORT=
|
||||||
|
API_KEY=
|
||||||
|
API_MODEL_NAME=
|
||||||
|
API_VERBOSE=
|
||||||
|
FASTAPI_ROOT_PATH=
|
||||||
|
MAX_CONCURRENT=
|
||||||
|
# general
|
||||||
|
DISABLE_VERSION_CHECK=
|
||||||
|
FORCE_CHECK_IMPORTS=
|
||||||
|
ALLOW_EXTRA_ARGS=
|
||||||
|
LLAMAFACTORY_VERBOSITY=
|
||||||
|
USE_MODELSCOPE_HUB=
|
||||||
|
USE_OPENMIND_HUB=
|
||||||
|
USE_RAY=
|
||||||
|
RECORD_VRAM=
|
||||||
|
# torchrun
|
||||||
|
FORCE_TORCHRUN=
|
||||||
|
MASTER_ADDR=
|
||||||
|
MASTER_PORT=
|
||||||
|
NNODES=
|
||||||
|
NODE_RANK=
|
||||||
|
NPROC_PER_NODE=
|
||||||
|
# wandb
|
||||||
|
WANDB_DISABLED=
|
||||||
|
WANDB_PROJECT=
|
||||||
|
WANDB_API_KEY=
|
||||||
|
# gradio ui
|
||||||
|
GRADIO_SHARE=
|
||||||
|
GRADIO_SERVER_NAME=
|
||||||
|
GRADIO_SERVER_PORT=
|
||||||
|
GRADIO_ROOT_PATH=
|
||||||
|
GRADIO_IPV6=
|
||||||
|
# setup
|
||||||
|
ENABLE_SHORT_CONSOLE=
|
||||||
|
# reserved (do not use)
|
||||||
|
LLAMABOARD_ENABLED=
|
||||||
|
LLAMABOARD_WORKDIR=
|
||||||
46
.github/CONTRIBUTING.md
vendored
46
.github/CONTRIBUTING.md
vendored
@@ -19,3 +19,49 @@ There are several ways you can contribute to LLaMA Factory:
|
|||||||
### Style guide
|
### Style guide
|
||||||
|
|
||||||
LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
|
LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
|
||||||
|
|
||||||
|
### Create a Pull Request
|
||||||
|
|
||||||
|
1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account.
|
||||||
|
|
||||||
|
2. Clone your fork to your local disk, and add the base repository as a remote:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone git@github.com:[username]/LLaMA-Factory.git
|
||||||
|
cd LLaMA-Factory
|
||||||
|
git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Create a new branch to hold your development changes:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout -b dev_your_branch
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Set up a development environment by running the following command in a virtual environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install -e ".[dev]"
|
||||||
|
```
|
||||||
|
|
||||||
|
If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the -e flag.
|
||||||
|
|
||||||
|
5. Check code before commit:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make commit
|
||||||
|
make style && make quality
|
||||||
|
make test
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Submit changes:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add .
|
||||||
|
git commit -m "commit message"
|
||||||
|
git fetch upstream
|
||||||
|
git rebase upstream/main
|
||||||
|
git push -u origin dev_your_branch
|
||||||
|
```
|
||||||
|
|
||||||
|
7. Create a merge request from your branch `dev_your_branch` at [origin repo](https://github.com/hiyouga/LLaMA-Factory).
|
||||||
|
|||||||
63
.github/ISSUE_TEMPLATE/1-bug-report.yml
vendored
Normal file
63
.github/ISSUE_TEMPLATE/1-bug-report.yml
vendored
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
name: "\U0001F41B Bug / help"
|
||||||
|
description: Create a report to help us improve the LLaMA Factory
|
||||||
|
labels: ["bug", "pending"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Issues included in **[FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** or those with **insufficient** information may be closed without a response.
|
||||||
|
已经包含在 **[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** 内或提供信息**不完整**的 issues 可能不会被回复。
|
||||||
|
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Please do not create issues that are not related to framework bugs under this category, use **[Discussions](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)** instead.
|
||||||
|
请勿在此分类下创建和框架 bug 无关的 issues,请使用 **[讨论区](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)**。
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: reminder
|
||||||
|
attributes:
|
||||||
|
label: Reminder
|
||||||
|
description: |
|
||||||
|
Please ensure you have read the above rules carefully and searched the existing issues (including FAQs).
|
||||||
|
请确保您已经认真阅读了上述规则并且搜索过现有的 issues(包括常见问题)。
|
||||||
|
|
||||||
|
options:
|
||||||
|
- label: I have read the above rules and searched the existing issues.
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: system-info
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: System Info
|
||||||
|
description: |
|
||||||
|
Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
|
||||||
|
请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
|
||||||
|
|
||||||
|
placeholder: llamafactory version, platform, python version, ...
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: reproduction
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: Reproduction
|
||||||
|
description: |
|
||||||
|
Please provide entry arguments, error messages and stack traces that reproduces the problem.
|
||||||
|
请提供入口参数,错误日志以及异常堆栈以便于我们复现问题。
|
||||||
|
Remember to wrap your log messages with \`\`\`.
|
||||||
|
请务必使用 Markdown 标签 \`\`\` 来包裹您的日志信息。
|
||||||
|
|
||||||
|
value: |
|
||||||
|
```text
|
||||||
|
Put your message here.
|
||||||
|
```
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: others
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: Others
|
||||||
41
.github/ISSUE_TEMPLATE/2-feature-request.yml
vendored
Normal file
41
.github/ISSUE_TEMPLATE/2-feature-request.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
name: "\U0001F680 Feature request"
|
||||||
|
description: Submit a request for a new feature
|
||||||
|
labels: ["enhancement", "pending"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Please do not create issues that are not related to new features under this category.
|
||||||
|
请勿在此分类下创建和新特性无关的 issues。
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: reminder
|
||||||
|
attributes:
|
||||||
|
label: Reminder
|
||||||
|
description: |
|
||||||
|
Please ensure you have read the above rules carefully and searched the existing issues.
|
||||||
|
请确保您已经认真阅读了上述规则并且搜索过现有的 issues。
|
||||||
|
|
||||||
|
options:
|
||||||
|
- label: I have read the above rules and searched the existing issues.
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
A clear and concise description of the feature proposal.
|
||||||
|
请详细描述您希望加入的新功能特性。
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: contribution
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: Pull Request
|
||||||
|
description: |
|
||||||
|
Have you already created the relevant PR and submitted the code?
|
||||||
|
您是否已经创建了相关 PR 并提交了代码?
|
||||||
58
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
58
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,58 +0,0 @@
|
|||||||
name: "\U0001F41B Bug / Help"
|
|
||||||
description: Create a report to help us improve the LLaMA Factory
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
id: reminder
|
|
||||||
attributes:
|
|
||||||
label: Reminder
|
|
||||||
description: |
|
|
||||||
Please ensure you have read the README carefully and searched the existing issues.
|
|
||||||
请确保您已经认真阅读了 README 并且搜索过现有的 Issue。
|
|
||||||
|
|
||||||
options:
|
|
||||||
- label: I have read the README and searched the existing issues.
|
|
||||||
required: true
|
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
id: reproduction
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
attributes:
|
|
||||||
label: Reproduction
|
|
||||||
description: |
|
|
||||||
Please provide code snippets, error messages and stack traces that reproduces the problem.
|
|
||||||
请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
|
|
||||||
Remember to use Markdown tags to correctly format your code.
|
|
||||||
请合理使用 Markdown 标签来格式化您的文本。
|
|
||||||
|
|
||||||
placeholder: |
|
|
||||||
python src/train_bash.py ...
|
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
id: expected-behavior
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
attributes:
|
|
||||||
label: Expected behavior
|
|
||||||
description: |
|
|
||||||
Please provide a clear and concise description of what you would expect to happen.
|
|
||||||
请提供您原本的目的,即这段代码的期望行为。
|
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
id: system-info
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
attributes:
|
|
||||||
label: System Info
|
|
||||||
description: |
|
|
||||||
Please share your system info with us. You can run the command **transformers-cli env** and copy-paste its output below.
|
|
||||||
请提供您的系统信息。您可以在命令行运行 **transformers-cli env** 并将其输出复制到该文本框中。
|
|
||||||
|
|
||||||
placeholder: transformers version, platform, python version, ...
|
|
||||||
|
|
||||||
- type: textarea
|
|
||||||
id: others
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
attributes:
|
|
||||||
label: Others
|
|
||||||
1
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
1
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -5,3 +5,4 @@ Fixes # (issue)
|
|||||||
## Before submitting
|
## Before submitting
|
||||||
|
|
||||||
- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
|
- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
|
||||||
|
- [ ] Did you write any new necessary tests?
|
||||||
|
|||||||
32
.github/workflows/label_issue.yml
vendored
Normal file
32
.github/workflows/label_issue.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
name: label_issue
|
||||||
|
|
||||||
|
on:
|
||||||
|
issues:
|
||||||
|
types:
|
||||||
|
- opened
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
label_issue:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- env:
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
ISSUE_URL: ${{ github.event.issue.html_url }}
|
||||||
|
ISSUE_TITLE: ${{ github.event.issue.title }}
|
||||||
|
run: |
|
||||||
|
LABEL=""
|
||||||
|
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾)
|
||||||
|
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
|
||||||
|
for KEYWORD in ${NPU_KEYWORDS[@]}; do
|
||||||
|
if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
|
||||||
|
LABEL="npu"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
if [ -n "$LABEL" ]; then
|
||||||
|
gh issue edit $ISSUE_URL --add-label $LABEL
|
||||||
|
fi
|
||||||
40
.github/workflows/publish.yml
vendored
Normal file
40
.github/workflows/publish.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
name: publish
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types:
|
||||||
|
- published
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish:
|
||||||
|
name: Upload release to PyPI
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
environment:
|
||||||
|
name: release
|
||||||
|
url: https://pypi.org/p/llamafactory
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
id-token: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.9"
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip install build
|
||||||
|
|
||||||
|
- name: Build package
|
||||||
|
run: |
|
||||||
|
python -m build
|
||||||
|
|
||||||
|
- name: Publish package
|
||||||
|
uses: pypa/gh-action-pypi-publish@release/v1
|
||||||
49
.github/workflows/tests.yml
vendored
49
.github/workflows/tests.yml
vendored
@@ -2,28 +2,61 @@ name: tests
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: [ "main" ]
|
branches:
|
||||||
|
- "main"
|
||||||
|
paths:
|
||||||
|
- "**.py"
|
||||||
|
- "requirements.txt"
|
||||||
|
- ".github/workflows/*.yml"
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: [ "main" ]
|
branches:
|
||||||
|
- "main"
|
||||||
|
paths:
|
||||||
|
- "**.py"
|
||||||
|
- "requirements.txt"
|
||||||
|
- ".github/workflows/*.yml"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check_code_quality:
|
tests:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
python-version:
|
||||||
|
- "3.9"
|
||||||
|
- "3.10"
|
||||||
|
- "3.11"
|
||||||
|
- "3.12"
|
||||||
|
os:
|
||||||
|
- "ubuntu-latest"
|
||||||
|
- "windows-latest"
|
||||||
|
- "macos-13"
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ${{ matrix.os }}
|
||||||
|
|
||||||
|
env:
|
||||||
|
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
||||||
|
OS_NAME: ${{ matrix.os }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.8"
|
python-version: ${{ matrix.python-version }}
|
||||||
|
cache: "pip"
|
||||||
|
cache-dependency-path: "setup.py"
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
python -m pip install --upgrade pip
|
||||||
python -m pip install ruff
|
python -m pip install ".[torch,dev]"
|
||||||
|
|
||||||
- name: Check quality
|
- name: Check quality
|
||||||
run: |
|
run: |
|
||||||
make style && make quality
|
make style && make quality
|
||||||
|
|
||||||
|
- name: Test with pytest
|
||||||
|
run: |
|
||||||
|
make test
|
||||||
|
|||||||
17
.gitignore
vendored
17
.gitignore
vendored
@@ -159,7 +159,20 @@ cython_debug/
|
|||||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||||
.idea/
|
.idea/
|
||||||
|
|
||||||
|
# vscode
|
||||||
|
.vscode/
|
||||||
|
|
||||||
|
# uv
|
||||||
|
uv.lock
|
||||||
|
|
||||||
# custom .gitignore
|
# custom .gitignore
|
||||||
user.config
|
ms_cache/
|
||||||
saves/
|
hf_cache/
|
||||||
|
om_cache/
|
||||||
cache/
|
cache/
|
||||||
|
config/
|
||||||
|
saves/
|
||||||
|
output/
|
||||||
|
wandb/
|
||||||
|
swanlog/
|
||||||
|
generated_predictions.jsonl
|
||||||
|
|||||||
28
.pre-commit-config.yaml
Normal file
28
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
repos:
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: v5.0.0
|
||||||
|
hooks:
|
||||||
|
- id: check-ast
|
||||||
|
- id: check-added-large-files
|
||||||
|
args: ['--maxkb=25000']
|
||||||
|
- id: check-merge-conflict
|
||||||
|
- id: check-yaml
|
||||||
|
- id: debug-statements
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
- id: trailing-whitespace
|
||||||
|
args: [--markdown-linebreak-ext=md]
|
||||||
|
- id: no-commit-to-branch
|
||||||
|
args: ['--branch', 'main']
|
||||||
|
|
||||||
|
- repo: https://github.com/asottile/pyupgrade
|
||||||
|
rev: v3.17.0
|
||||||
|
hooks:
|
||||||
|
- id: pyupgrade
|
||||||
|
args: [--py38-plus]
|
||||||
|
|
||||||
|
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||||
|
rev: v0.6.9
|
||||||
|
hooks:
|
||||||
|
- id: ruff
|
||||||
|
args: [--fix]
|
||||||
|
- id: ruff-format
|
||||||
11
CITATION.cff
11
CITATION.cff
@@ -12,12 +12,16 @@ authors:
|
|||||||
given-names: "Yanhan"
|
given-names: "Yanhan"
|
||||||
- family-names: "Luo"
|
- family-names: "Luo"
|
||||||
given-names: "Zheyan"
|
given-names: "Zheyan"
|
||||||
|
- family-names: "Feng"
|
||||||
|
given-names: "Zhangchi"
|
||||||
- family-names: "Ma"
|
- family-names: "Ma"
|
||||||
given-names: "Yongqiang"
|
given-names: "Yongqiang"
|
||||||
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
||||||
url: "https://arxiv.org/abs/2403.13372"
|
url: "https://arxiv.org/abs/2403.13372"
|
||||||
preferred-citation:
|
preferred-citation:
|
||||||
type: article
|
type: conference-paper
|
||||||
|
conference:
|
||||||
|
name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)"
|
||||||
authors:
|
authors:
|
||||||
- family-names: "Zheng"
|
- family-names: "Zheng"
|
||||||
given-names: "Yaowei"
|
given-names: "Yaowei"
|
||||||
@@ -29,9 +33,12 @@ preferred-citation:
|
|||||||
given-names: "Yanhan"
|
given-names: "Yanhan"
|
||||||
- family-names: "Luo"
|
- family-names: "Luo"
|
||||||
given-names: "Zheyan"
|
given-names: "Zheyan"
|
||||||
|
- family-names: "Feng"
|
||||||
|
given-names: "Zhangchi"
|
||||||
- family-names: "Ma"
|
- family-names: "Ma"
|
||||||
given-names: "Yongqiang"
|
given-names: "Yongqiang"
|
||||||
journal: "arXiv preprint arXiv:2403.13372"
|
|
||||||
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
||||||
url: "https://arxiv.org/abs/2403.13372"
|
url: "https://arxiv.org/abs/2403.13372"
|
||||||
year: 2024
|
year: 2024
|
||||||
|
publisher: "Association for Computational Linguistics"
|
||||||
|
address: "Bangkok, Thailand"
|
||||||
|
|||||||
14
Dockerfile
14
Dockerfile
@@ -1,14 +0,0 @@
|
|||||||
FROM nvcr.io/nvidia/pytorch:24.01-py3
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
COPY requirements.txt /app/
|
|
||||||
RUN pip install -r requirements.txt
|
|
||||||
|
|
||||||
COPY . /app/
|
|
||||||
RUN pip install -e .[deepspeed,metrics,bitsandbytes,qwen]
|
|
||||||
|
|
||||||
VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
|
|
||||||
EXPOSE 7860
|
|
||||||
|
|
||||||
CMD [ "llamafactory-cli", "webui" ]
|
|
||||||
1
MANIFEST.in
Normal file
1
MANIFEST.in
Normal file
@@ -0,0 +1 @@
|
|||||||
|
include LICENSE requirements.txt
|
||||||
14
Makefile
14
Makefile
@@ -1,6 +1,13 @@
|
|||||||
.PHONY: quality style
|
.PHONY: build commit quality style test
|
||||||
|
|
||||||
check_dirs := scripts src tests
|
check_dirs := scripts src tests setup.py
|
||||||
|
|
||||||
|
build:
|
||||||
|
pip install build && python -m build
|
||||||
|
|
||||||
|
commit:
|
||||||
|
pre-commit install
|
||||||
|
pre-commit run --all-files
|
||||||
|
|
||||||
quality:
|
quality:
|
||||||
ruff check $(check_dirs)
|
ruff check $(check_dirs)
|
||||||
@@ -9,3 +16,6 @@ quality:
|
|||||||
style:
|
style:
|
||||||
ruff check $(check_dirs) --fix
|
ruff check $(check_dirs) --fix
|
||||||
ruff format $(check_dirs)
|
ruff format $(check_dirs)
|
||||||
|
|
||||||
|
test:
|
||||||
|
CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/
|
||||||
|
|||||||
659
README.md
659
README.md
@@ -1,32 +1,50 @@
|
|||||||

|

|
||||||
|
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
||||||
[](LICENSE)
|
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
||||||
[](https://pypi.org/project/llmtuner/)
|
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
|
||||||
[](https://pypi.org/project/llmtuner/)
|
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
|
||||||
[](#projects-using-llama-factory)
|
[](https://pypi.org/project/llamafactory/)
|
||||||
|
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
[](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
||||||
[](https://discord.gg/rKfvV9r9FK)
|
|
||||||
[](https://twitter.com/llamafactory_ai)
|
[](https://twitter.com/llamafactory_ai)
|
||||||
|
[](https://discord.gg/rKfvV9r9FK)
|
||||||
|
[](https://gitcode.com/zhengyaowei/LLaMA-Factory)
|
||||||
|
|
||||||
|
[](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
|
||||||
|
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
||||||
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
||||||
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
||||||
[](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
|
[](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)
|
||||||
|
|
||||||
[](https://trendshift.io/repositories/4535)
|
<h3 align="center">
|
||||||
|
Easily fine-tune 100+ large language models with zero-code <a href="#quickstart">CLI</a> and <a href="#fine-tuning-with-llama-board-gui-powered-by-gradio">Web UI</a>
|
||||||
|
</h3>
|
||||||
|
<p align="center">
|
||||||
|
<picture>
|
||||||
|
<img alt="Github trend" src="https://trendshift.io/api/badge/repositories/4535">
|
||||||
|
</picture>
|
||||||
|
</p>
|
||||||
|
|
||||||
👋 Join our [WeChat](assets/wechat.jpg).
|
👋 Join our [WeChat](assets/wechat.jpg) or [NPU user group](assets/wechat_npu.jpg).
|
||||||
|
|
||||||
\[ English | [中文](README_zh.md) \]
|
\[ English | [中文](README_zh.md) \]
|
||||||
|
|
||||||
**Fine-tuning a large language model can be easy as...**
|
**Fine-tuning a large language model can be easy as...**
|
||||||
|
|
||||||
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/9840a653-7e9c-41c8-ae89-7ace5698baf6
|
https://github.com/user-attachments/assets/7c96b465-9df7-45f4-8053-bf03e58386d3
|
||||||
|
|
||||||
Choose your path:
|
Choose your path:
|
||||||
|
|
||||||
- **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
|
- **Documentation**: https://llamafactory.readthedocs.io/en/latest/
|
||||||
|
- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
|
||||||
- **Local machine**: Please refer to [usage](#getting-started)
|
- **Local machine**: Please refer to [usage](#getting-started)
|
||||||
|
- **PAI-DSW (free trial)**: [Llama3 Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) | [DeepSeek-R1-Distill Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b)
|
||||||
|
- **Amazon SageMaker**: [Blog](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.
|
||||||
|
|
||||||
## Table of Contents
|
## Table of Contents
|
||||||
|
|
||||||
@@ -38,6 +56,16 @@ Choose your path:
|
|||||||
- [Provided Datasets](#provided-datasets)
|
- [Provided Datasets](#provided-datasets)
|
||||||
- [Requirement](#requirement)
|
- [Requirement](#requirement)
|
||||||
- [Getting Started](#getting-started)
|
- [Getting Started](#getting-started)
|
||||||
|
- [Installation](#installation)
|
||||||
|
- [Data Preparation](#data-preparation)
|
||||||
|
- [Quickstart](#quickstart)
|
||||||
|
- [Fine-Tuning with LLaMA Board GUI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
|
||||||
|
- [Build Docker](#build-docker)
|
||||||
|
- [Deploy with OpenAI-style API and vLLM](#deploy-with-openai-style-api-and-vllm)
|
||||||
|
- [Download from ModelScope Hub](#download-from-modelscope-hub)
|
||||||
|
- [Download from Modelers Hub](#download-from-modelers-hub)
|
||||||
|
- [Use W&B Logger](#use-wb-logger)
|
||||||
|
- [Use SwanLab Logger](#use-swanlab-logger)
|
||||||
- [Projects using LLaMA Factory](#projects-using-llama-factory)
|
- [Projects using LLaMA Factory](#projects-using-llama-factory)
|
||||||
- [License](#license)
|
- [License](#license)
|
||||||
- [Citation](#citation)
|
- [Citation](#citation)
|
||||||
@@ -45,14 +73,22 @@ Choose your path:
|
|||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
|
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc.
|
||||||
- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO and ORPO.
|
- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
|
||||||
- **Scalable resources**: 32-bit full-tuning, 16-bit freeze-tuning, 16-bit LoRA and 2/4/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8.
|
- **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
|
||||||
- **Advanced algorithms**: GaLore, BAdam, DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and Agent tuning.
|
- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
|
||||||
- **Practical tricks**: FlashAttention-2, Unsloth, RoPE scaling, NEFTune and rsLoRA.
|
- **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), RoPE scaling, NEFTune and rsLoRA.
|
||||||
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
|
- **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc.
|
||||||
|
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, SwanLab, etc.
|
||||||
- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.
|
- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.
|
||||||
|
|
||||||
|
### Day-N Support for Fine-Tuning Cutting-Edge Models
|
||||||
|
|
||||||
|
| Support Date | Model Name |
|
||||||
|
| ------------ | ---------------------------------------------------------- |
|
||||||
|
| Day 0 | Qwen2.5 / Qwen2-VL / QwQ / QvQ / InternLM3 / MiniCPM-o-2.6 |
|
||||||
|
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 |
|
||||||
|
|
||||||
## Benchmark
|
## Benchmark
|
||||||
|
|
||||||
Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging 4-bit quantization technique, LLaMA Factory's QLoRA further improves the efficiency regarding the GPU memory.
|
Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging 4-bit quantization technique, LLaMA Factory's QLoRA further improves the efficiency regarding the GPU memory.
|
||||||
@@ -70,19 +106,59 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
|
|||||||
|
|
||||||
## Changelog
|
## Changelog
|
||||||
|
|
||||||
[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
|
[25/02/24] Announcing **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient, scalable and multi-modality RL training framework for efficient GRPO training.
|
||||||
|
|
||||||
[24/05/13] We supported fine-tuning the **Yi-1.5** series models.
|
[25/02/11] We supported saving the **[Ollama](https://github.com/ollama/ollama)** modelfile when exporting the model checkpoints. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
|
[25/02/05] We supported fine-tuning the **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks.
|
||||||
|
|
||||||
|
[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** model.
|
||||||
|
|
||||||
<details><summary>Full Changelog</summary>
|
<details><summary>Full Changelog</summary>
|
||||||
|
|
||||||
|
[25/01/15] We supported **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
|
[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thank [@BUAADreamer](https://github.com/BUAADreamer)'s PR.
|
||||||
|
|
||||||
|
[25/01/14] We supported fine-tuning the **[InternLM3](https://huggingface.co/collections/internlm/)** models. Thank [@hhaAndroid](https://github.com/hhaAndroid)'s PR.
|
||||||
|
|
||||||
|
[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model.
|
||||||
|
|
||||||
|
[24/12/21] We supported using **[SwanLab](https://github.com/SwanHubX/SwanLab)** for experiment tracking and visualization. See [this section](#use-swanlab-logger) for details.
|
||||||
|
|
||||||
|
[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset.
|
||||||
|
|
||||||
|
[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#download-from-modelers-hub) for usage.
|
||||||
|
|
||||||
|
[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models.
|
||||||
|
|
||||||
|
[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
|
||||||
|
|
||||||
|
[24/08/27] We supported **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
|
||||||
|
|
||||||
|
[24/08/09] We supported **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR.
|
||||||
|
|
||||||
|
[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR.
|
||||||
|
|
||||||
|
[24/06/16] We supported **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
|
[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models.
|
||||||
|
|
||||||
|
[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
|
[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models, you need to fine-tune them with `paligemma` template for chat completion.
|
||||||
|
|
||||||
|
[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
|
[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
|
||||||
|
|
||||||
|
[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
|
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
|
||||||
|
|
||||||
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
|
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)**. See [examples](examples/README.md) for usage.
|
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)** optimizer. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
|
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
|
||||||
|
|
||||||
@@ -94,7 +170,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
|
|||||||
|
|
||||||
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
|
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/03/07] We supported gradient low-rank projection (**[GaLore](https://arxiv.org/abs/2403.03507)**) algorithm. See [examples](examples/README.md) for usage.
|
[24/03/07] We supported **[GaLore](https://arxiv.org/abs/2403.03507)** optimizer. See [examples](examples/README.md) for usage.
|
||||||
|
|
||||||
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
|
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
|
||||||
|
|
||||||
@@ -104,13 +180,13 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
|
|||||||
|
|
||||||
[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
|
[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
|
||||||
|
|
||||||
[24/01/18] We supported **agent tuning** for most models, equipping model with tool using abilities by fine-tuning with `dataset: glaive_toolcall`.
|
[24/01/18] We supported **agent tuning** for most models, equipping model with tool using abilities by fine-tuning with `dataset: glaive_toolcall_en`.
|
||||||
|
|
||||||
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
|
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
|
||||||
|
|
||||||
[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
|
[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
|
||||||
|
|
||||||
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for Chinese mainland users. See [this tutorial](#download-from-modelscope-hub) for usage.
|
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage.
|
||||||
|
|
||||||
[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
|
[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
|
||||||
|
|
||||||
@@ -142,43 +218,60 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
|
|||||||
|
|
||||||
## Supported Models
|
## Supported Models
|
||||||
|
|
||||||
| Model | Model size | Default module | Template |
|
| Model | Model size | Template |
|
||||||
| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- |
|
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
|
||||||
| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | W_pack | baichuan2 |
|
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||||
| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||||
| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | query_key_value | chatglm3 |
|
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
||||||
| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | q_proj,v_proj | cohere |
|
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
||||||
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | q_proj,v_proj | deepseek |
|
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
||||||
| [Falcon](https://huggingface.co/tiiuae) | 7B/40B/180B | query_key_value | falcon |
|
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseek3 |
|
||||||
| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | q_proj,v_proj | gemma |
|
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
||||||
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | wqkv | intern2 |
|
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
||||||
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
|
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
|
||||||
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
|
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
||||||
| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | q_proj,v_proj | llama3 |
|
| [Granite 3.0-3.1](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
|
||||||
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | q_proj,v_proj | vicuna |
|
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
|
||||||
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | q_proj,v_proj | mistral |
|
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
||||||
| [OLMo](https://huggingface.co/allenai) | 1B/7B | q_proj,v_proj | - |
|
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
||||||
| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - |
|
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
||||||
| [Phi-3](https://huggingface.co/microsoft) | 3.8B | qkv_proj | phi |
|
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
||||||
| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | c_attn | qwen |
|
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
||||||
| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj | qwen |
|
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
||||||
| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | q_proj,v_proj | - |
|
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
||||||
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse |
|
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
||||||
| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | q_proj,v_proj | yi |
|
| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
|
||||||
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | q_proj,v_proj | yi_vl |
|
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
||||||
| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan |
|
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
|
||||||
|
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
||||||
|
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
|
||||||
|
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
||||||
|
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
||||||
|
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
||||||
|
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
||||||
|
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
||||||
|
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
|
||||||
|
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
||||||
|
| [Qwen/QwQ (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
||||||
|
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
||||||
|
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/72B | qwen2_vl |
|
||||||
|
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
|
||||||
|
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
||||||
|
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
||||||
|
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
||||||
|
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
|
||||||
|
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
||||||
|
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> **Default module** is used for the `--lora_target` argument, you can use `--lora_target all` to specify all the available modules for better convergence.
|
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
|
||||||
>
|
|
||||||
> For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
|
|
||||||
>
|
>
|
||||||
> Remember to use the **SAME** template in training and inference.
|
> Remember to use the **SAME** template in training and inference.
|
||||||
|
|
||||||
Please refer to [constants.py](src/llmtuner/extras/constants.py) for a full list of the models we support.
|
Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of the models we support.
|
||||||
|
|
||||||
You can also add a custom chat template to [template.py](src/llmtuner/data/template.py).
|
You can also add a custom chat template to [template.py](src/llamafactory/data/template.py).
|
||||||
|
|
||||||
## Supported Training Approaches
|
## Supported Training Approaches
|
||||||
|
|
||||||
@@ -189,7 +282,12 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> The implementation details of PPO can be found in [this blog](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html).
|
||||||
|
|
||||||
## Provided Datasets
|
## Provided Datasets
|
||||||
|
|
||||||
@@ -202,6 +300,8 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
||||||
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
||||||
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
||||||
|
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
||||||
|
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
||||||
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
||||||
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
||||||
|
|
||||||
@@ -209,12 +309,12 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
|
|
||||||
<details><summary>Supervised fine-tuning datasets</summary>
|
<details><summary>Supervised fine-tuning datasets</summary>
|
||||||
|
|
||||||
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
|
||||||
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
|
|
||||||
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
|
||||||
- [Identity (en&zh)](data/identity.json)
|
- [Identity (en&zh)](data/identity.json)
|
||||||
- [Open Assistant (zh)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
||||||
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
|
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
||||||
|
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
||||||
|
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
||||||
|
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
||||||
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
||||||
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
||||||
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
||||||
@@ -223,7 +323,6 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
||||||
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
||||||
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
||||||
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
|
||||||
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
||||||
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
||||||
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
||||||
@@ -236,16 +335,26 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
||||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||||
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
||||||
- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
||||||
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
||||||
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
||||||
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
||||||
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
||||||
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
||||||
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
||||||
- [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
|
||||||
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
||||||
|
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
||||||
|
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
||||||
|
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
||||||
|
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
||||||
|
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
|
||||||
|
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
||||||
|
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
|
||||||
|
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
|
||||||
|
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
|
||||||
|
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
|
||||||
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
||||||
|
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
|
||||||
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
||||||
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
||||||
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
||||||
@@ -260,13 +369,15 @@ You also can add a custom chat template to [template.py](src/llmtuner/data/templ
|
|||||||
|
|
||||||
<details><summary>Preference datasets</summary>
|
<details><summary>Preference datasets</summary>
|
||||||
|
|
||||||
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
|
||||||
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
|
||||||
- [Orca DPO (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
|
||||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
|
||||||
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
||||||
- [Open Assistant (zh)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
||||||
|
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
|
||||||
|
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
|
||||||
|
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
||||||
|
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
||||||
|
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||||
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
||||||
|
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -281,35 +392,34 @@ huggingface-cli login
|
|||||||
|
|
||||||
| Mandatory | Minimum | Recommend |
|
| Mandatory | Minimum | Recommend |
|
||||||
| ------------ | ------- | --------- |
|
| ------------ | ------- | --------- |
|
||||||
| python | 3.8 | 3.10 |
|
| python | 3.9 | 3.10 |
|
||||||
| torch | 1.13.1 | 2.2.0 |
|
| torch | 1.13.1 | 2.5.1 |
|
||||||
| transformers | 4.37.2 | 4.40.1 |
|
| transformers | 4.41.2 | 4.49.0 |
|
||||||
| datasets | 2.14.3 | 2.19.1 |
|
| datasets | 2.16.0 | 3.2.0 |
|
||||||
| accelerate | 0.27.2 | 0.30.0 |
|
| accelerate | 0.34.0 | 1.2.1 |
|
||||||
| peft | 0.9.0 | 0.10.0 |
|
| peft | 0.11.1 | 0.12.0 |
|
||||||
| trl | 0.8.1 | 0.8.6 |
|
| trl | 0.8.6 | 0.9.6 |
|
||||||
|
|
||||||
| Optional | Minimum | Recommend |
|
| Optional | Minimum | Recommend |
|
||||||
| ------------ | ------- | --------- |
|
| ------------ | ------- | --------- |
|
||||||
| CUDA | 11.6 | 12.2 |
|
| CUDA | 11.6 | 12.2 |
|
||||||
| deepspeed | 0.10.0 | 0.14.0 |
|
| deepspeed | 0.10.0 | 0.16.4 |
|
||||||
| bitsandbytes | 0.39.0 | 0.43.1 |
|
| bitsandbytes | 0.39.0 | 0.43.1 |
|
||||||
| vllm | 0.4.0 | 0.4.2 |
|
| vllm | 0.4.3 | 0.7.3 |
|
||||||
| flash-attn | 2.3.0 | 2.5.8 |
|
| flash-attn | 2.3.0 | 2.7.2 |
|
||||||
|
|
||||||
### Hardware Requirement
|
### Hardware Requirement
|
||||||
|
|
||||||
\* *estimated*
|
\* *estimated*
|
||||||
|
|
||||||
| Method | Bits | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
|
| Method | Bits | 7B | 14B | 30B | 70B | `x`B |
|
||||||
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
|
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
|
||||||
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
|
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
|
||||||
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
|
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
|
||||||
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
|
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
|
||||||
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
|
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
|
||||||
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
|
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
|
||||||
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
|
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
|
||||||
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
@@ -319,53 +429,118 @@ huggingface-cli login
|
|||||||
> Installation is mandatory.
|
> Installation is mandatory.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/hiyouga/LLaMA-Factory.git
|
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
||||||
cd LLaMA-Factory
|
cd LLaMA-Factory
|
||||||
pip install -e .[torch,metrics]
|
pip install -e ".[torch,metrics]"
|
||||||
```
|
```
|
||||||
|
|
||||||
Extra dependencies available: torch, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
|
Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, awq, aqlm, vllm, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, quality
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> Use `pip install --no-deps -e .` to resolve package conflicts.
|
> Use `pip install --no-deps -e .` to resolve package conflicts.
|
||||||
|
|
||||||
|
<details><summary>Setting up a virtual environment with <b>uv</b></summary>
|
||||||
|
|
||||||
|
Create an isolated Python environment with [uv](https://github.com/astral-sh/uv):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv sync --extra torch --extra metrics --prerelease=allow
|
||||||
|
```
|
||||||
|
|
||||||
|
Run LLaMA-Factory in the isolated environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
<details><summary>For Windows users</summary>
|
<details><summary>For Windows users</summary>
|
||||||
|
|
||||||
|
#### Install BitsAndBytes
|
||||||
|
|
||||||
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.2, please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
|
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.2, please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
||||||
```
|
```
|
||||||
|
|
||||||
To enable FlashAttention-2 on the Windows platform, you need to install the precompiled `flash-attn` library, which supports CUDA 12.1 to 12.2. Please download the corresponding version from [flash-attention](https://github.com/bdashore3/flash-attention/releases) based on your requirements.
|
#### Install Flash Attention-2
|
||||||
|
|
||||||
|
To enable FlashAttention-2 on the Windows platform, please use the script from [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) to compile and install it by yourself.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details><summary>For Ascend NPU users</summary>
|
<details><summary>For Ascend NPU users</summary>
|
||||||
|
|
||||||
To utilize Ascend NPU devices for (distributed) training and inference, you need to install the **[torch-npu](https://gitee.com/ascend/pytorch)** library and the **[Ascend CANN Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**.
|
To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher and specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
|
||||||
|
|
||||||
| Requirement | Minimum | Recommend |
|
```bash
|
||||||
| ------------ | ------- | --------- |
|
# replace the url according to your CANN version and devices
|
||||||
| CANN | 8.0.RC1 | 8.0.RC1 |
|
# install CANN Toolkit
|
||||||
| torch | 2.2.0 | 2.2.0 |
|
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run
|
||||||
| torch-npu | 2.2.0 | 2.2.0 |
|
bash Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run --install
|
||||||
| deepspeed | 0.13.2 | 0.13.2 |
|
|
||||||
|
|
||||||
Docker image:
|
# install CANN Kernels
|
||||||
|
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run
|
||||||
|
bash Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run --install
|
||||||
|
|
||||||
- 32GB: [Download page](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
|
# set env variables
|
||||||
- 64GB: Coming soon
|
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
| Requirement | Minimum | Recommend |
|
||||||
|
| ------------ | ------- | -------------- |
|
||||||
|
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
|
||||||
|
| torch | 2.1.0 | 2.4.0 |
|
||||||
|
| torch-npu | 2.1.0 | 2.4.0.post2 |
|
||||||
|
| deepspeed | 0.13.2 | 0.13.2 |
|
||||||
|
|
||||||
Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
|
Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
|
||||||
|
|
||||||
If you cannot infer model on NPU devices, try setting `do_sample: false` in the configurations.
|
If you cannot infer model on NPU devices, try setting `do_sample: false` in the configurations.
|
||||||
|
|
||||||
|
Download the pre-built Docker images: [32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
|
||||||
|
|
||||||
|
#### Install BitsAndBytes
|
||||||
|
|
||||||
|
To use QLoRA based on bitsandbytes on Ascend NPU, please follow these 3 steps:
|
||||||
|
|
||||||
|
1. Manually compile bitsandbytes: Refer to [the installation documentation](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU) for the NPU version of bitsandbytes to complete the compilation and installation. The compilation requires a cmake version of at least 3.22.1 and a g++ version of at least 12.x.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install bitsandbytes from source
|
||||||
|
# Clone bitsandbytes repo, Ascend NPU backend is currently enabled on multi-backend-refactor branch
|
||||||
|
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
|
||||||
|
cd bitsandbytes/
|
||||||
|
|
||||||
|
# Install dependencies
|
||||||
|
pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
# Install the dependencies for the compilation tools. Note that the commands for this step may vary depending on the operating system. The following are provided for reference
|
||||||
|
apt-get install -y build-essential cmake
|
||||||
|
|
||||||
|
# Compile & install
|
||||||
|
cmake -DCOMPUTE_BACKEND=npu -S .
|
||||||
|
make
|
||||||
|
pip install .
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Install transformers from the main branch.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone -b main https://github.com/huggingface/transformers.git
|
||||||
|
cd transformers
|
||||||
|
pip install .
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Set `double_quantization: false` in the configuration. You can refer to the [example](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml).
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Data Preparation
|
### Data Preparation
|
||||||
|
|
||||||
Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can either use datasets on HuggingFace / ModelScope hub or load the dataset in local disk.
|
Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can either use datasets on HuggingFace / ModelScope / Modelers hub or load the dataset in local disk.
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> Please update `data/dataset_info.json` to use your custom dataset.
|
> Please update `data/dataset_info.json` to use your custom dataset.
|
||||||
@@ -375,78 +550,163 @@ Please refer to [data/README.md](data/README.md) for checking the details about
|
|||||||
Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
|
Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
See [examples/README.md](examples/README.md) for advanced usage (including distributed training).
|
See [examples/README.md](examples/README.md) for advanced usage (including distributed training).
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> Use `llamafactory-cli help` to show help information.
|
> Use `llamafactory-cli help` to show help information.
|
||||||
|
>
|
||||||
|
> Read [FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614) first if you encounter any problems.
|
||||||
|
|
||||||
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
|
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
|
||||||
|
|
||||||
> [!IMPORTANT]
|
|
||||||
> LLaMA Board GUI only supports training on a single GPU.
|
|
||||||
|
|
||||||
#### Use local environment
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui
|
llamafactory-cli webui
|
||||||
```
|
```
|
||||||
|
|
||||||
<details><summary>For Alibaba Cloud PAI or AutoDL users</summary>
|
### Build Docker
|
||||||
|
|
||||||
If you encountered display problems in LLaMA Board on Alibaba Cloud PAI, try using the following command to set environment variables before starting LLaMA Board:
|
For CUDA users:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export GRADIO_SERVER_PORT=7860 GRADIO_ROOT_PATH=/${JUPYTER_NAME}/proxy/7860/
|
cd docker/docker-cuda/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
```
|
```
|
||||||
|
|
||||||
If you are using AutoDL, please install a specific version of Gradio:
|
For Ascend NPU users:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install gradio==4.10.0
|
cd docker/docker-npu/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
For AMD ROCm users:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd docker/docker-rocm/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>Build without Docker Compose</summary>
|
||||||
|
|
||||||
|
For CUDA users:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -f ./docker/docker-cuda/Dockerfile \
|
||||||
|
--build-arg INSTALL_BNB=false \
|
||||||
|
--build-arg INSTALL_VLLM=false \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg INSTALL_FLASHATTN=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
docker run -dit --gpus=all \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
For Ascend NPU users:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Choose docker image upon your environment
|
||||||
|
docker build -f ./docker/docker-npu/Dockerfile \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
# Change `device` upon your resources
|
||||||
|
docker run -dit \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-v /usr/local/dcmi:/usr/local/dcmi \
|
||||||
|
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
||||||
|
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
||||||
|
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--device /dev/davinci0 \
|
||||||
|
--device /dev/davinci_manager \
|
||||||
|
--device /dev/devmm_svm \
|
||||||
|
--device /dev/hisi_hdc \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
For AMD ROCm users:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -f ./docker/docker-rocm/Dockerfile \
|
||||||
|
--build-arg INSTALL_BNB=false \
|
||||||
|
--build-arg INSTALL_VLLM=false \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg INSTALL_FLASHATTN=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
docker run -dit \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-v ./saves:/app/saves \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--device /dev/kfd \
|
||||||
|
--device /dev/dri \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
#### Use Docker
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker build -f ./Dockerfile -t llama-factory:latest .
|
|
||||||
docker run --gpus=all \
|
|
||||||
-v ./hf_cache:/root/.cache/huggingface/ \
|
|
||||||
-v ./data:/app/data \
|
|
||||||
-v ./output:/app/output \
|
|
||||||
-e CUDA_VISIBLE_DEVICES=0 \
|
|
||||||
-p 7860:7860 \
|
|
||||||
--shm-size 16G \
|
|
||||||
--name llama_factory \
|
|
||||||
-d llama-factory:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Use Docker Compose
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose -f ./docker-compose.yml up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
<details><summary>Details about volume</summary>
|
<details><summary>Details about volume</summary>
|
||||||
|
|
||||||
- hf_cache: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
|
- `hf_cache`: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
|
||||||
- data: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
|
- `ms_cache`: Similar to Hugging Face cache but for ModelScope users.
|
||||||
- output: Set export dir to this location so that the merged result can be accessed directly on the host machine.
|
- `om_cache`: Similar to Hugging Face cache but for Modelers users.
|
||||||
|
- `data`: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
|
||||||
|
- `output`: Set export dir to this location so that the merged result can be accessed directly on the host machine.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Deploy with OpenAI-style API and vLLM
|
### Deploy with OpenAI-style API and vLLM
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Visit [this page](https://platform.openai.com/docs/api-reference/chat/create) for the API documentation.
|
||||||
|
>
|
||||||
|
> Examples: [Image understanding](scripts/api_example/test_image.py) | [Function calling](scripts/api_example/test_toolcall.py)
|
||||||
|
|
||||||
### Download from ModelScope Hub
|
### Download from ModelScope Hub
|
||||||
|
|
||||||
If you have trouble with downloading models and datasets from Hugging Face, you can use ModelScope.
|
If you have trouble with downloading models and datasets from Hugging Face, you can use ModelScope.
|
||||||
@@ -455,7 +715,43 @@ If you have trouble with downloading models and datasets from Hugging Face, you
|
|||||||
export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
|
export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
|
||||||
```
|
```
|
||||||
|
|
||||||
Train the model by specifying a model ID of the ModelScope Hub as the `--model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
|
Train the model by specifying a model ID of the ModelScope Hub as the `model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
|
||||||
|
|
||||||
|
### Download from Modelers Hub
|
||||||
|
|
||||||
|
You can also use Modelers Hub to download models and datasets.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export USE_OPENMIND_HUB=1 # `set USE_OPENMIND_HUB=1` for Windows
|
||||||
|
```
|
||||||
|
|
||||||
|
Train the model by specifying a model ID of the Modelers Hub as the `model_name_or_path`. You can find a full list of model IDs at [Modelers Hub](https://modelers.cn/models), e.g., `TeleAI/TeleChat-7B-pt`.
|
||||||
|
|
||||||
|
### Use W&B Logger
|
||||||
|
|
||||||
|
To use [Weights & Biases](https://wandb.ai) for logging experimental results, you need to add the following arguments to yaml files.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
report_to: wandb
|
||||||
|
run_name: test_run # optional
|
||||||
|
```
|
||||||
|
|
||||||
|
Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching training tasks to log in with your W&B account.
|
||||||
|
|
||||||
|
### Use SwanLab Logger
|
||||||
|
|
||||||
|
To use [SwanLab](https://github.com/SwanHubX/SwanLab) for logging experimental results, you need to add the following arguments to yaml files.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
use_swanlab: true
|
||||||
|
swanlab_run_name: test_run # optional
|
||||||
|
```
|
||||||
|
|
||||||
|
When launching training tasks, you can log in to SwanLab in three ways:
|
||||||
|
|
||||||
|
1. Add `swanlab_api_key=<your_api_key>` to the yaml file, and set it to your [API key](https://swanlab.cn/settings).
|
||||||
|
2. Set the environment variable `SWANLAB_API_KEY` to your [API key](https://swanlab.cn/settings).
|
||||||
|
3. Use the `swanlab login` command to complete the login.
|
||||||
|
|
||||||
## Projects using LLaMA Factory
|
## Projects using LLaMA Factory
|
||||||
|
|
||||||
@@ -468,45 +764,96 @@ If you have a project that should be incorporated, please contact via email or c
|
|||||||
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
||||||
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
||||||
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
||||||
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
||||||
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
||||||
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
||||||
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
||||||
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
||||||
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
||||||
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
||||||
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
||||||
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
||||||
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
||||||
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
||||||
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
||||||
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
||||||
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
||||||
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
||||||
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
||||||
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
||||||
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
||||||
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
||||||
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
||||||
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
||||||
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
||||||
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
||||||
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
||||||
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
||||||
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
||||||
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
||||||
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
||||||
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
||||||
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
||||||
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
||||||
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
||||||
|
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
||||||
|
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
||||||
|
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
||||||
|
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
||||||
|
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
||||||
|
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
||||||
|
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
||||||
|
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
||||||
|
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
||||||
|
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
||||||
|
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
||||||
|
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
||||||
|
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
||||||
|
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
||||||
|
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
||||||
|
1. Zhu et al. Are Large Language Models Good Statisticians? 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
||||||
|
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
||||||
|
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
||||||
|
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
||||||
|
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
||||||
|
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
||||||
|
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
||||||
|
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
||||||
|
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
||||||
|
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
||||||
|
1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949)
|
||||||
|
1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365)
|
||||||
|
1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470)
|
||||||
|
1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129)
|
||||||
|
1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044)
|
||||||
|
1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756)
|
||||||
|
1. Inouye et al. Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
|
||||||
|
1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
|
||||||
|
1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
|
||||||
|
1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
|
||||||
|
1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
|
||||||
|
1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
|
||||||
|
1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
|
||||||
|
1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
|
||||||
|
1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
|
||||||
|
1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
|
||||||
|
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
|
||||||
|
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
|
||||||
|
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
|
||||||
|
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
|
||||||
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
|
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
|
||||||
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
|
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
|
||||||
1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
|
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
|
||||||
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
|
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
|
||||||
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
|
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
|
||||||
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for Stable Diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for Stable Diffusion. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
||||||
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B.
|
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B.
|
||||||
|
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
|
||||||
|
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: SDKs for fine-tuning LLMs on Windows PC for NVIDIA RTX.
|
||||||
|
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: An easy and lazy way for building multi-agent LLMs applications and supports model fine-tuning via LLaMA Factory.
|
||||||
|
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full pipeline for RAG retrieval model fine-tuning, inference, and distillation. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
|
||||||
|
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified library that supports long sequence SFT & DPO using ring attention.
|
||||||
|
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: An o1-like model fine-tuned by NovaSky AI with very small cost.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -514,17 +861,19 @@ If you have a project that should be incorporated, please contact via email or c
|
|||||||
|
|
||||||
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
||||||
|
|
||||||
Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
Please follow the model licenses to use the corresponding model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / 
[Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
||||||
|
|
||||||
## Citation
|
## Citation
|
||||||
|
|
||||||
If this work is helpful, please kindly cite as:
|
If this work is helpful, please kindly cite as:
|
||||||
|
|
||||||
```bibtex
|
```bibtex
|
||||||
@article{zheng2024llamafactory,
|
@inproceedings{zheng2024llamafactory,
|
||||||
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
||||||
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
|
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
||||||
journal={arXiv preprint arXiv:2403.13372},
|
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
||||||
|
address={Bangkok, Thailand},
|
||||||
|
publisher={Association for Computational Linguistics},
|
||||||
year={2024},
|
year={2024},
|
||||||
url={http://arxiv.org/abs/2403.13372}
|
url={http://arxiv.org/abs/2403.13372}
|
||||||
}
|
}
|
||||||
|
|||||||
668
README_zh.md
668
README_zh.md
@@ -1,32 +1,52 @@
|
|||||||

|

|
||||||
|
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
||||||
[](LICENSE)
|
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
||||||
[](https://pypi.org/project/llmtuner/)
|
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
|
||||||
[](https://pypi.org/project/llmtuner/)
|
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
|
||||||
[](#使用了-llama-factory-的项目)
|
[](https://pypi.org/project/llamafactory/)
|
||||||
|
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
||||||
[](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
[](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
||||||
[](https://discord.gg/rKfvV9r9FK)
|
|
||||||
[](https://twitter.com/llamafactory_ai)
|
[](https://twitter.com/llamafactory_ai)
|
||||||
|
[](https://discord.gg/rKfvV9r9FK)
|
||||||
|
[](https://gitcode.com/zhengyaowei/LLaMA-Factory)
|
||||||
|
|
||||||
|
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
||||||
|
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
||||||
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
||||||
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
||||||
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
[](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)
|
||||||
|
|
||||||
[](https://trendshift.io/repositories/4535)
|
<h3 align="center">
|
||||||
|
使用零代码<a href="#快速开始">命令行</a>与 <a href="#llama-board-可视化微调由-gradio-驱动">Web UI</a> 轻松微调百余种大模型
|
||||||
|
</h3>
|
||||||
|
<p align="center">
|
||||||
|
<picture>
|
||||||
|
<img alt="Github trend" src="https://trendshift.io/api/badge/repositories/4535">
|
||||||
|
</picture>
|
||||||
|
</p>
|
||||||
|
|
||||||
👋 加入我们的[微信群](assets/wechat.jpg)。
|
|
||||||
|
👋 加入我们的[微信群](assets/wechat.jpg)或 [NPU 用户群](assets/wechat_npu.jpg)。
|
||||||
|
|
||||||
\[ [English](README.md) | 中文 \]
|
\[ [English](README.md) | 中文 \]
|
||||||
|
|
||||||
**微调大模型可以像这样轻松…**
|
**微调大模型可以像这样轻松…**
|
||||||
|
|
||||||
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd-d76c6d0a6594
|
https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
|
||||||
|
|
||||||
选择你的打开方式:
|
选择你的打开方式:
|
||||||
|
|
||||||
- **Colab**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
- **入门教程**:https://zhuanlan.zhihu.com/p/695287607
|
||||||
|
- **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
|
||||||
|
- **Colab(免费)**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
||||||
- **本地机器**:请见[如何使用](#如何使用)
|
- **本地机器**:请见[如何使用](#如何使用)
|
||||||
|
- **PAI-DSW(免费试用)**:[Llama3 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) | [DeepSeek-R1-Distill 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b)
|
||||||
|
- **Amazon SageMaker**:[博客](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。
|
||||||
|
|
||||||
## 目录
|
## 目录
|
||||||
|
|
||||||
@@ -38,6 +58,16 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
- [数据集](#数据集)
|
- [数据集](#数据集)
|
||||||
- [软硬件依赖](#软硬件依赖)
|
- [软硬件依赖](#软硬件依赖)
|
||||||
- [如何使用](#如何使用)
|
- [如何使用](#如何使用)
|
||||||
|
- [安装 LLaMA Factory](#安装-llama-factory)
|
||||||
|
- [数据准备](#数据准备)
|
||||||
|
- [快速开始](#快速开始)
|
||||||
|
- [LLaMA Board 可视化微调](#llama-board-可视化微调由-gradio-驱动)
|
||||||
|
- [构建 Docker](#构建-docker)
|
||||||
|
- [利用 vLLM 部署 OpenAI API](#利用-vllm-部署-openai-api)
|
||||||
|
- [从魔搭社区下载](#从魔搭社区下载)
|
||||||
|
- [从魔乐社区下载](#从魔乐社区下载)
|
||||||
|
- [使用 W&B 面板](#使用-wb-面板)
|
||||||
|
- [使用 SwanLab 面板](#使用-swanlab-面板)
|
||||||
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
||||||
- [协议](#协议)
|
- [协议](#协议)
|
||||||
- [引用](#引用)
|
- [引用](#引用)
|
||||||
@@ -45,14 +75,22 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
## 项目特色
|
## 项目特色
|
||||||
|
|
||||||
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Yi、Gemma、Baichuan、ChatGLM、Phi 等等。
|
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Qwen2-VL、DeepSeek、Yi、Gemma、ChatGLM、Phi 等等。
|
||||||
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练和 ORPO 训练。
|
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。
|
||||||
- **多种精度**:32 比特全参数微调、16 比特冻结微调、16 比特 LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8 的 2/4/8 比特 QLoRA 微调。
|
- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。
|
||||||
- **先进算法**:GaLore、BAdam、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 Agent 微调。
|
- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。
|
||||||
- **实用技巧**:FlashAttention-2、Unsloth、RoPE scaling、NEFTune 和 rsLoRA。
|
- **实用技巧**:[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)、[Unsloth](https://github.com/unslothai/unsloth)、[Liger Kernel](https://github.com/linkedin/Liger-Kernel)、RoPE scaling、NEFTune 和 rsLoRA。
|
||||||
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow 等等。
|
- **广泛任务**:多轮对话、工具调用、图像理解、视觉定位、视频识别和语音理解等等。
|
||||||
|
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow、SwanLab 等等。
|
||||||
- **极速推理**:基于 vLLM 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
- **极速推理**:基于 vLLM 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
||||||
|
|
||||||
|
### 最新模型的 Day-N 微调适配
|
||||||
|
|
||||||
|
| 适配时间 | 模型名称 |
|
||||||
|
| ------------ | ---------------------------------------------------------- |
|
||||||
|
| Day 0 | Qwen2.5 / Qwen2-VL / QwQ / QvQ / InternLM3 / MiniCPM-o-2.6 |
|
||||||
|
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 |
|
||||||
|
|
||||||
## 性能指标
|
## 性能指标
|
||||||
|
|
||||||
与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。
|
与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。
|
||||||
@@ -70,19 +108,59 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
## 更新日志
|
## 更新日志
|
||||||
|
|
||||||
[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。
|
[25/02/24] 我们宣布开源 **[EasyR1](https://github.com/hiyouga/EasyR1)**,一个高效可扩展的多模态强化学习框架,支持高效的 GRPO 训练。
|
||||||
|
|
||||||
[24/05/13] 我们支持了 Yi-1.5 系列模型的微调。
|
[25/02/11] 我们支持了在导出模型时保存 **[Ollama](https://github.com/ollama/ollama)** 配置文件。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。
|
[25/02/05] 我们支持了在语音理解任务上微调 **[Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)** 和 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 模型。
|
||||||
|
|
||||||
|
[25/01/31] 我们支持了 **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** 和 **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** 模型的微调。
|
||||||
|
|
||||||
<details><summary>展开日志</summary>
|
<details><summary>展开日志</summary>
|
||||||
|
|
||||||
|
[25/01/15] 我们支持了 **[APOLLO](https://arxiv.org/abs/2412.05270)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
|
[25/01/14] 我们支持了 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 和 **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** 模型的微调。感谢 [@BUAADreamer](https://github.com/BUAADreamer) 的 PR。
|
||||||
|
|
||||||
|
[25/01/14] 我们支持了 **[InternLM3](https://huggingface.co/collections/internlm/)** 模型的微调。感谢 [@hhaAndroid](https://github.com/hhaAndroid) 的 PR。
|
||||||
|
|
||||||
|
[25/01/10] 我们支持了 **[Phi-4](https://huggingface.co/microsoft/phi-4)** 模型的微调。
|
||||||
|
|
||||||
|
[24/12/21] 我们支持了使用 **[SwanLab](https://github.com/SwanHubX/SwanLab)** 跟踪与可视化实验。详细用法请参考 [此部分](#使用-swanlab-面板)。
|
||||||
|
|
||||||
|
[24/11/27] 我们支持了 **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** 模型的微调和 **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** 数据集。
|
||||||
|
|
||||||
|
[24/10/09] 我们支持了从 **[魔乐社区](https://modelers.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔乐社区下载)。
|
||||||
|
|
||||||
|
[24/09/19] 我们支持了 **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** 模型的微调。
|
||||||
|
|
||||||
|
[24/08/30] 我们支持了 **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** 模型的微调。感谢 [@simonJJJ](https://github.com/simonJJJ) 的 PR。
|
||||||
|
|
||||||
|
[24/08/27] 我们支持了 **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**。请使用 `enable_liger_kernel: true` 来加速训练。
|
||||||
|
|
||||||
|
[24/08/09] 我们支持了 **[Adam-mini](https://github.com/zyushun/Adam-mini)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@relic-yuexi](https://github.com/relic-yuexi) 的 PR。
|
||||||
|
|
||||||
|
[24/07/04] 我们支持了[无污染打包训练](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing)。请使用 `neat_packing: true` 参数。感谢 [@chuan298](https://github.com/chuan298) 的 PR。
|
||||||
|
|
||||||
|
[24/06/16] 我们支持了 **[PiSSA](https://arxiv.org/abs/2404.02948)** 算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
|
[24/06/07] 我们支持了 **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** 和 **[GLM-4](https://github.com/THUDM/GLM-4)** 模型的微调。
|
||||||
|
|
||||||
|
[24/05/26] 我们支持了 **[SimPO](https://arxiv.org/abs/2405.14734)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
|
[24/05/20] 我们支持了 **PaliGemma** 系列模型的微调。注意 PaliGemma 是预训练模型,你需要使用 `paligemma` 模板进行微调使其获得对话能力。
|
||||||
|
|
||||||
|
[24/05/18] 我们支持了 **[KTO](https://arxiv.org/abs/2402.01306)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
|
[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。
|
||||||
|
|
||||||
|
[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。
|
[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。
|
||||||
|
|
||||||
[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。
|
[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)**。详细用法请参照 [examples](examples/README_zh.md)。
|
[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
||||||
|
|
||||||
@@ -94,7 +172,7 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。
|
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/03/07] 我们支持了梯度低秩投影(**[GaLore](https://arxiv.org/abs/2403.03507)**)算法。详细用法请参照 [examples](examples/README_zh.md)。
|
[24/03/07] 我们支持了 **[GaLore](https://arxiv.org/abs/2403.03507)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||||
|
|
||||||
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。
|
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。
|
||||||
|
|
||||||
@@ -104,7 +182,7 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
||||||
|
|
||||||
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall` 即可使模型获得工具调用能力。
|
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall_zh` 即可使模型获得工具调用能力。
|
||||||
|
|
||||||
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
||||||
|
|
||||||
@@ -142,54 +220,76 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
## 模型
|
## 模型
|
||||||
|
|
||||||
| 模型名 | 模型大小 | 默认模块 | Template |
|
| 模型名 | 参数量 | Template |
|
||||||
| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- |
|
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
|
||||||
| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | W_pack | baichuan2 |
|
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||||
| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||||
| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | query_key_value | chatglm3 |
|
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
||||||
| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | q_proj,v_proj | cohere |
|
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
||||||
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | q_proj,v_proj | deepseek |
|
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
||||||
| [Falcon](https://huggingface.co/tiiuae) | 7B/40B/180B | query_key_value | falcon |
|
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseek3 |
|
||||||
| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | q_proj,v_proj | gemma |
|
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
||||||
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | wqkv | intern2 |
|
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
||||||
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
|
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
|
||||||
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
|
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
||||||
| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | q_proj,v_proj | llama3 |
|
| [Granite 3.0-3.1](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
|
||||||
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | q_proj,v_proj | vicuna |
|
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
|
||||||
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | q_proj,v_proj | mistral |
|
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
||||||
| [OLMo](https://huggingface.co/allenai) | 1B/7B | q_proj,v_proj | - |
|
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
||||||
| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - |
|
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
||||||
| [Phi-3](https://huggingface.co/microsoft) | 3.8B | qkv_proj | phi |
|
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
||||||
| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | c_attn | qwen |
|
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
||||||
| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj | qwen |
|
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
||||||
| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | q_proj,v_proj | - |
|
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
||||||
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse |
|
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
||||||
| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | q_proj,v_proj | yi |
|
| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
|
||||||
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | q_proj,v_proj | yi_vl |
|
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
||||||
| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan |
|
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
|
||||||
|
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
||||||
|
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
|
||||||
|
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
||||||
|
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
||||||
|
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
||||||
|
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
||||||
|
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
||||||
|
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
|
||||||
|
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
||||||
|
| [Qwen/QwQ (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
||||||
|
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
||||||
|
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/72B | qwen2_vl |
|
||||||
|
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
|
||||||
|
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
||||||
|
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
||||||
|
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
||||||
|
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
|
||||||
|
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
||||||
|
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块以取得更好的效果。
|
> 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
|
||||||
>
|
>
|
||||||
> 对于所有“基座”(Base)模型,`--template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
|
> 请务必在训练和推理时采用**完全一致**的模板。
|
||||||
>
|
|
||||||
> 请务必在训练和推理时使用**完全一致**的模板。
|
|
||||||
|
|
||||||
项目所支持模型的完整列表请参阅 [constants.py](src/llmtuner/extras/constants.py)。
|
项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。
|
||||||
|
|
||||||
您也可以在 [template.py](src/llmtuner/data/template.py) 中添加自己的对话模板。
|
您也可以在 [template.py](src/llamafactory/data/template.py) 中添加自己的对话模板。
|
||||||
|
|
||||||
## 训练方法
|
## 训练方法
|
||||||
|
|
||||||
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
||||||
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
||||||
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
| KTO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
| SimPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> 有关 PPO 的实现细节,请参考[此博客](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html)。
|
||||||
|
|
||||||
## 数据集
|
## 数据集
|
||||||
|
|
||||||
@@ -202,6 +302,8 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
||||||
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
||||||
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
||||||
|
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
||||||
|
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
||||||
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
||||||
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
||||||
|
|
||||||
@@ -209,12 +311,12 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
<details><summary>指令微调数据集</summary>
|
<details><summary>指令微调数据集</summary>
|
||||||
|
|
||||||
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
|
||||||
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
|
|
||||||
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
|
||||||
- [Identity (en&zh)](data/identity.json)
|
- [Identity (en&zh)](data/identity.json)
|
||||||
- [Open Assistant (zh)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
||||||
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
|
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
||||||
|
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
||||||
|
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
||||||
|
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
||||||
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
||||||
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
||||||
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
||||||
@@ -223,7 +325,6 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
||||||
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
||||||
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
||||||
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
|
||||||
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
||||||
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
||||||
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
||||||
@@ -236,16 +337,26 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
||||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||||
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
||||||
- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
||||||
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
||||||
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
||||||
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
||||||
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
||||||
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
||||||
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
||||||
- [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
|
||||||
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
||||||
|
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
||||||
|
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
||||||
|
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
||||||
|
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
||||||
|
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
|
||||||
|
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
||||||
|
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
|
||||||
|
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
|
||||||
|
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
|
||||||
|
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
|
||||||
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
||||||
|
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
|
||||||
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
||||||
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
||||||
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
||||||
@@ -260,13 +371,15 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
|
|||||||
|
|
||||||
<details><summary>偏好数据集</summary>
|
<details><summary>偏好数据集</summary>
|
||||||
|
|
||||||
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
|
||||||
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
|
||||||
- [Orca DPO (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
|
||||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
|
||||||
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
||||||
- [Open Assistant (zh)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
||||||
|
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
|
||||||
|
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
|
||||||
|
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
||||||
|
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
||||||
|
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||||
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
||||||
|
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -281,35 +394,34 @@ huggingface-cli login
|
|||||||
|
|
||||||
| 必需项 | 至少 | 推荐 |
|
| 必需项 | 至少 | 推荐 |
|
||||||
| ------------ | ------- | --------- |
|
| ------------ | ------- | --------- |
|
||||||
| python | 3.8 | 3.10 |
|
| python | 3.9 | 3.10 |
|
||||||
| torch | 1.13.1 | 2.2.0 |
|
| torch | 1.13.1 | 2.5.1 |
|
||||||
| transformers | 4.37.2 | 4.40.1 |
|
| transformers | 4.41.2 | 4.49.0 |
|
||||||
| datasets | 2.14.3 | 2.19.1 |
|
| datasets | 2.16.0 | 3.2.0 |
|
||||||
| accelerate | 0.27.2 | 0.30.0 |
|
| accelerate | 0.34.0 | 1.2.1 |
|
||||||
| peft | 0.9.0 | 0.10.0 |
|
| peft | 0.11.1 | 0.12.0 |
|
||||||
| trl | 0.8.1 | 0.8.6 |
|
| trl | 0.8.6 | 0.9.6 |
|
||||||
|
|
||||||
| 可选项 | 至少 | 推荐 |
|
| 可选项 | 至少 | 推荐 |
|
||||||
| ------------ | ------- | --------- |
|
| ------------ | ------- | --------- |
|
||||||
| CUDA | 11.6 | 12.2 |
|
| CUDA | 11.6 | 12.2 |
|
||||||
| deepspeed | 0.10.0 | 0.14.0 |
|
| deepspeed | 0.10.0 | 0.16.4 |
|
||||||
| bitsandbytes | 0.39.0 | 0.43.1 |
|
| bitsandbytes | 0.39.0 | 0.43.1 |
|
||||||
| vllm | 0.4.0 | 0.4.2 |
|
| vllm | 0.4.3 | 0.7.3 |
|
||||||
| flash-attn | 2.3.0 | 2.5.8 |
|
| flash-attn | 2.3.0 | 2.7.2 |
|
||||||
|
|
||||||
### 硬件依赖
|
### 硬件依赖
|
||||||
|
|
||||||
\* *估算值*
|
\* *估算值*
|
||||||
|
|
||||||
| 方法 | 精度 | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
|
| 方法 | 精度 | 7B | 14B | 30B | 70B | `x`B |
|
||||||
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
|
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
|
||||||
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
|
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
|
||||||
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
|
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
|
||||||
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
|
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
|
||||||
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
|
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
|
||||||
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
|
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
|
||||||
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
|
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
|
||||||
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
|
|
||||||
|
|
||||||
## 如何使用
|
## 如何使用
|
||||||
|
|
||||||
@@ -319,53 +431,119 @@ huggingface-cli login
|
|||||||
> 此步骤为必需。
|
> 此步骤为必需。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/hiyouga/LLaMA-Factory.git
|
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
||||||
cd LLaMA-Factory
|
cd LLaMA-Factory
|
||||||
pip install -e .[torch,metrics]
|
pip install -e ".[torch,metrics]"
|
||||||
```
|
```
|
||||||
|
|
||||||
可选的额外依赖项:torch、metrics、deepspeed、bitsandbytes、vllm、galore、badam、gptq、awq、aqlm、qwen、modelscope、quality
|
可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、awq、aqlm、vllm、galore、apollo、badam、adam-mini、qwen、minicpm_v、modelscope、openmind、swanlab、quality
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> 遇到包冲突时,可使用 `pip install --no-deps -e .` 解决。
|
> 遇到包冲突时,可使用 `pip install --no-deps -e .` 解决。
|
||||||
|
|
||||||
|
<details><summary>使用 <b>uv</b> 构建虚拟环境</summary>
|
||||||
|
|
||||||
|
使用 [uv](https://github.com/astral-sh/uv) 创建隔离的 Python 环境:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv sync --extra torch --extra metrics --prerelease=allow
|
||||||
|
```
|
||||||
|
|
||||||
|
在环境中运行 LLaMA-Factory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
<details><summary>Windows 用户指南</summary>
|
<details><summary>Windows 用户指南</summary>
|
||||||
|
|
||||||
|
#### 安装 BitsAndBytes
|
||||||
|
|
||||||
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。
|
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
||||||
```
|
```
|
||||||
|
|
||||||
如果要在 Windows 平台上开启 FlashAttention-2,需要安装预编译的 `flash-attn` 库,支持 CUDA 12.1 到 12.2,请根据需求到 [flash-attention](https://github.com/bdashore3/flash-attention/releases) 下载对应版本安装。
|
#### 安装 Flash Attention-2
|
||||||
|
|
||||||
|
如果要在 Windows 平台上开启 FlashAttention-2,请使用 [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) 中的脚本自行编译与安装。
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details><summary>昇腾 NPU 用户指南</summary>
|
<details><summary>昇腾 NPU 用户指南</summary>
|
||||||
|
|
||||||
如果使用昇腾 NPU 设备进行(分布式)训练或推理,需要安装 **[torch-npu](https://gitee.com/ascend/pytorch)** 库和 **[Ascend CANN Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**。
|
在昇腾 NPU 设备上安装 LLaMA Factory 时,请升级 Python 到 3.10 及以上,并需要指定额外依赖项,使用 `pip install -e ".[torch-npu,metrics]"` 命令安装。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令:
|
||||||
|
|
||||||
| 依赖项 | 至少 | 推荐 |
|
```bash
|
||||||
| ------------ | ------- | --------- |
|
# 请替换 URL 为 CANN 版本和设备型号对应的 URL
|
||||||
| CANN | 8.0.RC1 | 8.0.RC1 |
|
# 安装 CANN Toolkit
|
||||||
| torch | 2.2.0 | 2.2.0 |
|
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
|
||||||
| torch-npu | 2.2.0 | 2.2.0 |
|
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
|
||||||
| deepspeed | 0.13.2 | 0.13.2 |
|
|
||||||
|
|
||||||
Docker 镜像:
|
# 安装 CANN Kernels
|
||||||
|
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
|
||||||
|
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
|
||||||
|
|
||||||
- 32GB:[下载地址](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
|
# 设置环境变量
|
||||||
- 64GB:敬请期待
|
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
||||||
|
```
|
||||||
|
|
||||||
请记得使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定您使用的设备。
|
| 依赖项 | 至少 | 推荐 |
|
||||||
|
| ------------ | ------- | -------------- |
|
||||||
|
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
|
||||||
|
| torch | 2.1.0 | 2.4.0 |
|
||||||
|
| torch-npu | 2.1.0 | 2.4.0.post2 |
|
||||||
|
| deepspeed | 0.13.2 | 0.13.2 |
|
||||||
|
|
||||||
|
请使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定运算设备。
|
||||||
|
|
||||||
如果遇到无法正常推理的情况,请尝试设置 `do_sample: false`。
|
如果遇到无法正常推理的情况,请尝试设置 `do_sample: false`。
|
||||||
|
|
||||||
|
下载预构建 Docker 镜像:[32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
|
||||||
|
|
||||||
|
#### 安装 BitsAndBytes
|
||||||
|
|
||||||
|
如果要在 Ascend NPU 上进行基于 bitsandbytes 的 QLoRA 量化微调,请执行如下步骤:
|
||||||
|
|
||||||
|
1. 手动编译 bitsandbytes:请参考[安装文档](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU)完成 NPU 版的 bitsandbytes 安装,编译要求环境 cmake 版本不低于 3.22.1,g++ 版本不低于 12.x。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 从源码安装 bitsandbytes
|
||||||
|
# 克隆 bitsandbytes 仓库,Ascend NPU 目前在 multi-backend-refactor 分支中支持
|
||||||
|
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
|
||||||
|
cd bitsandbytes/
|
||||||
|
|
||||||
|
# 安装依赖
|
||||||
|
pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
# 安装编译工具依赖,该步骤在不同系统上命令有所不同,供参考
|
||||||
|
apt-get install -y build-essential cmake
|
||||||
|
|
||||||
|
# 编译 & 安装
|
||||||
|
cmake -DCOMPUTE_BACKEND=npu -S .
|
||||||
|
make
|
||||||
|
pip install .
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 安装 transformers 的 main 分支版本。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone -b main https://github.com/huggingface/transformers.git
|
||||||
|
cd transformers
|
||||||
|
pip install .
|
||||||
|
```
|
||||||
|
|
||||||
|
3. 在训练参数中设置 `double_quantization: false`,可参考[示例](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml)。
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### 数据准备
|
### 数据准备
|
||||||
|
|
||||||
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope 上的数据集或加载本地数据集。
|
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope / Modelers 上的数据集或加载本地数据集。
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。
|
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。
|
||||||
@@ -375,78 +553,163 @@ Docker 镜像:
|
|||||||
下面三行命令分别对 Llama3-8B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。
|
下面三行命令分别对 Llama3-8B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。
|
高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> 使用 `llamafactory-cli help` 显示帮助信息。
|
> 使用 `llamafactory-cli help` 显示帮助信息。
|
||||||
|
>
|
||||||
|
> 遇到报错请先看[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)。
|
||||||
|
|
||||||
### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动)
|
### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动)
|
||||||
|
|
||||||
> [!IMPORTANT]
|
|
||||||
> LLaMA Board 可视化界面目前仅支持单 GPU 训练。
|
|
||||||
|
|
||||||
#### 使用本地环境
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui
|
llamafactory-cli webui
|
||||||
```
|
```
|
||||||
|
|
||||||
<details><summary>阿里云 PAI 和 AutoDL 用户指南</summary>
|
### 构建 Docker
|
||||||
|
|
||||||
如果您在阿里云 PAI 上使用 LLaMA Board 时遇到显示问题,请尝试在启动前使用以下命令设置环境变量:
|
CUDA 用户:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export GRADIO_SERVER_PORT=7860 GRADIO_ROOT_PATH=/${JUPYTER_NAME}/proxy/7860/
|
cd docker/docker-cuda/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
```
|
```
|
||||||
|
|
||||||
如果您正在使用 AutoDL,请安装下述 Gradio 版本:
|
昇腾 NPU 用户:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install gradio==4.10.0
|
cd docker/docker-npu/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
AMD ROCm 用户:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd docker/docker-rocm/
|
||||||
|
docker compose up -d
|
||||||
|
docker compose exec llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
<details><summary>不使用 Docker Compose 构建</summary>
|
||||||
|
|
||||||
|
CUDA 用户:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -f ./docker/docker-cuda/Dockerfile \
|
||||||
|
--build-arg INSTALL_BNB=false \
|
||||||
|
--build-arg INSTALL_VLLM=false \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg INSTALL_FLASHATTN=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
docker run -dit --gpus=all \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
昇腾 NPU 用户:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 根据您的环境选择镜像
|
||||||
|
docker build -f ./docker/docker-npu/Dockerfile \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
# 根据您的资源更改 `device`
|
||||||
|
docker run -dit \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-v /usr/local/dcmi:/usr/local/dcmi \
|
||||||
|
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
||||||
|
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
||||||
|
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--device /dev/davinci0 \
|
||||||
|
--device /dev/davinci_manager \
|
||||||
|
--device /dev/devmm_svm \
|
||||||
|
--device /dev/hisi_hdc \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
|
```
|
||||||
|
|
||||||
|
AMD ROCm 用户:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -f ./docker/docker-rocm/Dockerfile \
|
||||||
|
--build-arg INSTALL_BNB=false \
|
||||||
|
--build-arg INSTALL_VLLM=false \
|
||||||
|
--build-arg INSTALL_DEEPSPEED=false \
|
||||||
|
--build-arg INSTALL_FLASHATTN=false \
|
||||||
|
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||||
|
-t llamafactory:latest .
|
||||||
|
|
||||||
|
docker run -dit \
|
||||||
|
-v ./hf_cache:/root/.cache/huggingface \
|
||||||
|
-v ./ms_cache:/root/.cache/modelscope \
|
||||||
|
-v ./om_cache:/root/.cache/openmind \
|
||||||
|
-v ./data:/app/data \
|
||||||
|
-v ./output:/app/output \
|
||||||
|
-v ./saves:/app/saves \
|
||||||
|
-p 7860:7860 \
|
||||||
|
-p 8000:8000 \
|
||||||
|
--device /dev/kfd \
|
||||||
|
--device /dev/dri \
|
||||||
|
--shm-size 16G \
|
||||||
|
--name llamafactory \
|
||||||
|
llamafactory:latest
|
||||||
|
|
||||||
|
docker exec -it llamafactory bash
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
#### 使用 Docker
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker build -f ./Dockerfile -t llama-factory:latest .
|
|
||||||
docker run --gpus=all \
|
|
||||||
-v ./hf_cache:/root/.cache/huggingface/ \
|
|
||||||
-v ./data:/app/data \
|
|
||||||
-v ./output:/app/output \
|
|
||||||
-e CUDA_VISIBLE_DEVICES=0 \
|
|
||||||
-p 7860:7860 \
|
|
||||||
--shm-size 16G \
|
|
||||||
--name llama_factory \
|
|
||||||
-d llama-factory:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 使用 Docker Compose
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose -f ./docker-compose.yml up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
<details><summary>数据卷详情</summary>
|
<details><summary>数据卷详情</summary>
|
||||||
|
|
||||||
- hf_cache:使用宿主机的 Hugging Face 缓存文件夹,允许更改为新的目录。
|
- `hf_cache`:使用宿主机的 Hugging Face 缓存文件夹,允许更改为新的目录。
|
||||||
- data:宿主机中存放数据集的文件夹路径。
|
- `ms_cache`:类似 Hugging Face 缓存文件夹,为 ModelScope 用户提供。
|
||||||
- output:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。
|
- `om_cache`:类似 Hugging Face 缓存文件夹,为 Modelers 用户提供。
|
||||||
|
- `data`:宿主机中存放数据集的文件夹路径。
|
||||||
|
- `output`:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### 利用 vLLM 部署 OpenAI API
|
### 利用 vLLM 部署 OpenAI API
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> API 文档请查阅[这里](https://platform.openai.com/docs/api-reference/chat/create)。
|
||||||
|
>
|
||||||
|
> 示例:[图像理解](scripts/api_example/test_image.py) | [工具调用](scripts/api_example/test_toolcall.py)
|
||||||
|
|
||||||
### 从魔搭社区下载
|
### 从魔搭社区下载
|
||||||
|
|
||||||
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
||||||
@@ -455,7 +718,43 @@ CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api examples/inference/l
|
|||||||
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
||||||
```
|
```
|
||||||
|
|
||||||
将 `--model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。
|
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。
|
||||||
|
|
||||||
|
### 从魔乐社区下载
|
||||||
|
|
||||||
|
您也可以通过下述方法,使用魔乐社区下载数据集和模型。
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export USE_OPENMIND_HUB=1 # Windows 使用 `set USE_OPENMIND_HUB=1`
|
||||||
|
```
|
||||||
|
|
||||||
|
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔乐社区](https://modelers.cn/models)查看所有可用的模型,例如 `TeleAI/TeleChat-7B-pt`。
|
||||||
|
|
||||||
|
### 使用 W&B 面板
|
||||||
|
|
||||||
|
若要使用 [Weights & Biases](https://wandb.ai) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
report_to: wandb
|
||||||
|
run_name: test_run # 可选
|
||||||
|
```
|
||||||
|
|
||||||
|
在启动训练任务时,将 `WANDB_API_KEY` 设置为[密钥](https://wandb.ai/authorize)来登录 W&B 账户。
|
||||||
|
|
||||||
|
### 使用 SwanLab 面板
|
||||||
|
|
||||||
|
若要使用 [SwanLab](https://github.com/SwanHubX/SwanLab) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
use_swanlab: true
|
||||||
|
swanlab_run_name: test_run # 可选
|
||||||
|
```
|
||||||
|
|
||||||
|
在启动训练任务时,登录 SwanLab 账户有以下三种方式:
|
||||||
|
|
||||||
|
方式一:在 yaml 文件中添加 `swanlab_api_key=<your_api_key>`,并设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
||||||
|
方式二:将环境变量 `SWANLAB_API_KEY` 设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
||||||
|
方式三:启动前使用 `swanlab login` 命令完成登录。
|
||||||
|
|
||||||
## 使用了 LLaMA Factory 的项目
|
## 使用了 LLaMA Factory 的项目
|
||||||
|
|
||||||
@@ -468,45 +767,96 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
|||||||
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
||||||
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
||||||
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
||||||
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
||||||
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
||||||
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
||||||
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
||||||
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
||||||
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
||||||
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
||||||
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
||||||
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
||||||
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
||||||
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
||||||
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
||||||
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
||||||
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
||||||
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
||||||
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
||||||
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
||||||
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
||||||
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
||||||
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
||||||
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
||||||
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
||||||
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
||||||
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
||||||
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
||||||
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
||||||
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
||||||
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
||||||
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
||||||
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
||||||
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
||||||
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
||||||
|
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
||||||
|
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
||||||
|
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
||||||
|
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
||||||
|
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
||||||
|
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
||||||
|
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
||||||
|
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
||||||
|
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
||||||
|
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
||||||
|
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
||||||
|
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
||||||
|
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
||||||
|
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
||||||
|
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
||||||
|
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
||||||
|
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
||||||
|
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
||||||
|
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
||||||
|
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
||||||
|
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
||||||
|
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
||||||
|
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
||||||
|
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
||||||
|
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
||||||
|
1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949)
|
||||||
|
1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365)
|
||||||
|
1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470)
|
||||||
|
1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129)
|
||||||
|
1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044)
|
||||||
|
1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756)
|
||||||
|
1. Inouye et al. Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
|
||||||
|
1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
|
||||||
|
1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
|
||||||
|
1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
|
||||||
|
1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
|
||||||
|
1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
|
||||||
|
1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
|
||||||
|
1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
|
||||||
|
1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
|
||||||
|
1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
|
||||||
|
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
|
||||||
|
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
|
||||||
|
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
|
||||||
|
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
|
||||||
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
|
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
|
||||||
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
|
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
|
||||||
1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
|
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
|
||||||
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
|
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
|
||||||
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
|
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
|
||||||
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
||||||
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
|
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
|
||||||
|
1. **[AutoRE](https://github.com/THUDM/AutoRE)**:基于大语言模型的文档级关系抽取系统。
|
||||||
|
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**:在 Windows 主机上利用英伟达 RTX 设备进行大型语言模型微调的开发包。
|
||||||
|
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**:一个低代码构建多 Agent 大模型应用的开发工具,支持基于 LLaMA Factory 的模型微调。
|
||||||
|
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**:一个全链路 RAG 检索模型微调、推理和蒸馏代码库。[[blog]](https://zhuanlan.zhihu.com/p/987727357)
|
||||||
|
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**:一个魔改后的代码库,通过 Ring Attention 支持长序列的 SFT 和 DPO 训练。
|
||||||
|
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**:由 NovaSky AI 微调的低成本类 o1 长推理模型。
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -514,17 +864,19 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
|||||||
|
|
||||||
本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
|
本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
|
||||||
|
|
||||||
使用模型权重时,请遵循对应的模型协议:[Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
使用模型权重时,请遵循对应的模型协议:[Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
||||||
|
|
||||||
## 引用
|
## 引用
|
||||||
|
|
||||||
如果您觉得此项目有帮助,请考虑以下列格式引用
|
如果您觉得此项目有帮助,请考虑以下列格式引用
|
||||||
|
|
||||||
```bibtex
|
```bibtex
|
||||||
@article{zheng2024llamafactory,
|
@inproceedings{zheng2024llamafactory,
|
||||||
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
||||||
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
|
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
||||||
journal={arXiv preprint arXiv:2403.13372},
|
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
||||||
|
address={Bangkok, Thailand},
|
||||||
|
publisher={Association for Computational Linguistics},
|
||||||
year={2024},
|
year={2024},
|
||||||
url={http://arxiv.org/abs/2403.13372}
|
url={http://arxiv.org/abs/2403.13372}
|
||||||
}
|
}
|
||||||
|
|||||||
1630
assets/benchmark.svg
1630
assets/benchmark.svg
File diff suppressed because it is too large
Load Diff
|
Before Width: | Height: | Size: 29 KiB After Width: | Height: | Size: 28 KiB |
343
data/README.md
343
data/README.md
@@ -1,16 +1,19 @@
|
|||||||
If you are using a custom dataset, please add your **dataset description** to `dataset_info.json` according to the following format. We also provide several examples in the next section.
|
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
|
||||||
|
|
||||||
|
Currently we support datasets in **alpaca** and **sharegpt** format.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
|
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
|
||||||
"ms_hub_url": "the name of the dataset repository on the ModelScope hub. (if specified, ignore script_url and file_name)",
|
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url and file_name)",
|
||||||
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
|
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
|
||||||
"file_name": "the name of the dataset file in this directory. (required if above are not specified)",
|
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
||||||
"file_sha1": "the SHA-1 hash value of the dataset file. (optional, does not affect training)",
|
|
||||||
"subset": "the name of the subset. (optional, default: None)",
|
|
||||||
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
|
||||||
"ranking": "whether the dataset is a preference dataset or not. (default: false)",
|
|
||||||
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
||||||
|
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
||||||
|
"subset": "the name of the subset. (optional, default: None)",
|
||||||
|
"split": "the name of dataset split to be used. (optional, default: train)",
|
||||||
|
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
||||||
|
"num_samples": "the number of samples in the dataset to be used. (optional, default: None)",
|
||||||
"columns (optional)": {
|
"columns (optional)": {
|
||||||
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
||||||
"query": "the column name in the dataset containing the queries. (default: input)",
|
"query": "the column name in the dataset containing the queries. (default: input)",
|
||||||
@@ -19,7 +22,12 @@ If you are using a custom dataset, please add your **dataset description** to `d
|
|||||||
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
||||||
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
||||||
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
||||||
"images": "the column name in the dataset containing the image inputs. (default: None)"
|
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
||||||
|
"videos": "the column name in the dataset containing the videos inputs. (default: None)",
|
||||||
|
"audios": "the column name in the dataset containing the audios inputs. (default: None)",
|
||||||
|
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
||||||
|
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
||||||
|
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
||||||
},
|
},
|
||||||
"tags (optional, used for the sharegpt format)": {
|
"tags (optional, used for the sharegpt format)": {
|
||||||
"role_tag": "the key in the message represents the identity. (default: from)",
|
"role_tag": "the key in the message represents the identity. (default: from)",
|
||||||
@@ -33,28 +41,34 @@ If you are using a custom dataset, please add your **dataset description** to `d
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
After that, you can load the custom dataset by specifying `--dataset dataset_name`.
|
## Alpaca Format
|
||||||
|
|
||||||
----
|
### Supervised Fine-Tuning Dataset
|
||||||
|
|
||||||
Currently we support dataset in **alpaca** or **sharegpt** format, the dataset in alpaca format should follow the below format:
|
* [Example dataset](alpaca_en_demo.json)
|
||||||
|
|
||||||
|
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the human prompt, then the human prompt would be `instruction\ninput`. The `output` column represents the model response.
|
||||||
|
|
||||||
|
The `system` column will be used as the system prompt if specified.
|
||||||
|
|
||||||
|
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"instruction": "user instruction (required)",
|
"instruction": "human instruction (required)",
|
||||||
"input": "user input (optional)",
|
"input": "human input (optional)",
|
||||||
"output": "model response (required)",
|
"output": "model response (required)",
|
||||||
"system": "system prompt (optional)",
|
"system": "system prompt (optional)",
|
||||||
"history": [
|
"history": [
|
||||||
["user instruction in the first round (optional)", "model response in the first round (optional)"],
|
["human instruction in the first round (optional)", "model response in the first round (optional)"],
|
||||||
["user instruction in the second round (optional)", "model response in the second round (optional)"]
|
["human instruction in the second round (optional)", "model response in the second round (optional)"]
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
@@ -69,11 +83,11 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `query` column will be concatenated with the `prompt` column and used as the user prompt, then the user prompt would be `prompt\nquery`. The `response` column represents the model response.
|
### Pre-training Dataset
|
||||||
|
|
||||||
The `system` column will be used as the system prompt. The `history` column is a list consisting string tuples representing prompt-response pairs in the history. Note that the responses in the history **will also be used for training** in supervised fine-tuning.
|
- [Example dataset](c4_demo.json)
|
||||||
|
|
||||||
For the **pre-training datasets**, only the `prompt` column will be used for training, for example:
|
In pre-training, only the `text` column will be used for model learning.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -82,7 +96,7 @@ For the **pre-training datasets**, only the `prompt` column will be used for tra
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
@@ -93,22 +107,24 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
For the **preference datasets**, the `response` column should be a string list whose length is 2, with the preferred answers appearing first, for example:
|
### Preference Dataset
|
||||||
|
|
||||||
|
Preference datasets are used for reward modeling, DPO training, ORPO and SimPO training.
|
||||||
|
|
||||||
|
It requires a better response in `chosen` column and a worse response in `rejected` column.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"instruction": "user instruction",
|
"instruction": "human instruction (required)",
|
||||||
"input": "user input",
|
"input": "human input (optional)",
|
||||||
"output": [
|
"chosen": "chosen answer (required)",
|
||||||
"chosen answer",
|
"rejected": "rejected answer (required)"
|
||||||
"rejected answer"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
@@ -117,14 +133,37 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
|||||||
"columns": {
|
"columns": {
|
||||||
"prompt": "instruction",
|
"prompt": "instruction",
|
||||||
"query": "input",
|
"query": "input",
|
||||||
"response": "output",
|
"chosen": "chosen",
|
||||||
|
"rejected": "rejected"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
----
|
### KTO Dataset
|
||||||
|
|
||||||
The dataset in **sharegpt** format should follow the below format:
|
An additional column `kto_tag` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||||
|
|
||||||
|
### Multimodal Image Dataset
|
||||||
|
|
||||||
|
An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||||
|
|
||||||
|
### Multimodal Video Dataset
|
||||||
|
|
||||||
|
An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||||
|
|
||||||
|
### Multimodal Audio Dataset
|
||||||
|
|
||||||
|
An additional column `audios` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||||
|
|
||||||
|
## Sharegpt Format
|
||||||
|
|
||||||
|
### Supervised Fine-Tuning Dataset
|
||||||
|
|
||||||
|
- [Example dataset](glaive_toolcall_en_demo.json)
|
||||||
|
|
||||||
|
Compared to the alpaca format, the sharegpt format allows the datasets have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
|
||||||
|
|
||||||
|
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -132,7 +171,15 @@ The dataset in **sharegpt** format should follow the below format:
|
|||||||
"conversations": [
|
"conversations": [
|
||||||
{
|
{
|
||||||
"from": "human",
|
"from": "human",
|
||||||
"value": "user instruction"
|
"value": "human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "function_call",
|
||||||
|
"value": "tool arguments"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "observation",
|
||||||
|
"value": "tool result"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"from": "gpt",
|
"from": "gpt",
|
||||||
@@ -145,7 +192,7 @@ The dataset in **sharegpt** format should follow the below format:
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
@@ -155,19 +202,227 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
|||||||
"messages": "conversations",
|
"messages": "conversations",
|
||||||
"system": "system",
|
"system": "system",
|
||||||
"tools": "tools"
|
"tools": "tools"
|
||||||
},
|
|
||||||
"tags": {
|
|
||||||
"role_tag": "from",
|
|
||||||
"content_tag": "value",
|
|
||||||
"user_tag": "human",
|
|
||||||
"assistant_tag": "gpt"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
where the `messages` column should be a list following the `u/a/u/a/u/a` order.
|
### Pre-training Dataset
|
||||||
|
|
||||||
We also supports the dataset in the **openai** format:
|
Not yet supported, please use the [alpaca](#alpaca-format) format.
|
||||||
|
|
||||||
|
### Preference Dataset
|
||||||
|
|
||||||
|
- [Example dataset](dpo_en_demo.json)
|
||||||
|
|
||||||
|
Preference datasets in sharegpt format also require a better message in `chosen` column and a worse message in `rejected` column.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "model response"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "human instruction"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"chosen": {
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "chosen answer (required)"
|
||||||
|
},
|
||||||
|
"rejected": {
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "rejected answer (required)"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"dataset_name": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"ranking": true,
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"chosen": "chosen",
|
||||||
|
"rejected": "rejected"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### KTO Dataset
|
||||||
|
|
||||||
|
- [Example dataset](kto_en_demo.json)
|
||||||
|
|
||||||
|
KTO datasets require a extra `kto_tag` column containing the boolean human feedback.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "model response"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kto_tag": "human feedback [true/false] (required)"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"dataset_name": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"kto_tag": "kto_tag"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multimodal Image Dataset
|
||||||
|
|
||||||
|
- [Example dataset](mllm_demo.json)
|
||||||
|
|
||||||
|
Multimodal image datasets require an `images` column containing the paths to the input images.
|
||||||
|
|
||||||
|
The number of images should be identical to the `<image>` tokens in the conversations.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<image>human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "model response"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"images": [
|
||||||
|
"image path (required)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"dataset_name": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"images": "images"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multimodal Video Dataset
|
||||||
|
|
||||||
|
- [Example dataset](mllm_video_demo.json)
|
||||||
|
|
||||||
|
Multimodal video datasets require a `videos` column containing the paths to the input videos.
|
||||||
|
|
||||||
|
The number of videos should be identical to the `<video>` tokens in the conversations.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<video>human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "model response"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"videos": [
|
||||||
|
"video path (required)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"dataset_name": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"videos": "videos"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multimodal Audio Dataset
|
||||||
|
|
||||||
|
- [Example dataset](mllm_audio_demo.json)
|
||||||
|
|
||||||
|
Multimodal audio datasets require an `audios` column containing the paths to the input audios.
|
||||||
|
|
||||||
|
The number of audios should be identical to the `<audio>` tokens in the conversations.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<audio>human instruction"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "model response"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"audios": [
|
||||||
|
"audio path (required)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"dataset_name": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"audios": "audios"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### OpenAI Format
|
||||||
|
|
||||||
|
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -179,7 +434,7 @@ We also supports the dataset in the **openai** format:
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": "user instruction"
|
"content": "human instruction"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -190,7 +445,7 @@ We also supports the dataset in the **openai** format:
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"dataset_name": {
|
"dataset_name": {
|
||||||
@@ -208,5 +463,3 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Pre-training datasets and preference datasets are **incompatible** with the sharegpt format yet.
|
|
||||||
|
|||||||
@@ -1,16 +1,19 @@
|
|||||||
如果您使用自定义数据集,请务必按照以下格式在 `dataset_info.json` 文件中添加**数据集描述**。我们在下面也提供了一些例子。
|
[dataset_info.json](dataset_info.json) 包含了所有可用的数据集。如果您希望使用自定义数据集,请**务必**在 `dataset_info.json` 文件中添加*数据集描述*,并通过修改 `dataset: 数据集名称` 配置来使用数据集。
|
||||||
|
|
||||||
|
目前我们支持 **alpaca** 格式和 **sharegpt** 格式的数据集。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
||||||
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
||||||
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
||||||
"file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
|
"file_name": "该目录下数据集文件夹或文件的名称(若上述参数未指定,则此项必需)",
|
||||||
"file_sha1": "数据集文件的 SHA-1 哈希值(可选,留空不影响训练)",
|
|
||||||
"subset": "数据集子集的名称(可选,默认:None)",
|
|
||||||
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
|
||||||
"ranking": "是否为偏好数据集(可选,默认:False)",
|
|
||||||
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
||||||
|
"ranking": "是否为偏好数据集(可选,默认:False)",
|
||||||
|
"subset": "数据集子集的名称(可选,默认:None)",
|
||||||
|
"split": "所使用的数据集切分(可选,默认:train)",
|
||||||
|
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
||||||
|
"num_samples": "该数据集所使用的样本数量。(可选,默认:None)",
|
||||||
"columns(可选)": {
|
"columns(可选)": {
|
||||||
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
||||||
"query": "数据集代表请求的表头名称(默认:input)",
|
"query": "数据集代表请求的表头名称(默认:input)",
|
||||||
@@ -19,7 +22,12 @@
|
|||||||
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
||||||
"system": "数据集代表系统提示的表头名称(默认:None)",
|
"system": "数据集代表系统提示的表头名称(默认:None)",
|
||||||
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
||||||
"images": "数据集代表图像输入的表头名称(默认:None)"
|
"images": "数据集代表图像输入的表头名称(默认:None)",
|
||||||
|
"videos": "数据集代表视频输入的表头名称(默认:None)",
|
||||||
|
"audios": "数据集代表音频输入的表头名称(默认:None)",
|
||||||
|
"chosen": "数据集代表更优回答的表头名称(默认:None)",
|
||||||
|
"rejected": "数据集代表更差回答的表头名称(默认:None)",
|
||||||
|
"kto_tag": "数据集代表 KTO 标签的表头名称(默认:None)"
|
||||||
},
|
},
|
||||||
"tags(可选,用于 sharegpt 格式)": {
|
"tags(可选,用于 sharegpt 格式)": {
|
||||||
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
||||||
@@ -28,22 +36,28 @@
|
|||||||
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
||||||
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
||||||
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
||||||
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system 列)"
|
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system column)"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
然后,可通过使用 `--dataset 数据集名称` 参数加载自定义数据集。
|
## Alpaca 格式
|
||||||
|
|
||||||
----
|
### 指令监督微调数据集
|
||||||
|
|
||||||
该项目目前支持两种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织:
|
- [样例数据集](alpaca_zh_demo.json)
|
||||||
|
|
||||||
|
在指令监督微调时,`instruction` 列对应的内容会与 `input` 列对应的内容拼接后作为人类指令,即人类指令为 `instruction\ninput`。而 `output` 列对应的内容为模型回答。
|
||||||
|
|
||||||
|
如果指定,`system` 列对应的内容将被作为系统提示词。
|
||||||
|
|
||||||
|
`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容**也会被用于模型学习**。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"instruction": "用户指令(必填)",
|
"instruction": "人类指令(必填)",
|
||||||
"input": "用户输入(选填)",
|
"input": "人类输入(选填)",
|
||||||
"output": "模型回答(必填)",
|
"output": "模型回答(必填)",
|
||||||
"system": "系统提示词(选填)",
|
"system": "系统提示词(选填)",
|
||||||
"history": [
|
"history": [
|
||||||
@@ -54,7 +68,7 @@
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
对于上述格式的数据,`dataset_info.json` 中的描述应为:
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
@@ -69,11 +83,11 @@
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
其中 `query` 列对应的内容会与 `prompt` 列对应的内容拼接后作为用户指令,即用户指令为 `prompt\nquery`。`response` 列对应的内容为模型回答。
|
### 预训练数据集
|
||||||
|
|
||||||
`system` 列对应的内容将被作为系统提示词。`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮的指令和回答。注意在指令监督学习时,历史消息中的回答**也会被用于训练**。
|
- [样例数据集](c4_demo.json)
|
||||||
|
|
||||||
对于**预训练数据集**,仅 `prompt` 列中的内容会用于模型训练,例如:
|
在预训练时,只有 `text` 列中的内容会用于模型学习。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -82,7 +96,7 @@
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
对于上述格式的数据,`dataset_info.json` 中的描述应为:
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
@@ -93,22 +107,24 @@
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
对于**偏好数据集**,`response` 列应当是一个长度为 2 的字符串列表,排在前面的代表更优的回答,例如:
|
### 偏好数据集
|
||||||
|
|
||||||
|
偏好数据集用于奖励模型训练、DPO 训练、ORPO 训练和 SimPO 训练。
|
||||||
|
|
||||||
|
它需要在 `chosen` 列中提供更优的回答,并在 `rejected` 列中提供更差的回答。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
{
|
{
|
||||||
"instruction": "用户指令",
|
"instruction": "人类指令(必填)",
|
||||||
"input": "用户输入",
|
"input": "人类输入(选填)",
|
||||||
"output": [
|
"chosen": "优质回答(必填)",
|
||||||
"优质回答",
|
"rejected": "劣质回答(必填)"
|
||||||
"劣质回答"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
对于上述格式的数据,`dataset_info.json` 中的描述应为:
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
@@ -117,14 +133,37 @@
|
|||||||
"columns": {
|
"columns": {
|
||||||
"prompt": "instruction",
|
"prompt": "instruction",
|
||||||
"query": "input",
|
"query": "input",
|
||||||
"response": "output",
|
"chosen": "chosen",
|
||||||
|
"rejected": "rejected"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
----
|
### KTO 数据集
|
||||||
|
|
||||||
而 **sharegpt** 格式的数据集按照以下方式组织:
|
KTO 数据集需要提供额外的 `kto_tag` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||||
|
|
||||||
|
### 多模态图像数据集
|
||||||
|
|
||||||
|
多模态图像数据集需要提供额外的 `images` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||||
|
|
||||||
|
### 多模态视频数据集
|
||||||
|
|
||||||
|
多模态视频数据集需要提供额外的 `videos` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||||
|
|
||||||
|
### 多模态音频数据集
|
||||||
|
|
||||||
|
多模态音频数据集需要提供额外的 `audios` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||||
|
|
||||||
|
## Sharegpt 格式
|
||||||
|
|
||||||
|
### 指令监督微调数据集
|
||||||
|
|
||||||
|
- [样例数据集](glaive_toolcall_zh_demo.json)
|
||||||
|
|
||||||
|
相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。
|
||||||
|
|
||||||
|
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -132,7 +171,15 @@
|
|||||||
"conversations": [
|
"conversations": [
|
||||||
{
|
{
|
||||||
"from": "human",
|
"from": "human",
|
||||||
"value": "用户指令"
|
"value": "人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "function_call",
|
||||||
|
"value": "工具参数"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "observation",
|
||||||
|
"value": "工具结果"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"from": "gpt",
|
"from": "gpt",
|
||||||
@@ -145,7 +192,7 @@
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
对于上述格式的数据,`dataset_info.json` 中的描述应为:
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
@@ -155,19 +202,228 @@
|
|||||||
"messages": "conversations",
|
"messages": "conversations",
|
||||||
"system": "system",
|
"system": "system",
|
||||||
"tools": "tools"
|
"tools": "tools"
|
||||||
},
|
|
||||||
"tags": {
|
|
||||||
"role_tag": "from",
|
|
||||||
"content_tag": "value",
|
|
||||||
"user_tag": "human",
|
|
||||||
"assistant_tag": "gpt"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
其中 `messages` 列应当是一个列表,且符合 `用户/模型/用户/模型/用户/模型` 的顺序。
|
### 预训练数据集
|
||||||
|
|
||||||
我们同样支持 **openai** 格式的数据集:
|
尚不支持,请使用 [alpaca](#alpaca-格式) 格式。
|
||||||
|
|
||||||
|
### 偏好数据集
|
||||||
|
|
||||||
|
- [样例数据集](dpo_zh_demo.json)
|
||||||
|
|
||||||
|
Sharegpt 格式的偏好数据集同样需要在 `chosen` 列中提供更优的消息,并在 `rejected` 列中提供更差的消息。
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "模型回答"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "人类指令"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"chosen": {
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "优质回答"
|
||||||
|
},
|
||||||
|
"rejected": {
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "劣质回答"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"数据集名称": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"ranking": true,
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"chosen": "chosen",
|
||||||
|
"rejected": "rejected"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### KTO 数据集
|
||||||
|
|
||||||
|
- [样例数据集](kto_en_demo.json)
|
||||||
|
|
||||||
|
KTO 数据集需要额外添加一个 `kto_tag` 列,包含 bool 类型的人类反馈。
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "模型回答"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"kto_tag": "人类反馈 [true/false](必填)"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"数据集名称": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"kto_tag": "kto_tag"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 多模态图像数据集
|
||||||
|
|
||||||
|
- [样例数据集](mllm_demo.json)
|
||||||
|
|
||||||
|
多模态图像数据集需要额外添加一个 `images` 列,包含输入图像的路径。
|
||||||
|
|
||||||
|
注意图片的数量必须与文本中所有 `<image>` 标记的数量严格一致。
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<image>人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "模型回答"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"images": [
|
||||||
|
"图像路径(必填)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"数据集名称": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"images": "images"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 多模态视频数据集
|
||||||
|
|
||||||
|
- [样例数据集](mllm_video_demo.json)
|
||||||
|
|
||||||
|
多模态视频数据集需要额外添加一个 `videos` 列,包含输入视频的路径。
|
||||||
|
|
||||||
|
注意视频的数量必须与文本中所有 `<video>` 标记的数量严格一致。
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<video>人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "模型回答"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"videos": [
|
||||||
|
"视频路径(必填)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"数据集名称": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"videos": "videos"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 多模态音频数据集
|
||||||
|
|
||||||
|
- [样例数据集](mllm_audio_demo.json)
|
||||||
|
|
||||||
|
多模态音频数据集需要额外添加一个 `audios` 列,包含输入音频的路径。
|
||||||
|
|
||||||
|
注意音频的数量必须与文本中所有 `<audio>` 标记的数量严格一致。
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"conversations": [
|
||||||
|
{
|
||||||
|
"from": "human",
|
||||||
|
"value": "<audio>人类指令"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"from": "gpt",
|
||||||
|
"value": "模型回答"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"audios": [
|
||||||
|
"音频路径(必填)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"数据集名称": {
|
||||||
|
"file_name": "data.json",
|
||||||
|
"formatting": "sharegpt",
|
||||||
|
"columns": {
|
||||||
|
"messages": "conversations",
|
||||||
|
"audios": "audios"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### OpenAI 格式
|
||||||
|
|
||||||
|
OpenAI 格式仅仅是 sharegpt 格式的一种特殊情况,其中第一条消息可能是系统提示词。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
@@ -179,7 +435,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": "用户指令"
|
"content": "人类指令"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
@@ -190,7 +446,7 @@
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
对于上述格式的数据,`dataset_info.json` 中的描述应为:
|
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"数据集名称": {
|
"数据集名称": {
|
||||||
@@ -208,5 +464,3 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
预训练数据集和偏好数据集**尚不支持** sharegpt 格式。
|
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
3779ddbc040543ab1834ef216c983d6fcc06cc9a
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
a97cf9475291591843976554878568e046d8a46d
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
25508714b7879a1e5a6764ba7f979a980f549f1a
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
7cb6a7d11455bddc3d495750a2392683d775b184
|
|
||||||
@@ -17,9 +17,9 @@ _CITATION = """\
|
|||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_HOMEPAGE = "{}/datasets/BelleGroup/multiturn_chat_0.8M".format(_HF_ENDPOINT)
|
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M"
|
||||||
_LICENSE = "gpl-3.0"
|
_LICENSE = "gpl-3.0"
|
||||||
_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)
|
_URL = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
|
||||||
|
|
||||||
|
|
||||||
class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
||||||
@@ -38,7 +38,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
|||||||
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
|
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
|
||||||
|
|
||||||
def _generate_examples(self, filepath: str):
|
def _generate_examples(self, filepath: str):
|
||||||
with open(filepath, "r", encoding="utf-8") as f:
|
with open(filepath, encoding="utf-8") as f:
|
||||||
for key, row in enumerate(f):
|
for key, row in enumerate(f):
|
||||||
data = json.loads(row)
|
data = json.loads(row)
|
||||||
conversations = []
|
conversations = []
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
f5cb08305ff5dc9c17a09809c54c8c8834aadc70
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
aee47b7b443496e37808d7f34ef10403ff99bcc3
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
import json
|
|
||||||
from typing import Any, Dict, Generator, List, Tuple
|
|
||||||
|
|
||||||
import datasets
|
|
||||||
|
|
||||||
|
|
||||||
_DESCRIPTION = "An example of dataset."
|
|
||||||
_CITATION = ""
|
|
||||||
_HOMEPAGE = ""
|
|
||||||
_LICENSE = ""
|
|
||||||
_URL = "examples.json"
|
|
||||||
|
|
||||||
|
|
||||||
class ExampleDataset(datasets.GeneratorBasedBuilder):
|
|
||||||
VERSION = datasets.Version("0.0.0")
|
|
||||||
|
|
||||||
def _info(self) -> datasets.DatasetInfo:
|
|
||||||
features = datasets.Features(
|
|
||||||
{
|
|
||||||
"instruction": datasets.Value("string"),
|
|
||||||
"input": datasets.Value("string"),
|
|
||||||
"output": datasets.Value("string"),
|
|
||||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
|
|
||||||
}
|
|
||||||
)
|
|
||||||
return datasets.DatasetInfo(
|
|
||||||
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
|
||||||
)
|
|
||||||
|
|
||||||
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
|
||||||
file_path = dl_manager.download(_URL)
|
|
||||||
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
|
|
||||||
|
|
||||||
def _generate_examples(self, filepath: str) -> Generator[Tuple[int, Dict[str, Any]], None, None]:
|
|
||||||
example_dataset = json.load(open(filepath, "r", encoding="utf-8"))
|
|
||||||
for key, example in enumerate(example_dataset):
|
|
||||||
yield key, example
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
4748dff00d1dc42768a5b6cc772143c313017812
|
|
||||||
@@ -8,9 +8,9 @@ import datasets
|
|||||||
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
||||||
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
|
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
|
||||||
_CITATION = ""
|
_CITATION = ""
|
||||||
_HOMEPAGE = "{}/datasets/Anthropic/hh-rlhf".format(_HF_ENDPOINT)
|
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf"
|
||||||
_LICENSE = "mit"
|
_LICENSE = "mit"
|
||||||
_URL = "{}/datasets/Anthropic/hh-rlhf/resolve/main/".format(_HF_ENDPOINT)
|
_URL = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf/resolve/main/"
|
||||||
_URLS = {
|
_URLS = {
|
||||||
"train": [
|
"train": [
|
||||||
_URL + "harmless-base/train.jsonl.gz",
|
_URL + "harmless-base/train.jsonl.gz",
|
||||||
@@ -34,7 +34,8 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
|||||||
features = datasets.Features(
|
features = datasets.Features(
|
||||||
{
|
{
|
||||||
"instruction": datasets.Value("string"),
|
"instruction": datasets.Value("string"),
|
||||||
"output": datasets.Sequence(datasets.Value("string")),
|
"chosen": datasets.Value("string"),
|
||||||
|
"rejected": datasets.Value("string"),
|
||||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
|
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -52,7 +53,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
|||||||
def _generate_examples(self, filepaths: List[str]):
|
def _generate_examples(self, filepaths: List[str]):
|
||||||
key = 0
|
key = 0
|
||||||
for filepath in filepaths:
|
for filepath in filepaths:
|
||||||
with open(filepath, "r", encoding="utf-8") as f:
|
with open(filepath, encoding="utf-8") as f:
|
||||||
for row in f:
|
for row in f:
|
||||||
data = json.loads(row)
|
data = json.loads(row)
|
||||||
chosen = data["chosen"]
|
chosen = data["chosen"]
|
||||||
@@ -79,5 +80,5 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
|||||||
break
|
break
|
||||||
prompt = prompt[:human_idx]
|
prompt = prompt[:human_idx]
|
||||||
|
|
||||||
yield key, {"instruction": query, "output": [r_accept, r_reject], "history": history}
|
yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
|
||||||
key += 1
|
key += 1
|
||||||
|
|||||||
BIN
data/mllm_demo_data/1.mp3
Normal file
BIN
data/mllm_demo_data/1.mp3
Normal file
Binary file not shown.
BIN
data/mllm_demo_data/1.mp4
Normal file
BIN
data/mllm_demo_data/1.mp4
Normal file
Binary file not shown.
BIN
data/mllm_demo_data/2.avi
Normal file
BIN
data/mllm_demo_data/2.avi
Normal file
Binary file not shown.
BIN
data/mllm_demo_data/2.wav
Normal file
BIN
data/mllm_demo_data/2.wav
Normal file
Binary file not shown.
BIN
data/mllm_demo_data/3.flac
Normal file
BIN
data/mllm_demo_data/3.flac
Normal file
Binary file not shown.
BIN
data/mllm_demo_data/3.mp4
Normal file
BIN
data/mllm_demo_data/3.mp4
Normal file
Binary file not shown.
@@ -1 +0,0 @@
|
|||||||
736bcedea2b24a1414765c6d69cbdafaea839f3c
|
|
||||||
@@ -20,9 +20,9 @@ _CITATION = """\
|
|||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
_HOMEPAGE = "{}/datasets/stingning/ultrachat".format(_HF_ENDPOINT)
|
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
|
||||||
_LICENSE = "cc-by-nc-4.0"
|
_LICENSE = "cc-by-nc-4.0"
|
||||||
_BASE_DATA_URL = "{}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl".format(_HF_ENDPOINT)
|
_BASE_DATA_URL = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl"
|
||||||
|
|
||||||
|
|
||||||
class UltraChat(datasets.GeneratorBasedBuilder):
|
class UltraChat(datasets.GeneratorBasedBuilder):
|
||||||
@@ -42,7 +42,7 @@ class UltraChat(datasets.GeneratorBasedBuilder):
|
|||||||
|
|
||||||
def _generate_examples(self, filepaths: List[str]):
|
def _generate_examples(self, filepaths: List[str]):
|
||||||
for filepath in filepaths:
|
for filepath in filepaths:
|
||||||
with open(filepath, "r", encoding="utf-8") as f:
|
with open(filepath, encoding="utf-8") as f:
|
||||||
for row in f:
|
for row in f:
|
||||||
try:
|
try:
|
||||||
data = json.loads(row)
|
data = json.loads(row)
|
||||||
|
|||||||
30
data/wiki_demo.txt
Normal file
30
data/wiki_demo.txt
Normal file
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
|||||||
c9cf509b7fdac5490cfd6dae72c2d7b8a60af6cb
|
|
||||||
@@ -1,25 +0,0 @@
|
|||||||
version: '3.8'
|
|
||||||
|
|
||||||
services:
|
|
||||||
llama-factory:
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile
|
|
||||||
context: .
|
|
||||||
container_name: llama_factory
|
|
||||||
volumes:
|
|
||||||
- ./hf_cache:/root/.cache/huggingface/
|
|
||||||
- ./data:/app/data
|
|
||||||
- ./output:/app/output
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=0
|
|
||||||
ports:
|
|
||||||
- "7860:7860"
|
|
||||||
ipc: host
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
count: "all"
|
|
||||||
capabilities: [gpu]
|
|
||||||
restart: unless-stopped
|
|
||||||
101
docker/docker-cuda/Dockerfile
Normal file
101
docker/docker-cuda/Dockerfile
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
# Default use the NVIDIA official image with PyTorch 2.3.0
|
||||||
|
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html
|
||||||
|
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:24.02-py3
|
||||||
|
FROM ${BASE_IMAGE}
|
||||||
|
|
||||||
|
# Define environments
|
||||||
|
ENV MAX_JOBS=4
|
||||||
|
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||||
|
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||||
|
|
||||||
|
# Define installation arguments
|
||||||
|
ARG INSTALL_BNB=false
|
||||||
|
ARG INSTALL_VLLM=false
|
||||||
|
ARG INSTALL_DEEPSPEED=false
|
||||||
|
ARG INSTALL_FLASHATTN=false
|
||||||
|
ARG INSTALL_LIGER_KERNEL=false
|
||||||
|
ARG INSTALL_HQQ=false
|
||||||
|
ARG INSTALL_EETQ=false
|
||||||
|
ARG PIP_INDEX=https://pypi.org/simple
|
||||||
|
ARG HTTP_PROXY=
|
||||||
|
|
||||||
|
# Set the working directory
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Set http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
echo "Configuring proxy..."; \
|
||||||
|
export http_proxy=$HTTP_PROXY; \
|
||||||
|
export https_proxy=$HTTP_PROXY; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Install the requirements
|
||||||
|
COPY requirements.txt /app
|
||||||
|
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||||
|
pip config set global.extra-index-url "$PIP_INDEX" && \
|
||||||
|
python -m pip install --upgrade pip && \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \
|
||||||
|
else \
|
||||||
|
python -m pip install -r requirements.txt; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy the rest of the application into the image
|
||||||
|
COPY . /app
|
||||||
|
|
||||||
|
# Install the LLaMA Factory
|
||||||
|
RUN EXTRA_PACKAGES="metrics"; \
|
||||||
|
if [ "$INSTALL_BNB" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_VLLM" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_HQQ" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_EETQ" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},eetq"; \
|
||||||
|
fi; \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
else \
|
||||||
|
pip install -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Rebuild flash attention
|
||||||
|
RUN pip uninstall -y transformer-engine flash-attn && \
|
||||||
|
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
||||||
|
pip uninstall -y ninja && \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
pip install --proxy=$HTTP_PROXY ninja && \
|
||||||
|
pip install --proxy=$HTTP_PROXY --no-cache-dir flash-attn --no-build-isolation; \
|
||||||
|
else \
|
||||||
|
pip install ninja && \
|
||||||
|
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||||
|
fi; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# Unset http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
unset http_proxy; \
|
||||||
|
unset https_proxy; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set up volumes
|
||||||
|
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||||
|
|
||||||
|
# Expose port 7860 for the LLaMA Board
|
||||||
|
ENV GRADIO_SERVER_PORT 7860
|
||||||
|
EXPOSE 7860
|
||||||
|
|
||||||
|
# Expose port 8000 for the API service
|
||||||
|
ENV API_PORT 8000
|
||||||
|
EXPOSE 8000
|
||||||
37
docker/docker-cuda/docker-compose.yml
Normal file
37
docker/docker-cuda/docker-compose.yml
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
services:
|
||||||
|
llamafactory:
|
||||||
|
build:
|
||||||
|
dockerfile: ./docker/docker-cuda/Dockerfile
|
||||||
|
context: ../..
|
||||||
|
args:
|
||||||
|
INSTALL_BNB: "false"
|
||||||
|
INSTALL_VLLM: "false"
|
||||||
|
INSTALL_DEEPSPEED: "false"
|
||||||
|
INSTALL_FLASHATTN: "false"
|
||||||
|
INSTALL_LIGER_KERNEL: "false"
|
||||||
|
INSTALL_HQQ: "false"
|
||||||
|
INSTALL_EETQ: "false"
|
||||||
|
PIP_INDEX: https://pypi.org/simple
|
||||||
|
container_name: llamafactory
|
||||||
|
volumes:
|
||||||
|
- ../../hf_cache:/root/.cache/huggingface
|
||||||
|
- ../../ms_cache:/root/.cache/modelscope
|
||||||
|
- ../../om_cache:/root/.cache/openmind
|
||||||
|
- ../../data:/app/data
|
||||||
|
- ../../output:/app/output
|
||||||
|
ports:
|
||||||
|
- "7860:7860"
|
||||||
|
- "8000:8000"
|
||||||
|
ipc: host
|
||||||
|
tty: true
|
||||||
|
shm_size: "16gb"
|
||||||
|
stdin_open: true
|
||||||
|
command: bash
|
||||||
|
deploy:
|
||||||
|
resources:
|
||||||
|
reservations:
|
||||||
|
devices:
|
||||||
|
- driver: nvidia
|
||||||
|
count: "all"
|
||||||
|
capabilities: [gpu]
|
||||||
|
restart: unless-stopped
|
||||||
67
docker/docker-npu/Dockerfile
Normal file
67
docker/docker-npu/Dockerfile
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Use the Ubuntu 22.04 image with CANN 8.0.rc1
|
||||||
|
# More versions can be found at https://hub.docker.com/r/ascendai/cann/tags
|
||||||
|
# FROM ascendai/cann:8.0.rc1-910-ubuntu22.04-py3.8
|
||||||
|
FROM ascendai/cann:8.0.0-910b-ubuntu22.04-py3.10
|
||||||
|
# FROM ascendai/cann:8.0.rc1-910-openeuler22.03-py3.8
|
||||||
|
# FROM ascendai/cann:8.0.rc1-910b-openeuler22.03-py3.8
|
||||||
|
|
||||||
|
# Define environments
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
# Define installation arguments
|
||||||
|
ARG INSTALL_DEEPSPEED=false
|
||||||
|
ARG PIP_INDEX=https://pypi.org/simple
|
||||||
|
ARG TORCH_INDEX=https://download.pytorch.org/whl/cpu
|
||||||
|
ARG HTTP_PROXY=
|
||||||
|
|
||||||
|
# Set the working directory
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Set http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
echo "Configuring proxy..."; \
|
||||||
|
export http_proxy=$HTTP_PROXY; \
|
||||||
|
export https_proxy=$HTTP_PROXY; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Install the requirements
|
||||||
|
COPY requirements.txt /app
|
||||||
|
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||||
|
pip config set global.extra-index-url "$TORCH_INDEX" && \
|
||||||
|
python -m pip install --upgrade pip && \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \
|
||||||
|
else \
|
||||||
|
python -m pip install -r requirements.txt; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy the rest of the application into the image
|
||||||
|
COPY . /app
|
||||||
|
|
||||||
|
# Install the LLaMA Factory
|
||||||
|
RUN EXTRA_PACKAGES="torch-npu,metrics"; \
|
||||||
|
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||||
|
fi; \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
else \
|
||||||
|
pip install -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Unset http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
unset http_proxy; \
|
||||||
|
unset https_proxy; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set up volumes
|
||||||
|
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||||
|
|
||||||
|
# Expose port 7860 for the LLaMA Board
|
||||||
|
ENV GRADIO_SERVER_PORT 7860
|
||||||
|
EXPOSE 7860
|
||||||
|
|
||||||
|
# Expose port 8000 for the API service
|
||||||
|
ENV API_PORT 8000
|
||||||
|
EXPOSE 8000
|
||||||
33
docker/docker-npu/docker-compose.yml
Normal file
33
docker/docker-npu/docker-compose.yml
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
services:
|
||||||
|
llamafactory:
|
||||||
|
build:
|
||||||
|
dockerfile: ./docker/docker-npu/Dockerfile
|
||||||
|
context: ../..
|
||||||
|
args:
|
||||||
|
INSTALL_DEEPSPEED: "false"
|
||||||
|
PIP_INDEX: https://pypi.org/simple
|
||||||
|
container_name: llamafactory
|
||||||
|
volumes:
|
||||||
|
- ../../hf_cache:/root/.cache/huggingface
|
||||||
|
- ../../ms_cache:/root/.cache/modelscope
|
||||||
|
- ../../om_cache:/root/.cache/openmind
|
||||||
|
- ../../data:/app/data
|
||||||
|
- ../../output:/app/output
|
||||||
|
- /usr/local/dcmi:/usr/local/dcmi
|
||||||
|
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
|
||||||
|
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
|
||||||
|
- /etc/ascend_install.info:/etc/ascend_install.info
|
||||||
|
ports:
|
||||||
|
- "7860:7860"
|
||||||
|
- "8000:8000"
|
||||||
|
ipc: host
|
||||||
|
tty: true
|
||||||
|
shm_size: "16gb"
|
||||||
|
stdin_open: true
|
||||||
|
command: bash
|
||||||
|
devices:
|
||||||
|
- /dev/davinci0
|
||||||
|
- /dev/davinci_manager
|
||||||
|
- /dev/devmm_svm
|
||||||
|
- /dev/hisi_hdc
|
||||||
|
restart: unless-stopped
|
||||||
93
docker/docker-rocm/Dockerfile
Normal file
93
docker/docker-rocm/Dockerfile
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
FROM hardandheavy/transformers-rocm:2.2.0
|
||||||
|
|
||||||
|
# Define environments
|
||||||
|
ENV MAX_JOBS=4
|
||||||
|
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||||
|
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||||
|
|
||||||
|
# Define installation arguments
|
||||||
|
ARG INSTALL_BNB=false
|
||||||
|
ARG INSTALL_VLLM=false
|
||||||
|
ARG INSTALL_DEEPSPEED=false
|
||||||
|
ARG INSTALL_FLASHATTN=false
|
||||||
|
ARG INSTALL_LIGER_KERNEL=false
|
||||||
|
ARG INSTALL_HQQ=false
|
||||||
|
ARG PIP_INDEX=https://pypi.org/simple
|
||||||
|
ARG HTTP_PROXY=
|
||||||
|
|
||||||
|
# Set the working directory
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Set http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
echo "Configuring proxy..."; \
|
||||||
|
export http_proxy=$HTTP_PROXY; \
|
||||||
|
export https_proxy=$HTTP_PROXY; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Install the requirements
|
||||||
|
COPY requirements.txt /app
|
||||||
|
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||||
|
pip config set global.extra-index-url "$PIP_INDEX" && \
|
||||||
|
python -m pip install --upgrade pip && \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
python -m pip install --proxy=$HTTP_PROXY -r requirements.txt; \
|
||||||
|
else \
|
||||||
|
python -m pip install -r requirements.txt; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy the rest of the application into the image
|
||||||
|
COPY . /app
|
||||||
|
|
||||||
|
# Install the LLaMA Factory
|
||||||
|
RUN EXTRA_PACKAGES="metrics"; \
|
||||||
|
if [ "$INSTALL_BNB" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_VLLM" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
|
||||||
|
fi; \
|
||||||
|
if [ "$INSTALL_HQQ" == "true" ]; then \
|
||||||
|
EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
|
||||||
|
fi; \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
pip install --proxy=$HTTP_PROXY -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
else \
|
||||||
|
pip install -e ".[$EXTRA_PACKAGES]"; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Rebuild flash attention
|
||||||
|
RUN pip uninstall -y transformer-engine flash-attn && \
|
||||||
|
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
||||||
|
pip uninstall -y ninja && \
|
||||||
|
if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
pip install --proxy=$HTTP_PROXY ninja && \
|
||||||
|
pip install --proxy=$HTTP_PROXY --no-cache-dir flash-attn --no-build-isolation; \
|
||||||
|
else \
|
||||||
|
pip install ninja && \
|
||||||
|
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||||
|
fi; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Unset http proxy
|
||||||
|
RUN if [ -n "$HTTP_PROXY" ]; then \
|
||||||
|
unset http_proxy; \
|
||||||
|
unset https_proxy; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set up volumes
|
||||||
|
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||||
|
|
||||||
|
# Expose port 7860 for the LLaMA Board
|
||||||
|
ENV GRADIO_SERVER_PORT 7860
|
||||||
|
EXPOSE 7860
|
||||||
|
|
||||||
|
# Expose port 8000 for the API service
|
||||||
|
ENV API_PORT 8000
|
||||||
|
EXPOSE 8000
|
||||||
33
docker/docker-rocm/docker-compose.yml
Normal file
33
docker/docker-rocm/docker-compose.yml
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
services:
|
||||||
|
llamafactory:
|
||||||
|
build:
|
||||||
|
dockerfile: ./docker/docker-rocm/Dockerfile
|
||||||
|
context: ../..
|
||||||
|
args:
|
||||||
|
INSTALL_BNB: "false"
|
||||||
|
INSTALL_VLLM: "false"
|
||||||
|
INSTALL_DEEPSPEED: "false"
|
||||||
|
INSTALL_FLASHATTN: "false"
|
||||||
|
INSTALL_LIGER_KERNEL: "false"
|
||||||
|
INSTALL_HQQ: "false"
|
||||||
|
PIP_INDEX: https://pypi.org/simple
|
||||||
|
container_name: llamafactory
|
||||||
|
volumes:
|
||||||
|
- ../../hf_cache:/root/.cache/huggingface
|
||||||
|
- ../../ms_cache:/root/.cache/modelscope
|
||||||
|
- ../../om_cache:/root/.cache/openmind
|
||||||
|
- ../../data:/app/data
|
||||||
|
- ../../output:/app/output
|
||||||
|
- ../../saves:/app/saves
|
||||||
|
ports:
|
||||||
|
- "7860:7860"
|
||||||
|
- "8000:8000"
|
||||||
|
ipc: host
|
||||||
|
tty: true
|
||||||
|
shm_size: "16gb"
|
||||||
|
stdin_open: true
|
||||||
|
command: bash
|
||||||
|
devices:
|
||||||
|
- /dev/kfd:/dev/kfd
|
||||||
|
- /dev/dri:/dev/dri
|
||||||
|
restart: unless-stopped
|
||||||
@@ -11,6 +11,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
import datasets
|
import datasets
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
import datasets
|
import datasets
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
import datasets
|
import datasets
|
||||||
@@ -154,8 +155,7 @@ class MMLU(datasets.GeneratorBasedBuilder):
|
|||||||
]
|
]
|
||||||
|
|
||||||
def _generate_examples(self, filepath):
|
def _generate_examples(self, filepath):
|
||||||
df = pd.read_csv(filepath)
|
df = pd.read_csv(filepath, header=None)
|
||||||
df.columns = ["question", "A", "B", "C", "D", "answer"]
|
df.columns = ["question", "A", "B", "C", "D", "answer"]
|
||||||
|
|
||||||
for i, instance in enumerate(df.to_dict(orient="records")):
|
yield from enumerate(df.to_dict(orient="records"))
|
||||||
yield i, instance
|
|
||||||
|
|||||||
@@ -4,59 +4,68 @@ Make sure to execute these commands in the `LLaMA-Factory` directory.
|
|||||||
|
|
||||||
## Table of Contents
|
## Table of Contents
|
||||||
|
|
||||||
- [LoRA Fine-Tuning on A Single GPU](#lora-fine-tuning-on-a-single-gpu)
|
- [LoRA Fine-Tuning](#lora-fine-tuning)
|
||||||
- [QLoRA Fine-Tuning on a Single GPU](#qlora-fine-tuning-on-a-single-gpu)
|
- [QLoRA Fine-Tuning](#qlora-fine-tuning)
|
||||||
- [LoRA Fine-Tuning on Multiple GPUs](#lora-fine-tuning-on-multiple-gpus)
|
- [Full-Parameter Fine-Tuning](#full-parameter-fine-tuning)
|
||||||
- [LoRA Fine-Tuning on Multiple NPUs](#lora-fine-tuning-on-multiple-npus)
|
|
||||||
- [Full-Parameter Fine-Tuning on Multiple GPUs](#full-parameter-fine-tuning-on-multiple-gpus)
|
|
||||||
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
|
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
|
||||||
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
|
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
|
||||||
- [Extras](#extras)
|
- [Extras](#extras)
|
||||||
|
|
||||||
|
Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to choose computing devices.
|
||||||
|
|
||||||
|
By default, LLaMA-Factory uses all visible computing devices.
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### LoRA Fine-Tuning on A Single GPU
|
### LoRA Fine-Tuning
|
||||||
|
|
||||||
#### (Continuous) Pre-Training
|
#### (Continuous) Pre-Training
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Supervised Fine-Tuning
|
#### Supervised Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Multimodal Supervised Fine-Tuning
|
#### Multimodal Supervised Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
|
||||||
|
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### DPO/ORPO/SimPO Training
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multimodal DPO/ORPO/SimPO Training
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Reward Modeling
|
#### Reward Modeling
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### PPO Training
|
#### PPO Training
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### DPO Training
|
#### KTO Training
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
|
||||||
```
|
|
||||||
|
|
||||||
#### ORPO Training
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_orpo.yaml
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Preprocess Dataset
|
#### Preprocess Dataset
|
||||||
@@ -64,93 +73,85 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lo
|
|||||||
It is useful for large dataset, use `tokenized_path` in config to load the preprocessed dataset.
|
It is useful for large dataset, use `tokenized_path` in config to load the preprocessed dataset.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
|
llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
|
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
|
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Batch Predicting and Computing BLEU and ROUGE Scores
|
#### Supervised Fine-Tuning on Multiple Nodes
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
```
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
|
|
||||||
### QLoRA Fine-Tuning on a Single GPU
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
### LoRA Fine-Tuning on Multiple GPUs
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with Accelerate on Single Node
|
|
||||||
|
|
||||||
```bash
|
|
||||||
bash examples/lora_multi_gpu/single_node.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with Accelerate on Multiple Nodes
|
|
||||||
|
|
||||||
```bash
|
|
||||||
bash examples/lora_multi_gpu/multi_node.sh
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
|
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/lora_multi_gpu/ds_zero3.sh
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### LoRA Fine-Tuning on Multiple NPUs
|
#### Supervised Fine-Tuning with Ray on 4 GPUs
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with DeepSpeed ZeRO-0
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/lora_multi_npu/ds_zero0.sh
|
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Full-Parameter Fine-Tuning on Multiple GPUs
|
### QLoRA Fine-Tuning
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with Accelerate on Single Node
|
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/single_node.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Supervised Fine-Tuning with Accelerate on Multiple Nodes
|
#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on Ascend NPU
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/multi_node.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Batch Predicting and Computing BLEU and ROUGE Scores
|
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/predict.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full-Parameter Fine-Tuning
|
||||||
|
|
||||||
|
#### Supervised Fine-Tuning on Single Node
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Supervised Fine-Tuning on Multiple Nodes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multimodal Supervised Fine-Tuning
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Merging LoRA Adapters and Quantization
|
### Merging LoRA Adapters and Quantization
|
||||||
@@ -160,33 +161,45 @@ bash examples/full_multi_gpu/predict.sh
|
|||||||
Note: DO NOT use quantized model or `quantization_bit` when merging LoRA adapters.
|
Note: DO NOT use quantized model or `quantization_bit` when merging LoRA adapters.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Quantizing Model using AutoGPTQ
|
#### Quantizing Model using AutoGPTQ
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Save Ollama modelfile
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Inferring LoRA Fine-Tuned Models
|
### Inferring LoRA Fine-Tuned Models
|
||||||
|
|
||||||
#### Use CLI
|
#### Batch Generation using vLLM Tensor Parallel
|
||||||
|
|
||||||
```bash
|
```
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/merge_lora/llama3_lora_sft.yaml
|
python scripts/vllm_infer.py --model_name_or_path path_to_merged_model --dataset alpaca_en_demo
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Use Web UI
|
#### Use CLI ChatBox
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Use Web UI ChatBox
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Launch OpenAI-style API
|
#### Launch OpenAI-style API
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Extras
|
### Extras
|
||||||
@@ -194,36 +207,60 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.
|
|||||||
#### Full-Parameter Fine-Tuning using GaLore
|
#### Full-Parameter Fine-Tuning using GaLore
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Full-Parameter Fine-Tuning using APOLLO
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Full-Parameter Fine-Tuning using BAdam
|
#### Full-Parameter Fine-Tuning using BAdam
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Full-Parameter Fine-Tuning using Adam-mini
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### LoRA+ Fine-Tuning
|
#### LoRA+ Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### PiSSA Fine-Tuning
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Mixture-of-Depths Fine-Tuning
|
#### Mixture-of-Depths Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### LLaMA-Pro Fine-Tuning
|
#### LLaMA-Pro Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/extras/llama_pro/expand.sh
|
bash examples/extras/llama_pro/expand.sh
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### FSDP+QLoRA Fine-Tuning
|
#### FSDP+QLoRA Fine-Tuning
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/extras/fsdp_qlora/single_node.sh
|
bash examples/extras/fsdp_qlora/train.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Computing BLEU and ROUGE Scores
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/nlg_eval/llama3_lora_predict.yaml
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -4,59 +4,68 @@
|
|||||||
|
|
||||||
## 目录
|
## 目录
|
||||||
|
|
||||||
- [单 GPU LoRA 微调](#单-gpu-lora-微调)
|
- [LoRA 微调](#lora-微调)
|
||||||
- [单 GPU QLoRA 微调](#单-gpu-qlora-微调)
|
- [QLoRA 微调](#qlora-微调)
|
||||||
- [多 GPU LoRA 微调](#多-gpu-lora-微调)
|
- [全参数微调](#全参数微调)
|
||||||
- [多 NPU LoRA 微调](#多-npu-lora-微调)
|
|
||||||
- [多 GPU 全参数微调](#多-gpu-全参数微调)
|
|
||||||
- [合并 LoRA 适配器与模型量化](#合并-lora-适配器与模型量化)
|
- [合并 LoRA 适配器与模型量化](#合并-lora-适配器与模型量化)
|
||||||
- [推理 LoRA 模型](#推理-lora-模型)
|
- [推理 LoRA 模型](#推理-lora-模型)
|
||||||
- [杂项](#杂项)
|
- [杂项](#杂项)
|
||||||
|
|
||||||
|
使用 `CUDA_VISIBLE_DEVICES`(GPU)或 `ASCEND_RT_VISIBLE_DEVICES`(NPU)选择计算设备。
|
||||||
|
|
||||||
|
LLaMA-Factory 默认使用所有可见的计算设备。
|
||||||
|
|
||||||
## 示例
|
## 示例
|
||||||
|
|
||||||
### 单 GPU LoRA 微调
|
### LoRA 微调
|
||||||
|
|
||||||
#### (增量)预训练
|
#### (增量)预训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 指令监督微调
|
#### 指令监督微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 多模态指令监督微调
|
#### 多模态指令监督微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
|
llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
|
||||||
|
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### DPO/ORPO/SimPO 训练
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 多模态 DPO/ORPO/SimPO 训练
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 奖励模型训练
|
#### 奖励模型训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### PPO 训练
|
#### PPO 训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### DPO 训练
|
#### KTO 训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
|
llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
|
||||||
```
|
|
||||||
|
|
||||||
#### ORPO 训练
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_orpo.yaml
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 预处理数据集
|
#### 预处理数据集
|
||||||
@@ -64,93 +73,85 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lo
|
|||||||
对于大数据集有帮助,在配置中使用 `tokenized_path` 以加载预处理后的数据集。
|
对于大数据集有帮助,在配置中使用 `tokenized_path` 以加载预处理后的数据集。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
|
llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 在 MMLU/CMMLU/C-Eval 上评估
|
#### 在 MMLU/CMMLU/C-Eval 上评估
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
|
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 批量预测并计算 BLEU 和 ROUGE 分数
|
#### 多机指令监督微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
```
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||||
|
|
||||||
### 单 GPU QLoRA 微调
|
|
||||||
|
|
||||||
#### 基于 4/8 比特 Bitsandbytes 量化进行指令监督微调(推荐)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 基于 4/8 比特 GPTQ 量化进行指令监督微调
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 基于 4 比特 AWQ 量化进行指令监督微调
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 基于 2 比特 AQLM 量化进行指令监督微调
|
|
||||||
|
|
||||||
```bash
|
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
### 多 GPU LoRA 微调
|
|
||||||
|
|
||||||
#### 使用 Accelerate 进行单节点训练
|
|
||||||
|
|
||||||
```bash
|
|
||||||
bash examples/lora_multi_gpu/single_node.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 使用 Accelerate 进行多节点训练
|
|
||||||
|
|
||||||
```bash
|
|
||||||
bash examples/lora_multi_gpu/multi_node.sh
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 使用 DeepSpeed ZeRO-3 平均分配显存
|
#### 使用 DeepSpeed ZeRO-3 平均分配显存
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/lora_multi_gpu/ds_zero3.sh
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### 多 NPU LoRA 微调
|
#### 使用 Ray 在 4 张 GPU 上微调
|
||||||
|
|
||||||
#### 使用 DeepSpeed ZeRO-0 训练
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/lora_multi_npu/ds_zero0.sh
|
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### 多 GPU 全参数微调
|
### QLoRA 微调
|
||||||
|
|
||||||
#### 使用 DeepSpeed 进行单节点训练
|
#### 基于 4/8 比特 Bitsandbytes/HQQ/EETQ 量化进行指令监督微调(推荐)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/single_node.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 使用 DeepSpeed 进行多节点训练
|
#### 在 NPU 上基于 4 比特 Bitsandbytes 量化进行指令监督微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/multi_node.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 批量预测并计算 BLEU 和 ROUGE 分数
|
#### 基于 4/8 比特 GPTQ 量化进行指令监督微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/full_multi_gpu/predict.sh
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 基于 4 比特 AWQ 量化进行指令监督微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 基于 2 比特 AQLM 量化进行指令监督微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 全参数微调
|
||||||
|
|
||||||
|
#### 在单机上进行指令监督微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 在多机上进行指令监督微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 多模态指令监督微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### 合并 LoRA 适配器与模型量化
|
### 合并 LoRA 适配器与模型量化
|
||||||
@@ -160,33 +161,45 @@ bash examples/full_multi_gpu/predict.sh
|
|||||||
注:请勿使用量化后的模型或 `quantization_bit` 参数来合并 LoRA 适配器。
|
注:请勿使用量化后的模型或 `quantization_bit` 参数来合并 LoRA 适配器。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 使用 AutoGPTQ 量化模型
|
#### 使用 AutoGPTQ 量化模型
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 保存 Ollama 配置文件
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### 推理 LoRA 模型
|
### 推理 LoRA 模型
|
||||||
|
|
||||||
#### 使用命令行接口
|
#### 使用 vLLM+TP 批量推理
|
||||||
|
|
||||||
```bash
|
```
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/merge_lora/llama3_lora_sft.yaml
|
python scripts/vllm_infer.py --model_name_or_path path_to_merged_model --dataset alpaca_en_demo
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 使用浏览器界面
|
#### 使用命令行对话框
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 使用浏览器对话框
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 启动 OpenAI 风格 API
|
#### 启动 OpenAI 风格 API
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.yaml
|
llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### 杂项
|
### 杂项
|
||||||
@@ -194,36 +207,60 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.
|
|||||||
#### 使用 GaLore 进行全参数训练
|
#### 使用 GaLore 进行全参数训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 使用 APOLLO 进行全参数训练
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 使用 BAdam 进行全参数训练
|
#### 使用 BAdam 进行全参数训练
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 使用 Adam-mini 进行全参数训练
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### LoRA+ 微调
|
#### LoRA+ 微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### PiSSA 微调
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 深度混合微调
|
#### 深度混合微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### LLaMA-Pro 微调
|
#### LLaMA-Pro 微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/extras/llama_pro/expand.sh
|
bash examples/extras/llama_pro/expand.sh
|
||||||
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### FSDP+QLoRA 微调
|
#### FSDP+QLoRA 微调
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash examples/extras/fsdp_qlora/single_node.sh
|
bash examples/extras/fsdp_qlora/train.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 计算 BLEU 和 ROUGE 分数
|
||||||
|
|
||||||
|
```bash
|
||||||
|
llamafactory-cli train examples/extras/nlg_eval/llama3_lora_predict.yaml
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -5,16 +5,16 @@ downcast_bf16: 'no'
|
|||||||
fsdp_config:
|
fsdp_config:
|
||||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||||
fsdp_backward_prefetch: BACKWARD_PRE
|
fsdp_backward_prefetch: BACKWARD_PRE
|
||||||
fsdp_cpu_ram_efficient_loading: true
|
|
||||||
fsdp_forward_prefetch: false
|
fsdp_forward_prefetch: false
|
||||||
fsdp_offload_params: true
|
fsdp_cpu_ram_efficient_loading: true
|
||||||
|
fsdp_offload_params: true # offload may affect training speed
|
||||||
fsdp_sharding_strategy: FULL_SHARD
|
fsdp_sharding_strategy: FULL_SHARD
|
||||||
fsdp_state_dict_type: FULL_STATE_DICT
|
fsdp_state_dict_type: FULL_STATE_DICT
|
||||||
fsdp_sync_module_states: true
|
fsdp_sync_module_states: true
|
||||||
fsdp_use_orig_params: false
|
fsdp_use_orig_params: true
|
||||||
machine_rank: 0
|
machine_rank: 0
|
||||||
main_training_function: main
|
main_training_function: main
|
||||||
mixed_precision: fp16
|
mixed_precision: bf16 # or fp16
|
||||||
num_machines: 1 # the number of nodes
|
num_machines: 1 # the number of nodes
|
||||||
num_processes: 2 # the number of GPUs in all nodes
|
num_processes: 2 # the number of GPUs in all nodes
|
||||||
rdzv_backend: static
|
rdzv_backend: static
|
||||||
|
|||||||
@@ -1,18 +0,0 @@
|
|||||||
compute_environment: LOCAL_MACHINE
|
|
||||||
debug: false
|
|
||||||
distributed_type: MULTI_GPU
|
|
||||||
downcast_bf16: 'no'
|
|
||||||
gpu_ids: all
|
|
||||||
machine_rank: 0
|
|
||||||
main_process_ip: 192.168.0.1
|
|
||||||
main_process_port: 29555
|
|
||||||
main_training_function: main
|
|
||||||
mixed_precision: fp16
|
|
||||||
num_machines: 2 # the number of nodes
|
|
||||||
num_processes: 8 # the number of GPUs in all nodes
|
|
||||||
rdzv_backend: static
|
|
||||||
same_network: true
|
|
||||||
tpu_env: []
|
|
||||||
tpu_use_cluster: false
|
|
||||||
tpu_use_sudo: false
|
|
||||||
use_cpu: false
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
compute_environment: LOCAL_MACHINE
|
|
||||||
debug: false
|
|
||||||
distributed_type: MULTI_GPU
|
|
||||||
downcast_bf16: 'no'
|
|
||||||
gpu_ids: all
|
|
||||||
machine_rank: 0
|
|
||||||
main_training_function: main
|
|
||||||
mixed_precision: fp16
|
|
||||||
num_machines: 1 # the number of nodes
|
|
||||||
num_processes: 4 # the number of GPUs in all nodes
|
|
||||||
rdzv_backend: static
|
|
||||||
same_network: true
|
|
||||||
tpu_env: []
|
|
||||||
tpu_use_cluster: false
|
|
||||||
tpu_use_sudo: false
|
|
||||||
use_cpu: false
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
compute_environment: LOCAL_MACHINE
|
|
||||||
debug: false
|
|
||||||
distributed_type: MULTI_GPU
|
|
||||||
downcast_bf16: 'no'
|
|
||||||
gpu_ids: all
|
|
||||||
machine_rank: 1
|
|
||||||
main_process_ip: 192.168.0.1
|
|
||||||
main_process_port: 29555
|
|
||||||
main_training_function: main
|
|
||||||
mixed_precision: fp16
|
|
||||||
num_machines: 2 # the number of nodes
|
|
||||||
num_processes: 8 # the number of GPUs in all nodes
|
|
||||||
rdzv_backend: static
|
|
||||||
same_network: true
|
|
||||||
tpu_env: []
|
|
||||||
tpu_use_cluster: false
|
|
||||||
tpu_use_sudo: false
|
|
||||||
use_cpu: false
|
|
||||||
40
examples/extras/adam_mini/qwen2_full_sft.yaml
Normal file
40
examples/extras/adam_mini/qwen2_full_sft.yaml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### method
|
||||||
|
stage: sft
|
||||||
|
do_train: true
|
||||||
|
finetuning_type: full
|
||||||
|
use_adam_mini: true
|
||||||
|
|
||||||
|
### dataset
|
||||||
|
dataset: identity,alpaca_en_demo
|
||||||
|
template: qwen
|
||||||
|
cutoff_len: 2048
|
||||||
|
max_samples: 1000
|
||||||
|
overwrite_cache: true
|
||||||
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
|
### output
|
||||||
|
output_dir: saves/qwen2-1_5b/full/sft
|
||||||
|
logging_steps: 10
|
||||||
|
save_steps: 500
|
||||||
|
plot_loss: true
|
||||||
|
overwrite_output_dir: true
|
||||||
|
|
||||||
|
### train
|
||||||
|
per_device_train_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 8
|
||||||
|
learning_rate: 1.0e-5
|
||||||
|
num_train_epochs: 3.0
|
||||||
|
lr_scheduler_type: cosine
|
||||||
|
warmup_ratio: 0.1
|
||||||
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
|
### eval
|
||||||
|
# val_size: 0.1
|
||||||
|
# per_device_eval_batch_size: 1
|
||||||
|
# eval_strategy: steps
|
||||||
|
# eval_steps: 500
|
||||||
45
examples/extras/apollo/llama3_full_sft.yaml
Normal file
45
examples/extras/apollo/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### method
|
||||||
|
stage: sft
|
||||||
|
do_train: true
|
||||||
|
finetuning_type: full
|
||||||
|
use_apollo: true
|
||||||
|
apollo_layerwise: true # choices: [true, false], use false for DDP training
|
||||||
|
apollo_target: all
|
||||||
|
apollo_rank: 128
|
||||||
|
apollo_scale: 32.0
|
||||||
|
apollo_scale_type: channel
|
||||||
|
|
||||||
|
### dataset
|
||||||
|
dataset: identity,alpaca_en_demo
|
||||||
|
template: llama3
|
||||||
|
cutoff_len: 2048
|
||||||
|
max_samples: 1000
|
||||||
|
overwrite_cache: true
|
||||||
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
|
### output
|
||||||
|
output_dir: saves/llama3-8b/full/sft
|
||||||
|
logging_steps: 10
|
||||||
|
save_steps: 500
|
||||||
|
plot_loss: true
|
||||||
|
overwrite_output_dir: true
|
||||||
|
|
||||||
|
### train
|
||||||
|
per_device_train_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 1 # use 1 for layerwise apollo
|
||||||
|
learning_rate: 1.0e-5
|
||||||
|
num_train_epochs: 3.0
|
||||||
|
lr_scheduler_type: cosine
|
||||||
|
warmup_ratio: 0.1
|
||||||
|
pure_bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
|
### eval
|
||||||
|
# val_size: 0.1
|
||||||
|
# per_device_eval_batch_size: 1
|
||||||
|
# eval_strategy: steps
|
||||||
|
# eval_steps: 500
|
||||||
@@ -1,41 +1,43 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: full
|
finetuning_type: full
|
||||||
use_badam: true
|
use_badam: true
|
||||||
badam_switch_mode: descending
|
badam_mode: layer
|
||||||
|
badam_switch_mode: ascending
|
||||||
badam_switch_interval: 50
|
badam_switch_interval: 50
|
||||||
badam_verbose: 2
|
badam_verbose: 2
|
||||||
|
# deepspeed: examples/deepspeed/ds_z3_config.json
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/full/sft
|
output_dir: saves/llama3-8b/full/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-5
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
pure_bf16: true
|
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
@@ -1,42 +1,42 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
quantization_bit: 4
|
quantization_bit: 4
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: lora
|
finetuning_type: lora
|
||||||
lora_target: q_proj,v_proj
|
lora_rank: 8
|
||||||
|
lora_target: all
|
||||||
|
|
||||||
# ddp
|
### dataset
|
||||||
ddp_timeout: 180000000
|
dataset: identity,alpaca_en_demo
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
output_dir: saves/llama3-8b/lora/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-4
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
fp16: true
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
|
|||||||
@@ -1,10 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# DO NOT use GPTQ/AWQ model in FSDP+QLoRA
|
# DO NOT use GPTQ/AWQ model in FSDP+QLoRA
|
||||||
|
|
||||||
pip install "transformers>=4.39.1"
|
|
||||||
pip install "accelerate>=0.28.0"
|
|
||||||
pip install "bitsandbytes>=0.43.0"
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
|
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
|
||||||
--config_file examples/accelerate/fsdp_config.yaml \
|
--config_file examples/accelerate/fsdp_config.yaml \
|
||||||
src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
|
src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
|
||||||
@@ -1,42 +1,44 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: full
|
finetuning_type: full
|
||||||
use_galore: true
|
use_galore: true
|
||||||
galore_layerwise: true
|
galore_layerwise: true # choices: [true, false], use false for DDP training
|
||||||
galore_target: mlp,self_attn
|
galore_target: all
|
||||||
galore_rank: 128
|
galore_rank: 128
|
||||||
galore_scale: 2.0
|
galore_scale: 2.0
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/full/sft
|
output_dir: saves/llama3-8b/full/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 1
|
gradient_accumulation_steps: 1 # use 1 for layerwise galore
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-5
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
pure_bf16: true
|
pure_bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
|
|||||||
@@ -2,5 +2,5 @@
|
|||||||
|
|
||||||
python scripts/llama_pro.py \
|
python scripts/llama_pro.py \
|
||||||
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
|
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
|
||||||
--output_dir models/llama3-8b-instruct-pro \
|
--output_dir models/llama3-8b-pro \
|
||||||
--num_expand 8
|
--num_expand 8
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: models/llama3-8b-instruct-pro
|
model_name_or_path: models/llama3-8b-pro
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: freeze
|
finetuning_type: freeze
|
||||||
@@ -9,32 +10,33 @@ freeze_trainable_layers: 8
|
|||||||
freeze_trainable_modules: all
|
freeze_trainable_modules: all
|
||||||
use_llama_pro: true
|
use_llama_pro: true
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b-instruct-pro/freeze/sft
|
output_dir: saves/llama3-8b-pro/freeze/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-4
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
fp16: true
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
|
|||||||
@@ -1,39 +1,42 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: lora
|
finetuning_type: lora
|
||||||
lora_target: q_proj,v_proj
|
lora_rank: 8
|
||||||
|
lora_target: all
|
||||||
loraplus_lr_ratio: 16.0
|
loraplus_lr_ratio: 16.0
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
output_dir: saves/llama3-8b/lora/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-4
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
fp16: true
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
|
|||||||
@@ -1,39 +1,41 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: full
|
finetuning_type: full
|
||||||
mixture_of_depths: convert
|
mixture_of_depths: convert
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b-mod/full/sft
|
output_dir: saves/llama3-8b-mod/full/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
optim: paged_adamw_8bit
|
optim: paged_adamw_8bit
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-5
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
pure_bf16: true
|
pure_bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
|
|||||||
@@ -1,24 +1,29 @@
|
|||||||
# model
|
# The batch generation can be SLOW using this config.
|
||||||
|
# For faster inference, we recommend to use `scripts/vllm_infer.py`.
|
||||||
|
|
||||||
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_predict: true
|
do_predict: true
|
||||||
finetuning_type: lora
|
finetuning_type: lora
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
eval_dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 50
|
max_samples: 50
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/lora/predict
|
output_dir: saves/llama3-8b/lora/predict
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
per_device_eval_batch_size: 1
|
per_device_eval_batch_size: 1
|
||||||
predict_with_generate: true
|
predict_with_generate: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
5
examples/extras/pissa/init.sh
Normal file
5
examples/extras/pissa/init.sh
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
python scripts/pissa_init.py \
|
||||||
|
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
|
||||||
|
--output_dir models/llama3-8b-pissa
|
||||||
@@ -1,38 +1,44 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# method
|
### method
|
||||||
stage: sft
|
stage: sft
|
||||||
do_train: true
|
do_train: true
|
||||||
finetuning_type: lora
|
finetuning_type: lora
|
||||||
lora_target: q_proj,v_proj
|
lora_rank: 8
|
||||||
|
lora_target: all
|
||||||
|
pissa_init: true
|
||||||
|
pissa_iter: 16
|
||||||
|
pissa_convert: true
|
||||||
|
|
||||||
# dataset
|
### dataset
|
||||||
dataset: identity,alpaca_gpt4_en
|
dataset: identity,alpaca_en_demo
|
||||||
template: llama3
|
template: llama3
|
||||||
cutoff_len: 1024
|
cutoff_len: 2048
|
||||||
max_samples: 1000
|
max_samples: 1000
|
||||||
overwrite_cache: true
|
overwrite_cache: true
|
||||||
preprocessing_num_workers: 16
|
preprocessing_num_workers: 16
|
||||||
|
|
||||||
# output
|
### output
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
output_dir: saves/llama3-8b/lora/sft
|
||||||
logging_steps: 10
|
logging_steps: 10
|
||||||
save_steps: 500
|
save_steps: 500
|
||||||
plot_loss: true
|
plot_loss: true
|
||||||
overwrite_output_dir: true
|
overwrite_output_dir: true
|
||||||
|
|
||||||
# train
|
### train
|
||||||
per_device_train_batch_size: 1
|
per_device_train_batch_size: 1
|
||||||
gradient_accumulation_steps: 8
|
gradient_accumulation_steps: 8
|
||||||
learning_rate: 0.0001
|
learning_rate: 1.0e-4
|
||||||
num_train_epochs: 3.0
|
num_train_epochs: 3.0
|
||||||
lr_scheduler_type: cosine
|
lr_scheduler_type: cosine
|
||||||
warmup_steps: 0.1
|
warmup_ratio: 0.1
|
||||||
fp16: true
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
|
||||||
# eval
|
### eval
|
||||||
val_size: 0.1
|
# val_size: 0.1
|
||||||
per_device_eval_batch_size: 1
|
# per_device_eval_batch_size: 1
|
||||||
evaluation_strategy: steps
|
# eval_strategy: steps
|
||||||
eval_steps: 500
|
# eval_steps: 500
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: saves/llama3-8b/full/sft
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: sft
|
|
||||||
do_predict: true
|
|
||||||
finetuning_type: full
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 50
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/full/predict
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
predict_with_generate: true
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: sft
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: full
|
|
||||||
|
|
||||||
# ddp
|
|
||||||
ddp_timeout: 180000000
|
|
||||||
deepspeed: examples/deepspeed/ds_z3_config.json
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/full/sft
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 2
|
|
||||||
learning_rate: 0.0001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
NPROC_PER_NODE=4
|
|
||||||
NNODES=2
|
|
||||||
RANK=0
|
|
||||||
MASTER_ADDR=192.168.0.1
|
|
||||||
MASTER_PORT=29500
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun \
|
|
||||||
--nproc_per_node $NPROC_PER_NODE \
|
|
||||||
--nnodes $NNODES \
|
|
||||||
--node_rank $RANK \
|
|
||||||
--master_addr $MASTER_ADDR \
|
|
||||||
--master_port $MASTER_PORT \
|
|
||||||
src/train.py examples/full_multi_gpu/llama3_full_sft.yaml
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
|
|
||||||
--config_file examples/accelerate/single_config.yaml \
|
|
||||||
src/train.py examples/full_multi_gpu/llama3_full_predict.yaml
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
NPROC_PER_NODE=4
|
|
||||||
NNODES=1
|
|
||||||
RANK=0
|
|
||||||
MASTER_ADDR=127.0.0.1
|
|
||||||
MASTER_PORT=29500
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun \
|
|
||||||
--nproc_per_node $NPROC_PER_NODE \
|
|
||||||
--nnodes $NNODES \
|
|
||||||
--node_rank $RANK \
|
|
||||||
--master_addr $MASTER_ADDR \
|
|
||||||
--master_port $MASTER_PORT \
|
|
||||||
src/train.py examples/full_multi_gpu/llama3_full_sft.yaml
|
|
||||||
@@ -1,2 +1,4 @@
|
|||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
template: llama3
|
template: llama3
|
||||||
|
infer_backend: huggingface # choices: [huggingface, vllm]
|
||||||
|
trust_remote_code: true
|
||||||
|
|||||||
4
examples/inference/llama3_full_sft.yaml
Normal file
4
examples/inference/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
model_name_or_path: saves/llama3-8b/full/sft
|
||||||
|
template: llama3
|
||||||
|
infer_backend: huggingface # choices: [huggingface, vllm]
|
||||||
|
trust_remote_code: true
|
||||||
@@ -1,4 +1,5 @@
|
|||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||||
template: llama3
|
template: llama3
|
||||||
finetuning_type: lora
|
infer_backend: huggingface # choices: [huggingface, vllm]
|
||||||
|
trust_remote_code: true
|
||||||
|
|||||||
@@ -2,3 +2,4 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|||||||
template: llama3
|
template: llama3
|
||||||
infer_backend: vllm
|
infer_backend: vllm
|
||||||
vllm_enforce_eager: true
|
vllm_enforce_eager: true
|
||||||
|
trust_remote_code: true
|
||||||
|
|||||||
4
examples/inference/llava1_5.yaml
Normal file
4
examples/inference/llava1_5.yaml
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
model_name_or_path: llava-hf/llava-1.5-7b-hf
|
||||||
|
template: llava
|
||||||
|
infer_backend: huggingface # choices: [huggingface, vllm]
|
||||||
|
trust_remote_code: true
|
||||||
4
examples/inference/qwen2_vl.yaml
Normal file
4
examples/inference/qwen2_vl.yaml
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
|
||||||
|
template: qwen2_vl
|
||||||
|
infer_backend: huggingface # choices: [huggingface, vllm]
|
||||||
|
trust_remote_code: true
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
NPROC_PER_NODE=4
|
|
||||||
NNODES=1
|
|
||||||
RANK=0
|
|
||||||
MASTER_ADDR=127.0.0.1
|
|
||||||
MASTER_PORT=29500
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun \
|
|
||||||
--nproc_per_node $NPROC_PER_NODE \
|
|
||||||
--nnodes $NNODES \
|
|
||||||
--node_rank $RANK \
|
|
||||||
--master_addr $MASTER_ADDR \
|
|
||||||
--master_port $MASTER_PORT \
|
|
||||||
src/train.py examples/lora_multi_gpu/llama3_lora_sft_ds.yaml
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: sft
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# ddp
|
|
||||||
ddp_timeout: 180000000
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 2
|
|
||||||
learning_rate: 0.0001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: sft
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# ddp
|
|
||||||
ddp_timeout: 180000000
|
|
||||||
deepspeed: examples/deepspeed/ds_z3_config.json
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 2
|
|
||||||
learning_rate: 0.0001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# also launch it on slave machine using slave_config.yaml
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
|
|
||||||
--config_file examples/accelerate/master_config.yaml \
|
|
||||||
src/train.py examples/lora_multi_gpu/llama3_lora_sft.yaml
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
|
|
||||||
--config_file examples/accelerate/single_config.yaml \
|
|
||||||
src/train.py examples/lora_multi_gpu/llama3_lora_sft.yaml
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
NPROC_PER_NODE=4
|
|
||||||
NNODES=1
|
|
||||||
RANK=0
|
|
||||||
MASTER_ADDR=127.0.0.1
|
|
||||||
MASTER_PORT=29500
|
|
||||||
|
|
||||||
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 torchrun \
|
|
||||||
--nproc_per_node $NPROC_PER_NODE \
|
|
||||||
--nnodes $NNODES \
|
|
||||||
--node_rank $RANK \
|
|
||||||
--master_addr $MASTER_ADDR \
|
|
||||||
--master_port $MASTER_PORT \
|
|
||||||
src/train.py examples/lora_multi_npu/llama3_lora_sft_ds.yaml
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: sft
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# ddp
|
|
||||||
ddp_timeout: 180000000
|
|
||||||
deepspeed: examples/deepspeed/ds_z0_config.json
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: identity,alpaca_gpt4_en
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 2
|
|
||||||
learning_rate: 0.0001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: dpo
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
dpo_ftx: 1.0
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: orca_rlhf
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/dpo
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 8
|
|
||||||
learning_rate: 0.00001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: orpo
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: orca_rlhf
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/orpo
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 8
|
|
||||||
learning_rate: 0.00001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: pt
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: c4_demo
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/sft
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 8
|
|
||||||
learning_rate: 0.0001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
# model
|
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
|
||||||
|
|
||||||
# method
|
|
||||||
stage: rm
|
|
||||||
do_train: true
|
|
||||||
finetuning_type: lora
|
|
||||||
lora_target: q_proj,v_proj
|
|
||||||
|
|
||||||
# dataset
|
|
||||||
dataset: orca_rlhf
|
|
||||||
template: llama3
|
|
||||||
cutoff_len: 1024
|
|
||||||
max_samples: 1000
|
|
||||||
overwrite_cache: true
|
|
||||||
preprocessing_num_workers: 16
|
|
||||||
|
|
||||||
# output
|
|
||||||
output_dir: saves/llama3-8b/lora/reward
|
|
||||||
logging_steps: 10
|
|
||||||
save_steps: 500
|
|
||||||
plot_loss: true
|
|
||||||
overwrite_output_dir: true
|
|
||||||
|
|
||||||
# train
|
|
||||||
per_device_train_batch_size: 1
|
|
||||||
gradient_accumulation_steps: 8
|
|
||||||
learning_rate: 0.00001
|
|
||||||
num_train_epochs: 3.0
|
|
||||||
lr_scheduler_type: cosine
|
|
||||||
warmup_steps: 0.1
|
|
||||||
fp16: true
|
|
||||||
|
|
||||||
# eval
|
|
||||||
val_size: 0.1
|
|
||||||
per_device_eval_batch_size: 1
|
|
||||||
evaluation_strategy: steps
|
|
||||||
eval_steps: 500
|
|
||||||
10
examples/merge_lora/llama3_full_sft.yaml
Normal file
10
examples/merge_lora/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: saves/llama3-8b/full/sft
|
||||||
|
template: llama3
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### export
|
||||||
|
export_dir: output/llama3_full_sft
|
||||||
|
export_size: 5
|
||||||
|
export_device: cpu
|
||||||
|
export_legacy_format: false
|
||||||
@@ -1,11 +1,12 @@
|
|||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
template: llama3
|
template: llama3
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
# export
|
### export
|
||||||
export_dir: models/llama3_gptq
|
export_dir: output/llama3_gptq
|
||||||
export_quantization_bit: 4
|
export_quantization_bit: 4
|
||||||
export_quantization_dataset: data/c4_demo.json
|
export_quantization_dataset: data/c4_demo.json
|
||||||
export_size: 2
|
export_size: 5
|
||||||
export_device: cpu
|
export_device: cpu
|
||||||
export_legacy_format: false
|
export_legacy_format: false
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
# Note: DO NOT use quantized model or quantization_bit when merging lora adapters
|
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
|
||||||
|
|
||||||
# model
|
### model
|
||||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||||
template: llama3
|
template: llama3
|
||||||
finetuning_type: lora
|
trust_remote_code: true
|
||||||
|
|
||||||
# export
|
### export
|
||||||
export_dir: models/llama3_lora_sft
|
export_dir: output/llama3_lora_sft
|
||||||
export_size: 2
|
export_size: 5
|
||||||
export_device: cpu
|
export_device: cpu
|
||||||
export_legacy_format: false
|
export_legacy_format: false
|
||||||
|
|||||||
13
examples/merge_lora/qwen2vl_lora_sft.yaml
Normal file
13
examples/merge_lora/qwen2vl_lora_sft.yaml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
|
||||||
|
|
||||||
|
### model
|
||||||
|
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
|
||||||
|
adapter_name_or_path: saves/qwen2_vl-7b/lora/sft
|
||||||
|
template: qwen2_vl
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### export
|
||||||
|
export_dir: output/qwen2_vl_lora_sft
|
||||||
|
export_size: 5
|
||||||
|
export_device: cpu
|
||||||
|
export_legacy_format: false
|
||||||
44
examples/train_full/llama3_full_sft.yaml
Normal file
44
examples/train_full/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### method
|
||||||
|
stage: sft
|
||||||
|
do_train: true
|
||||||
|
finetuning_type: full
|
||||||
|
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
|
||||||
|
|
||||||
|
### dataset
|
||||||
|
dataset: identity,alpaca_en_demo
|
||||||
|
template: llama3
|
||||||
|
cutoff_len: 2048
|
||||||
|
max_samples: 1000
|
||||||
|
overwrite_cache: true
|
||||||
|
preprocessing_num_workers: 16
|
||||||
|
dataloader_num_workers: 4
|
||||||
|
|
||||||
|
### output
|
||||||
|
output_dir: saves/llama3-8b/full/sft
|
||||||
|
logging_steps: 10
|
||||||
|
save_steps: 500
|
||||||
|
plot_loss: true
|
||||||
|
overwrite_output_dir: true
|
||||||
|
save_only_model: false
|
||||||
|
|
||||||
|
### train
|
||||||
|
per_device_train_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 2
|
||||||
|
learning_rate: 1.0e-5
|
||||||
|
num_train_epochs: 3.0
|
||||||
|
lr_scheduler_type: cosine
|
||||||
|
warmup_ratio: 0.1
|
||||||
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
resume_from_checkpoint: null
|
||||||
|
|
||||||
|
### eval
|
||||||
|
# eval_dataset: alpaca_en_demo
|
||||||
|
# val_size: 0.1
|
||||||
|
# per_device_eval_batch_size: 1
|
||||||
|
# eval_strategy: steps
|
||||||
|
# eval_steps: 500
|
||||||
48
examples/train_full/qwen2vl_full_sft.yaml
Normal file
48
examples/train_full/qwen2vl_full_sft.yaml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
|
||||||
|
image_max_pixels: 262144
|
||||||
|
video_max_pixels: 16384
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### method
|
||||||
|
stage: sft
|
||||||
|
do_train: true
|
||||||
|
finetuning_type: full
|
||||||
|
freeze_vision_tower: true # choices: [true, false]
|
||||||
|
freeze_multi_modal_projector: true # choices: [true, false]
|
||||||
|
freeze_language_model: false # choices: [true, false]
|
||||||
|
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
|
||||||
|
|
||||||
|
### dataset
|
||||||
|
dataset: mllm_demo,identity,alpaca_en_demo
|
||||||
|
template: qwen2_vl
|
||||||
|
cutoff_len: 2048
|
||||||
|
max_samples: 1000
|
||||||
|
overwrite_cache: true
|
||||||
|
preprocessing_num_workers: 16
|
||||||
|
dataloader_num_workers: 4
|
||||||
|
|
||||||
|
### output
|
||||||
|
output_dir: saves/qwen2_vl-7b/full/sft
|
||||||
|
logging_steps: 10
|
||||||
|
save_steps: 500
|
||||||
|
plot_loss: true
|
||||||
|
overwrite_output_dir: true
|
||||||
|
save_only_model: false
|
||||||
|
|
||||||
|
### train
|
||||||
|
per_device_train_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 2
|
||||||
|
learning_rate: 1.0e-5
|
||||||
|
num_train_epochs: 3.0
|
||||||
|
lr_scheduler_type: cosine
|
||||||
|
warmup_ratio: 0.1
|
||||||
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
resume_from_checkpoint: null
|
||||||
|
|
||||||
|
### eval
|
||||||
|
# val_size: 0.1
|
||||||
|
# per_device_eval_batch_size: 1
|
||||||
|
# eval_strategy: steps
|
||||||
|
# eval_steps: 500
|
||||||
47
examples/train_lora/llama3_lora_dpo.yaml
Normal file
47
examples/train_lora/llama3_lora_dpo.yaml
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
### model
|
||||||
|
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||||
|
trust_remote_code: true
|
||||||
|
|
||||||
|
### method
|
||||||
|
stage: dpo
|
||||||
|
do_train: true
|
||||||
|
finetuning_type: lora
|
||||||
|
lora_rank: 8
|
||||||
|
lora_target: all
|
||||||
|
pref_beta: 0.1
|
||||||
|
pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
|
||||||
|
|
||||||
|
### dataset
|
||||||
|
dataset: dpo_en_demo
|
||||||
|
template: llama3
|
||||||
|
cutoff_len: 2048
|
||||||
|
max_samples: 1000
|
||||||
|
overwrite_cache: true
|
||||||
|
preprocessing_num_workers: 16
|
||||||
|
dataloader_num_workers: 4
|
||||||
|
|
||||||
|
### output
|
||||||
|
output_dir: saves/llama3-8b/lora/dpo
|
||||||
|
logging_steps: 10
|
||||||
|
save_steps: 500
|
||||||
|
plot_loss: true
|
||||||
|
overwrite_output_dir: true
|
||||||
|
save_only_model: false
|
||||||
|
|
||||||
|
### train
|
||||||
|
per_device_train_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 8
|
||||||
|
learning_rate: 5.0e-6
|
||||||
|
num_train_epochs: 3.0
|
||||||
|
lr_scheduler_type: cosine
|
||||||
|
warmup_ratio: 0.1
|
||||||
|
bf16: true
|
||||||
|
ddp_timeout: 180000000
|
||||||
|
resume_from_checkpoint: null
|
||||||
|
|
||||||
|
### eval
|
||||||
|
# eval_dataset: dpo_en_demo
|
||||||
|
# val_size: 0.1
|
||||||
|
# per_device_eval_batch_size: 1
|
||||||
|
# eval_strategy: steps
|
||||||
|
# eval_steps: 500
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user