Compare commits
2565 Commits
Commit table (Author | SHA1 | Date): 2565 entries, from ca75f1edf3 down to fd8c2d4aac. Only the SHA1 column was captured in this export; the author, date, and message cells are empty, so the individual rows are not reproduced here.
.dockerignore (new file, 15 lines)

@@ -0,0 +1,15 @@
.vscode
.git
.github
.venv
cache
docker
saves
hf_cache
ms_cache
om_cache
shared_data
output
.dockerignore
.gitattributes
.gitignore
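These entries keep caches, outputs, and VCS metadata out of the Docker build context. As a rough illustration of how such ignore patterns behave, here is a minimal Python sketch that filters candidate paths against the entries above. It is an approximation under assumed semantics (Docker's real matcher follows Go's filepath.Match rules with `**` and `!` support), and the sample paths in the demo are hypothetical.

```python
# Simplified illustration, not Docker's exact matcher: a bare entry such as
# "cache" or ".git" excludes that path and everything under it.
from fnmatch import fnmatch
from pathlib import Path


def load_patterns(dockerignore: str = ".dockerignore") -> list[str]:
    lines = Path(dockerignore).read_text().splitlines()
    return [ln.strip() for ln in lines if ln.strip() and not ln.startswith("#")]


def is_ignored(path: str, patterns: list[str]) -> bool:
    parts = Path(path).parts
    for pat in patterns:
        # Match either the whole path or its first component against the pattern.
        if fnmatch(path, pat) or (parts and fnmatch(parts[0], pat)):
            return True
    return False


if __name__ == "__main__":
    pats = load_patterns()
    for candidate in ["src/train.py", "hf_cache/model.bin", ".git/HEAD", "output/run1/log.txt"]:
        print(candidate, "->", "excluded" if is_ignored(candidate, pats) else "kept")
```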
.env.local (new file, 42 lines)

@@ -0,0 +1,42 @@
# Note: actually we do not support .env, just for reference
# api
API_HOST=
API_PORT=
API_KEY=
API_MODEL_NAME=
API_VERBOSE=
FASTAPI_ROOT_PATH=
MAX_CONCURRENT=
# general
DISABLE_VERSION_CHECK=
FORCE_CHECK_IMPORTS=
ALLOW_EXTRA_ARGS=
LLAMAFACTORY_VERBOSITY=
USE_MODELSCOPE_HUB=
USE_OPENMIND_HUB=
USE_RAY=
RECORD_VRAM=
OPTIM_TORCH=
NPU_JIT_COMPILE=
# torchrun
FORCE_TORCHRUN=
MASTER_ADDR=
MASTER_PORT=
NNODES=
NODE_RANK=
NPROC_PER_NODE=
# wandb
WANDB_DISABLED=
WANDB_PROJECT=
WANDB_API_KEY=
# gradio ui
GRADIO_SHARE=
GRADIO_SERVER_NAME=
GRADIO_SERVER_PORT=
GRADIO_ROOT_PATH=
GRADIO_IPV6=
# setup
ENABLE_SHORT_CONSOLE=
# reserved (do not use)
LLAMABOARD_ENABLED=
LLAMABOARD_WORKDIR=
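The file's own note says it is for reference only, so these variables are expected to be set in the process environment rather than loaded from `.env`. As a minimal sketch of how settings like these are typically consumed, the snippet below reads a few of the variables listed above with `os.getenv`; the fallback defaults are illustrative assumptions, not values defined by the project.

```python
import os

# Read a few of the variables listed above, with made-up fallback defaults.
api_host = os.getenv("API_HOST", "0.0.0.0")                # assumed default, for illustration only
api_port = int(os.getenv("API_PORT", "8000"))              # assumed default, for illustration only
verbosity = os.getenv("LLAMAFACTORY_VERBOSITY", "INFO")
use_modelscope = os.getenv("USE_MODELSCOPE_HUB", "0").lower() in ("1", "true")

print(f"API on {api_host}:{api_port}, verbosity={verbosity}, ModelScope hub={use_modelscope}")
```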
128
.github/CODE_OF_CONDUCT.md
vendored
Normal file
128
.github/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
`hoshihiyouga AT gmail DOT com`.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.

.github/CONTRIBUTING.md (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
# Contributing to LLaMA Factory
|
||||
|
||||
Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable.
|
||||
|
||||
It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you.
|
||||
|
||||
However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md).
|
||||
|
||||
**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).**
|
||||
|
||||
## Ways to contribute
|
||||
|
||||
There are several ways you can contribute to LLaMA Factory:
|
||||
|
||||
* Fix outstanding issues with the existing code.
|
||||
* Submit issues related to bugs or desired new features.
|
||||
* Contribute to the examples or to the documentation.
|
||||
|
||||
### Style guide
|
||||
|
||||
LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html); refer to it for details.
|
||||
|
||||
### Create a Pull Request
|
||||
|
||||
1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account.
|
||||
|
||||
2. Clone your fork to your local disk, and add the base repository as a remote:
|
||||
|
||||
```bash
|
||||
git clone git@github.com:[username]/LLaMA-Factory.git
|
||||
cd LLaMA-Factory
|
||||
git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git
|
||||
```
|
||||
|
||||
3. Create a new branch to hold your development changes:
|
||||
|
||||
```bash
|
||||
git checkout -b dev_your_branch
|
||||
```
|
||||
|
||||
4. Set up a development environment by running the following command in a virtual environment:
|
||||
|
||||
```bash
|
||||
pip install -e ".[dev]"
|
||||
```
|
||||
|
||||
If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the -e flag.
|
||||
|
||||
5. Check code before commit:
|
||||
|
||||
```bash
|
||||
make commit
|
||||
make style && make quality
|
||||
make test
|
||||
```
|
||||
|
||||
6. Submit changes:
|
||||
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "commit message"
|
||||
git fetch upstream
|
||||
git rebase upstream/main
|
||||
git push -u origin dev_your_branch
|
||||
```
|
||||
|
||||
7. Create a pull request from your branch `dev_your_branch` against the `main` branch of the [original repository](https://github.com/hiyouga/LLaMA-Factory).
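If you prefer working from the terminal, the same step can be performed with the GitHub CLI. This is only a sketch under the assumption that `gh` is installed and authenticated; the title, body, and username are placeholders.

```bash
# Open a pull request from dev_your_branch against the upstream main branch
# (assumes the GitHub CLI `gh` is installed and `gh auth login` has been run).
gh pr create \
  --repo hiyouga/LLaMA-Factory \
  --base main \
  --head "<your-username>:dev_your_branch" \
  --title "Short description of the change" \
  --body "Fixes #<issue-number>"
```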

.github/ISSUE_TEMPLATE/1-bug-report.yml (vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
name: "\U0001F41B Bug / help"
|
||||
description: Create a report to help us improve LLaMA Factory
|
||||
labels: ["bug", "pending"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Issues included in **[FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** or those with **insufficient** information may be closed without a response.
|
||||
已经包含在 **[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** 内或提供信息**不完整**的 issues 可能不会被回复。
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please do not create issues that are not related to framework bugs under this category, use **[Discussions](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)** instead.
|
||||
请勿在此分类下创建和框架 bug 无关的 issues,训练问题求助请使用 **[讨论区](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)**。
|
||||
|
||||
- type: checkboxes
|
||||
id: reminder
|
||||
attributes:
|
||||
label: Reminder
|
||||
description: |
|
||||
Please ensure you have read the above rules carefully and searched the existing issues (including FAQs).
|
||||
请确保您已经认真阅读了上述规则并且搜索过现有的 issues(包括常见问题)。
|
||||
|
||||
options:
|
||||
- label: I have read the above rules and searched the existing issues.
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: system-info
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: System Info
|
||||
description: |
|
||||
Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
|
||||
请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
|
||||
|
||||
placeholder: llamafactory version, platform, python version, ...
|
||||
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Reproduction
|
||||
description: |
|
||||
Please provide the entry arguments, error messages, and stack traces that reproduce the problem.
|
||||
请提供入口参数,错误日志以及异常堆栈以便于我们复现问题。
|
||||
|
||||
value: |
|
||||
```text
|
||||
Put your message here.
|
||||
```
|
||||
|
||||
- type: textarea
|
||||
id: others
|
||||
validations:
|
||||
required: false
|
||||
attributes:
|
||||
label: Others
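Since the template above asks for the output of `llamafactory-cli env`, the requested information can be gathered from a terminal before the issue is opened. A minimal sketch (the output file name is arbitrary):

```bash
# Collect the environment report requested by the bug template.
llamafactory-cli env > env_report.txt

# Optionally record the exact versions of the core libraries as well.
pip freeze | grep -i -E "llamafactory|transformers|torch|peft|trl" >> env_report.txt
```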

.github/ISSUE_TEMPLATE/2-feature-request.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
name: "\U0001F680 Feature request"
|
||||
description: Submit a request for a new feature
|
||||
labels: ["enhancement", "pending"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please do not create issues that are not related to new features under this category.
|
||||
请勿在此分类下创建和新特性无关的 issues。
|
||||
|
||||
- type: checkboxes
|
||||
id: reminder
|
||||
attributes:
|
||||
label: Reminder
|
||||
description: |
|
||||
Please ensure you have read the above rules carefully and searched the existing issues.
|
||||
请确保您已经认真阅读了上述规则并且搜索过现有的 issues。
|
||||
|
||||
options:
|
||||
- label: I have read the above rules and searched the existing issues.
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: description
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Description
|
||||
description: |
|
||||
A clear and concise description of the feature proposal.
|
||||
请详细描述您希望加入的新功能特性。
|
||||
|
||||
- type: textarea
|
||||
id: contribution
|
||||
validations:
|
||||
required: false
|
||||
attributes:
|
||||
label: Pull Request
|
||||
description: |
|
||||
Have you already created the relevant PR and submitted the code?
|
||||
您是否已经创建了相关 PR 并提交了代码?

.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 1 line)
@@ -0,0 +1 @@
blank_issues_enabled: false

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
# What does this PR do?

Fixes # (issue)

## Before submitting

- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
- [ ] Did you write any new necessary tests?

.github/SECURITY.md (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
# Reporting Security Issues

To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/hiyouga/LLaMA-Factory/security/advisories/new) tab.

We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.

Report security bugs in third-party modules to the person or team maintaining the module.

.github/workflows/docker.yml (vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
name: docker
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
paths:
|
||||
- "**/*.py"
|
||||
- "requirements.txt"
|
||||
- "docker/**"
|
||||
- ".github/workflows/*.yml"
|
||||
pull_request:
|
||||
branches:
|
||||
- "main"
|
||||
paths:
|
||||
- "**/*.py"
|
||||
- "requirements.txt"
|
||||
- "docker/**"
|
||||
- ".github/workflows/*.yml"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
|
||||
|
||||
environment:
|
||||
name: docker
|
||||
url: https://hub.docker.com/r/hiyouga/llamafactory
|
||||
|
||||
steps:
|
||||
- name: Free up disk space
|
||||
run: |
|
||||
df -h
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf /opt/ghc
|
||||
sudo rm -rf /opt/hostedtoolcache
|
||||
df -h
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Docker Hub
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ vars.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/docker-cuda/Dockerfile
|
||||
build-args: |
|
||||
EXTRAS=metrics,deepspeed,liger-kernel
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: docker.io/hiyouga/llamafactory:latest
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
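The workflow above builds `docker/docker-cuda/Dockerfile` with the `EXTRAS` build argument and, on pushes to `main`, publishes it as `hiyouga/llamafactory:latest`. The same image can be reproduced locally; this is only a sketch and the local tag is arbitrary:

```bash
# Build the CUDA image locally with the same build argument used by the CI job.
docker build \
  --file ./docker/docker-cuda/Dockerfile \
  --build-arg EXTRAS=metrics,deepspeed,liger-kernel \
  --tag llamafactory:local \
  .

# Smoke-test the image (assumes the NVIDIA container toolkit is configured).
docker run -it --rm --gpus=all --ipc=host llamafactory:local
```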

.github/workflows/label_issue.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: label_issue
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
|
||||
jobs:
|
||||
label_issue:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE_URL: ${{ github.event.issue.html_url }}
|
||||
ISSUE_TITLE: ${{ github.event.issue.title }}
|
||||
run: |
|
||||
LABEL=""
|
||||
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾)
|
||||
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
|
||||
for KEYWORD in ${NPU_KEYWORDS[@]}; do
|
||||
if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
|
||||
LABEL="npu"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ -n "$LABEL" ]; then
|
||||
gh issue edit $ISSUE_URL --add-label $LABEL
|
||||
fi
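The labeling logic above is plain Bash, so it can be exercised locally before the keyword list is changed. A rough sketch with a sample title:

```bash
# Dry-run the keyword matching used by the workflow (no GitHub API calls).
ISSUE_TITLE="Training fails on Ascend NPU with deepspeed"
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾)
ISSUE_TITLE_LOWER=$(echo "$ISSUE_TITLE" | tr '[:upper:]' '[:lower:]')
LABEL=""
for KEYWORD in "${NPU_KEYWORDS[@]}"; do
  if [[ $ISSUE_TITLE_LOWER == *"$KEYWORD"* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
    LABEL="npu"
    break
  fi
done
echo "label to apply: ${LABEL:-none}"
```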

.github/workflows/publish.yml (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
name: publish
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
release:
|
||||
types:
|
||||
- published
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
name: Upload release to PyPI
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
environment:
|
||||
name: release
|
||||
url: https://pypi.org/p/llamafactory
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.9"
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
make build
|
||||
|
||||
- name: Publish package
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
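Because the job requests `id-token: write` and calls `pypa/gh-action-pypi-publish` without a stored token, it appears to rely on PyPI trusted publishing. The distribution can still be built and inspected locally before a release is published; a minimal sketch (`twine` is an extra tool, not a dependency of this repository):

```bash
# Build the sdist and wheel exactly as the workflow does.
make build                       # runs: pip3 install build && python3 -m build

# Inspect the artifacts before tagging a release on GitHub.
ls -lh dist/
python3 -m twine check dist/*    # assumes twine was installed via: pip3 install twine
```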

.github/workflows/tests.yml (vendored, new file, 102 lines)
@@ -0,0 +1,102 @@
name: tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
paths:
|
||||
- "**.py"
|
||||
- "requirements.txt"
|
||||
- ".github/workflows/*.yml"
|
||||
pull_request:
|
||||
branches:
|
||||
- "main"
|
||||
paths:
|
||||
- "**.py"
|
||||
- "requirements.txt"
|
||||
- ".github/workflows/*.yml"
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python:
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
- "3.12"
|
||||
os:
|
||||
- "ubuntu-latest"
|
||||
- "windows-latest"
|
||||
- "macos-13"
|
||||
transformers:
|
||||
- null
|
||||
include: # test backward compatibility
|
||||
- python: "3.9"
|
||||
os: "ubuntu-latest"
|
||||
transformers: "4.45.0"
|
||||
- python: "3.9"
|
||||
os: "ubuntu-latest"
|
||||
transformers: "4.49.0"
|
||||
- python: "3.9"
|
||||
os: "ubuntu-latest"
|
||||
transformers: "4.51.0"
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}
|
||||
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
|
||||
|
||||
env:
|
||||
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
||||
OS_NAME: ${{ matrix.os }}
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python }}
|
||||
cache: "pip"
|
||||
cache-dependency-path: "**/requirements*.txt"
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install ".[torch,dev]"
|
||||
|
||||
- name: Install transformers
|
||||
if: ${{ matrix.transformers }}
|
||||
run: |
|
||||
python -m pip install "transformers==${{ matrix.transformers }}"
|
||||
|
||||
- name: Cache files
|
||||
id: hf-hub-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.temp }}/huggingface
|
||||
key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}-${{ hashFiles('tests/version.txt') }}
|
||||
|
||||
- name: Check quality
|
||||
run: |
|
||||
make style && make quality
|
||||
|
||||
- name: Check license
|
||||
run: |
|
||||
make license
|
||||
|
||||
- name: Check build
|
||||
run: |
|
||||
make build
|
||||
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
make test
|
||||
env:
|
||||
HF_HOME: ${{ runner.temp }}/huggingface
|
||||
HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
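A single cell of the matrix above can be approximated on a local machine. This is only a sketch: it assumes a fresh virtual environment and skips the Hugging Face cache handling that the workflow performs with `actions/cache`.

```bash
# Reproduce one matrix entry (e.g. Python 3.9 with transformers 4.49.0) locally.
python -m pip install --upgrade pip
python -m pip install ".[torch,dev]"
python -m pip install "transformers==4.49.0"   # only for the backward-compatibility entries

make style && make quality   # lint and format checks
make license                 # license header check
make build                   # packaging check
make test                    # pytest suite (CPU-only, see the Makefile below)
```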

.gitignore (vendored, new file, 179 lines)
@@ -0,0 +1,179 @@
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
.idea/
|
||||
|
||||
# vscode
|
||||
.vscode/
|
||||
|
||||
# uv
|
||||
uv.lock
|
||||
|
||||
# custom .gitignore
|
||||
hf_cache/
|
||||
ms_cache/
|
||||
om_cache/
|
||||
cache/
|
||||
config/
|
||||
saves/
|
||||
output/
|
||||
wandb/
|
||||
swanlog/
|
||||
generated_predictions.jsonl
|
||||
predictions_score.json
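Besides the standard Python entries, the custom section above keeps run artifacts such as `saves/`, `output/` and the various cache directories out of version control. Whether a given path is actually covered can be checked with git itself (the paths below are arbitrary examples):

```bash
# Ask git which ignore rule, if any, matches a path.
git check-ignore -v saves/my_run/checkpoint-100
git check-ignore -v generated_predictions.jsonl
```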

.pre-commit-config.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-ast
      - id: check-added-large-files
        args: ['--maxkb=25000']
      - id: check-merge-conflict
      - id: check-yaml
      - id: debug-statements
      - id: end-of-file-fixer
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: no-commit-to-branch
        args: ['--branch', 'main']

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.17.0
    hooks:
      - id: pyupgrade
        args: [--py38-plus]

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.9
    hooks:
      - id: ruff
        args: [--fix]
      - id: ruff-format
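These hooks are what `make commit` wires up (see the Makefile later in this diff). They can also be driven directly; a short sketch:

```bash
pip install pre-commit          # one-time setup
pre-commit install              # register the git hook in this clone
pre-commit run --all-files      # run every hook once over the whole tree
```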

CITATION.cff (new file, 44 lines)
@@ -0,0 +1,44 @@
cff-version: 1.2.0
|
||||
date-released: 2024-03
|
||||
message: "If you use this software, please cite it as below."
|
||||
authors:
|
||||
- family-names: "Zheng"
|
||||
given-names: "Yaowei"
|
||||
- family-names: "Zhang"
|
||||
given-names: "Richong"
|
||||
- family-names: "Zhang"
|
||||
given-names: "Junhao"
|
||||
- family-names: "Ye"
|
||||
given-names: "Yanhan"
|
||||
- family-names: "Luo"
|
||||
given-names: "Zheyan"
|
||||
- family-names: "Feng"
|
||||
given-names: "Zhangchi"
|
||||
- family-names: "Ma"
|
||||
given-names: "Yongqiang"
|
||||
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
||||
url: "https://arxiv.org/abs/2403.13372"
|
||||
preferred-citation:
|
||||
type: conference-paper
|
||||
conference:
|
||||
name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)"
|
||||
authors:
|
||||
- family-names: "Zheng"
|
||||
given-names: "Yaowei"
|
||||
- family-names: "Zhang"
|
||||
given-names: "Richong"
|
||||
- family-names: "Zhang"
|
||||
given-names: "Junhao"
|
||||
- family-names: "Ye"
|
||||
given-names: "Yanhan"
|
||||
- family-names: "Luo"
|
||||
given-names: "Zheyan"
|
||||
- family-names: "Feng"
|
||||
given-names: "Zhangchi"
|
||||
- family-names: "Ma"
|
||||
given-names: "Yongqiang"
|
||||
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
||||
url: "https://arxiv.org/abs/2403.13372"
|
||||
year: 2024
|
||||
publisher: "Association for Computational Linguistics"
|
||||
address: "Bangkok, Thailand"

MANIFEST.in (new file, 1 line)
@@ -0,0 +1 @@
include LICENSE requirements.txt

Makefile (new file, 24 lines)
@@ -0,0 +1,24 @@
.PHONY: build commit license quality style test

check_dirs := scripts src tests setup.py

build:
	pip3 install build && python3 -m build

commit:
	pre-commit install
	pre-commit run --all-files

license:
	python3 tests/check_license.py $(check_dirs)

quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)

style:
	ruff check $(check_dirs) --fix
	ruff format $(check_dirs)

test:
	CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/
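Note that the `test` target clears `CUDA_VISIBLE_DEVICES` and disables Weights & Biases, so the suite runs on CPU without creating experiment logs. The same environment can be reused to run a subset of the tests; the keyword below is only an example:

```bash
# Same environment as `make test`, but select tests by keyword expression.
CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/ -k "template"
```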

README_zh.md (new file, 943 lines)
@@ -0,0 +1,943 @@

|
||||
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
|
||||
[](https://pypi.org/project/llamafactory/)
|
||||
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
||||
[](https://hub.docker.com/r/hiyouga/llamafactory/tags)
|
||||
|
||||
[](https://twitter.com/llamafactory_ai)
|
||||
[](https://discord.gg/rKfvV9r9FK)
|
||||
[](https://gitcode.com/zhengyaowei/LLaMA-Factory)
|
||||
|
||||
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
||||
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
||||
[](https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory)
|
||||
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
||||
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
||||
[](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)
|
||||
|
||||
### 获得[亚马逊](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)、[英伟达](https://developer.nvidia.cn/rtx/ai-toolkit)、[阿里云](https://help.aliyun.com/zh/pai/use-cases/fine-tune-a-llama-3-model-with-llama-factory)等的应用。
|
||||
|
||||
<div align="center" markdown="1">
|
||||
|
||||
### 赞助商 ❤️
|
||||
|
||||
<a href="https://warp.dev/llama-factory">
|
||||
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
|
||||
</a>
|
||||
|
||||
#### [Warp,面向开发者的智能终端](https://warp.dev/llama-factory)
|
||||
|
||||
[适用于 MacOS、Linux 和 Windows](https://warp.dev/llama-factory)
|
||||
|
||||
----
|
||||
|
||||
### 使用零代码[命令行](#快速开始)与 [Web UI](#llama-board-可视化微调由-gradio-驱动) 轻松微调百余种大模型
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
👋 加入我们的[微信群](assets/wechat.jpg)、[NPU 用户群](assets/wechat_npu.jpg)或 [九章智算云算力优惠群](assets/wechat_alaya.png)。
|
||||
|
||||
\[ [English](README.md) | 中文 \]
|
||||
|
||||
**微调大模型可以像这样轻松…**
|
||||
|
||||
https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
|
||||
选择你的打开方式:
|
||||
|
||||
- **入门教程**:https://zhuanlan.zhihu.com/p/695287607
|
||||
- **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
|
||||
- **框架文档(昇腾 NPU)**:https://ascend.github.io/docs/sources/llamafactory/
|
||||
- **Colab(免费)**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
||||
- **本地机器**:请见[如何使用](#如何使用)
|
||||
- **PAI-DSW(免费试用)**:https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
|
||||
- **九章智算云(算力优惠活动)**:https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory
|
||||
|
||||
> [!NOTE]
|
||||
> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。
|
||||
|
||||
## 目录
|
||||
|
||||
- [项目特色](#项目特色)
|
||||
- [官方博客](#官方博客)
|
||||
- [更新日志](#更新日志)
|
||||
- [模型](#模型)
|
||||
- [训练方法](#训练方法)
|
||||
- [数据集](#数据集)
|
||||
- [软硬件依赖](#软硬件依赖)
|
||||
- [如何使用](#如何使用)
|
||||
- [安装 LLaMA Factory](#安装-llama-factory)
|
||||
- [数据准备](#数据准备)
|
||||
- [快速开始](#快速开始)
|
||||
- [LLaMA Board 可视化微调](#llama-board-可视化微调由-gradio-驱动)
|
||||
- [构建 Docker](#构建-docker)
|
||||
- [利用 vLLM 部署 OpenAI API](#利用-vllm-部署-openai-api)
|
||||
- [从魔搭社区下载](#从魔搭社区下载)
|
||||
- [从魔乐社区下载](#从魔乐社区下载)
|
||||
- [使用 W&B 面板](#使用-wb-面板)
|
||||
- [使用 SwanLab 面板](#使用-swanlab-面板)
|
||||
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
||||
- [协议](#协议)
|
||||
- [引用](#引用)
|
||||
- [致谢](#致谢)
|
||||
|
||||
## 项目特色
|
||||
|
||||
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Qwen2-VL、DeepSeek、Yi、Gemma、ChatGLM、Phi 等等。
|
||||
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。
|
||||
- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。
|
||||
- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、[Muon](https://github.com/KellerJordan/Muon)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。
|
||||
- **实用技巧**:[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)、[Unsloth](https://github.com/unslothai/unsloth)、[Liger Kernel](https://github.com/linkedin/Liger-Kernel)、RoPE scaling、NEFTune 和 rsLoRA。
|
||||
- **广泛任务**:多轮对话、工具调用、图像理解、视觉定位、视频识别和语音理解等等。
|
||||
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow、[SwanLab](https://github.com/SwanHubX/SwanLab) 等等。
|
||||
- **极速推理**:基于 [vLLM](https://github.com/vllm-project/vllm) 或 [SGLang](https://github.com/sgl-project/sglang) 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
||||
|
||||
### 最新模型的 Day-N 微调适配
|
||||
|
||||
| 适配时间 | 模型名称 |
|
||||
| ------------ | ------------------------------------------------------------ |
|
||||
| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / InternLM 3 / MiniCPM-o-2.6 |
|
||||
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
|
||||
|
||||
## 官方博客
|
||||
|
||||
- [使用 LLaMA-Factory 微调 Qwen2.5-VL 实现自动驾驶场景微调](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory)(中文)
|
||||
- [通过亚马逊 SageMaker HyperPod 上的 LLaMA-Factory 增强多模态模型银行文档的视觉信息提取](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/)(英文)
|
||||
- [Easy Dataset × LLaMA Factory: 让大模型高效学习领域知识](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9)(中文)
|
||||
|
||||
<details><summary>全部博客</summary>
|
||||
|
||||
- [LLaMA Factory:微调 DeepSeek-R1-Distill-Qwen-7B 模型实现新闻标题分类器](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b)(中文)
|
||||
- [基于 Amazon SageMaker 和 LLaMA-Factory 打造一站式无代码模型微调部署平台 Model Hub](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)(中文)
|
||||
- [LLaMA Factory 多模态微调实践:微调 Qwen2-VL 构建文旅大模型](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl)(中文)
|
||||
- [LLaMA Factory:微调LLaMA3模型实现角色扮演](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)(中文)
|
||||
|
||||
</details>
|
||||
|
||||
## 更新日志
|
||||
|
||||
[25/04/28] 我们支持了 **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** 系列模型的微调。
|
||||
|
||||
[25/04/21] 我们支持了 **[Muon](https://github.com/KellerJordan/Muon)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@tianshijing](https://github.com/tianshijing) 的 PR。
|
||||
|
||||
[25/04/16] 我们支持了 **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** 模型的微调。查看 [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) 以使用。
|
||||
|
||||
[25/04/14] 我们支持了 **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** 和 **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** 模型的微调。
|
||||
|
||||
[25/04/06] 我们支持了 **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** 模型的微调。查看 [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) 以使用。
|
||||
|
||||
<details><summary>展开日志</summary>
|
||||
|
||||
[25/03/31] 我们支持了 **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** 模型的微调。查看 [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) 以使用。
|
||||
|
||||
[25/03/15] 我们支持了 **[SGLang](https://github.com/sgl-project/sglang)** 推理后端,请使用 `infer_backend: sglang` 启用。
|
||||
|
||||
[25/03/12] 我们支持了 **[Gemma 3](https://huggingface.co/blog/gemma3)** 模型的微调。
|
||||
|
||||
[25/02/24] 我们宣布开源 **[EasyR1](https://github.com/hiyouga/EasyR1)**,一个高效可扩展的多模态强化学习框架,支持高效的 GRPO 训练。
|
||||
|
||||
[25/02/11] 我们支持了在导出模型时保存 **[Ollama](https://github.com/ollama/ollama)** 配置文件。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[25/02/05] 我们支持了在语音理解任务上微调 **[Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct)** 和 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 模型。
|
||||
|
||||
[25/01/31] 我们支持了 **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** 和 **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** 模型的微调。
|
||||
|
||||
[25/01/15] 我们支持了 **[APOLLO](https://arxiv.org/abs/2412.05270)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[25/01/14] 我们支持了 **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** 和 **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** 模型的微调。 感谢 [@BUAADreamer](https://github.com/BUAADreamer) 的 PR.
|
||||
|
||||
[25/01/14] 我们支持了 **[InternLM 3](https://huggingface.co/collections/internlm/)** 模型的微调。感谢 [@hhaAndroid](https://github.com/hhaAndroid) 的 PR。
|
||||
|
||||
[25/01/10] 我们支持了 **[Phi-4](https://huggingface.co/microsoft/phi-4)** 模型的微调。
|
||||
|
||||
[24/12/21] 我们支持了使用 **[SwanLab](https://github.com/SwanHubX/SwanLab)** 跟踪与可视化实验。详细用法请参考 [此部分](#使用-swanlab-面板)。
|
||||
|
||||
[24/11/27] 我们支持了 **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** 模型的微调和 **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** 数据集。
|
||||
|
||||
[24/10/09] 我们支持了从 **[魔乐社区](https://modelers.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔乐社区下载)。
|
||||
|
||||
[24/09/19] 我们支持了 **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** 模型的微调。
|
||||
|
||||
[24/08/30] 我们支持了 **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** 模型的微调。感谢 [@simonJJJ](https://github.com/simonJJJ) 的 PR。
|
||||
|
||||
[24/08/27] 我们支持了 **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**。请使用 `enable_liger_kernel: true` 来加速训练。
|
||||
|
||||
[24/08/09] 我们支持了 **[Adam-mini](https://github.com/zyushun/Adam-mini)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@relic-yuexi](https://github.com/relic-yuexi) 的 PR。
|
||||
|
||||
[24/07/04] 我们支持了[无污染打包训练](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing)。请使用 `neat_packing: true` 参数。感谢 [@chuan298](https://github.com/chuan298) 的 PR。
|
||||
|
||||
[24/06/16] 我们支持了 **[PiSSA](https://arxiv.org/abs/2404.02948)** 算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/06/07] 我们支持了 **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** 和 **[GLM-4](https://github.com/THUDM/GLM-4)** 模型的微调。
|
||||
|
||||
[24/05/26] 我们支持了 **[SimPO](https://arxiv.org/abs/2405.14734)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/05/20] 我们支持了 **PaliGemma** 系列模型的微调。注意 PaliGemma 是预训练模型,你需要使用 `paligemma` 模板进行微调使其获得对话能力。
|
||||
|
||||
[24/05/18] 我们支持了 **[KTO](https://arxiv.org/abs/2402.01306)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。
|
||||
|
||||
[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。
|
||||
|
||||
[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
||||
|
||||
[24/03/31] 我们支持了 **[ORPO](https://arxiv.org/abs/2403.07691)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/03/21] 我们的论文 "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" 可在 arXiv 上查看!
|
||||
|
||||
[24/03/20] 我们支持了能在 2x24GB GPU 上微调 70B 模型的 **FSDP+QLoRA**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/03/07] 我们支持了 **[GaLore](https://arxiv.org/abs/2403.03507)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。
|
||||
|
||||
[24/02/28] 我们支持了 **[DoRA](https://arxiv.org/abs/2402.09353)** 微调。请使用 `use_dora: true` 参数进行 DoRA 微调。
|
||||
|
||||
[24/02/15] 我们支持了 [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro) 提出的**块扩展**方法。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
||||
|
||||
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall_zh` 即可使模型获得工具调用能力。
|
||||
|
||||
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
||||
|
||||
[23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。
|
||||
|
||||
[23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔搭社区下载)。
|
||||
|
||||
[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `neftune_noise_alpha: 5` 参数启用 NEFTune。
|
||||
|
||||
[23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `shift_attn: true` 参数以启用该功能。
|
||||
|
||||
[23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `flash_attn: fa2` 参数以启用 FlashAttention-2。
|
||||
|
||||
[23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `rope_scaling: linear` 参数训练模型或使用 `rope_scaling: dynamic` 参数评估模型。
|
||||
|
||||
[23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
[23/07/31] 我们支持了**数据流式加载**。请使用 `streaming: true` 和 `max_steps: 10000` 参数来流式加载数据集。
|
||||
|
||||
[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
|
||||
|
||||
[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
|
||||
|
||||
[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
|
||||
|
||||
[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。
|
||||
|
||||
[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
|
||||
|
||||
[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。详细用法请参照 [examples](examples/README_zh.md)。
|
||||
|
||||
</details>
|
||||
|
||||
> [!TIP]
|
||||
> 如果您无法使用最新的功能,请尝试重新拉取代码并再次安装 LLaMA-Factory。
|
||||
|
||||
## 模型
|
||||
|
||||
| 模型名 | 参数量 | Template |
|
||||
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
|
||||
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
||||
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
||||
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
||||
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
|
||||
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
||||
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
||||
| [Gemma 3](https://huggingface.co/google) | 1B/4B/12B/27B | gemma3/gemma (1B) |
|
||||
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/THUDM) | 9B/32B | glm4/glmz1 |
|
||||
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
||||
| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
|
||||
| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
|
||||
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
|
||||
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
||||
| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
|
||||
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
|
||||
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
||||
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
||||
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
||||
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
|
||||
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
||||
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
||||
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
||||
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
||||
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
|
||||
| [MiniCPM](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
|
||||
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
||||
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
|
||||
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
||||
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
|
||||
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
||||
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
||||
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
||||
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
||||
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
||||
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
|
||||
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
||||
| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
||||
| [Qwen3 (MoE)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/235B | qwen3 |
|
||||
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
||||
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
|
||||
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
|
||||
| [Seed Coder](https://huggingface.co/ByteDance-Seed) | 8B | seed_coder |
|
||||
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
|
||||
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
||||
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
||||
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
||||
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
|
||||
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
||||
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
||||
|
||||
> [!NOTE]
|
||||
> 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
|
||||
>
|
||||
> 请务必在训练和推理时采用**完全一致**的模板。
|
||||
>
|
||||
> \*:您需要从 main 分支安装 `transformers` 并使用 `DISABLE_VERSION_CHECK=1` 来跳过版本检查。
|
||||
>
|
||||
> \*\*:您需要安装特定版本的 `transformers` 以使用该模型。
|
||||
|
||||
项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。
|
||||
|
||||
您也可以在 [template.py](src/llamafactory/data/template.py) 中添加自己的对话模板。
|
||||
|
||||
## 训练方法
|
||||
|
||||
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
||||
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
||||
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| KTO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
| SimPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
|
||||
> [!TIP]
|
||||
> 有关 PPO 的实现细节,请参考[此博客](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html)。
|
||||
|
||||
## 数据集
|
||||
|
||||
<details><summary>预训练数据集</summary>
|
||||
|
||||
- [Wiki Demo (en)](data/wiki_demo.txt)
|
||||
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
||||
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
||||
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
||||
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
||||
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
||||
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
||||
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
||||
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
||||
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
||||
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>指令微调数据集</summary>
|
||||
|
||||
- [Identity (en&zh)](data/identity.json)
|
||||
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
||||
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
||||
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
||||
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
||||
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
||||
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
||||
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
||||
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
||||
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
||||
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
||||
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
||||
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
||||
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
||||
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
||||
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
||||
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
||||
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
||||
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
||||
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
||||
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
||||
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
||||
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
||||
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
||||
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
||||
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
||||
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
||||
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
||||
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
||||
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
||||
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
||||
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
||||
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
||||
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
||||
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
||||
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
||||
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
|
||||
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
||||
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
|
||||
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
|
||||
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
|
||||
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
|
||||
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
||||
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
|
||||
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
||||
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
||||
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
||||
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
||||
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
||||
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
||||
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
||||
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
||||
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>偏好数据集</summary>
|
||||
|
||||
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
||||
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
||||
- [COIG-P (zh)](https://huggingface.co/datasets/m-a-p/COIG-P)
|
||||
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
|
||||
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
|
||||
- [RLAIF-V (en)](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset)
|
||||
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
||||
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
||||
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
||||
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
||||
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
||||
|
||||
</details>
|
||||
|
||||
部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
|
||||
|
||||
```bash
|
||||
pip install --upgrade huggingface_hub
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
## 软硬件依赖
|
||||
|
||||
| 必需项 | 至少 | 推荐 |
|
||||
| ------------ | ------- | --------- |
|
||||
| python | 3.9 | 3.10 |
|
||||
| torch | 2.0.0 | 2.6.0 |
|
||||
| torchvision | 0.15.0 | 0.21.0 |
|
||||
| transformers | 4.45.0 | 4.50.0 |
|
||||
| datasets | 2.16.0 | 3.2.0 |
|
||||
| accelerate | 0.34.0 | 1.2.1 |
|
||||
| peft | 0.14.0 | 0.15.1 |
|
||||
| trl | 0.8.6 | 0.9.6 |
|
||||
|
||||
| 可选项 | 至少 | 推荐 |
|
||||
| ------------ | ------- | --------- |
|
||||
| CUDA | 11.6 | 12.2 |
|
||||
| deepspeed | 0.10.0 | 0.16.4 |
|
||||
| bitsandbytes | 0.39.0 | 0.43.1 |
|
||||
| vllm | 0.4.3 | 0.8.2 |
|
||||
| flash-attn | 2.5.6 | 2.7.2 |
|
||||
|
||||
### 硬件依赖
|
||||
|
||||
\* *估算值*
|
||||
|
||||
| 方法 | 精度 | 7B | 14B | 30B | 70B | `x`B |
|
||||
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
|
||||
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
|
||||
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
|
||||
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
|
||||
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
|
||||
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
|
||||
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
|
||||
|
||||
## 如何使用
|
||||
|
||||
### 安装 LLaMA Factory
|
||||
|
||||
> [!IMPORTANT]
|
||||
> 此步骤为必需。
|
||||
|
||||
#### 从源码安装
|
||||
|
||||
```bash
|
||||
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
||||
cd LLaMA-Factory
|
||||
pip install -e ".[torch,metrics]" --no-build-isolation
|
||||
```
|
||||
|
||||
可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、aqlm、vllm、sglang、galore、apollo、badam、adam-mini、qwen、minicpm_v、modelscope、openmind、swanlab、dev
|
||||
|
||||
#### 从镜像安装
|
||||
|
||||
```bash
|
||||
docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
|
||||
```
|
||||
|
||||
该镜像基于 Ubuntu 22.04(x86\_64)、CUDA 12.4、Python 3.11、PyTorch 2.6.0 和 Flash-attn 2.7.4 构建。
|
||||
|
||||
查看全部镜像:https://hub.docker.com/r/hiyouga/llamafactory/tags
|
||||
|
||||
请参阅[构建 Docker](#构建-docker) 来重新构建镜像。
|
||||
|
||||
<details><summary>使用 <b>uv</b> 构建虚拟环境</summary>
|
||||
|
||||
使用 [uv](https://github.com/astral-sh/uv) 创建隔离的 Python 环境:
|
||||
|
||||
```bash
|
||||
uv sync --extra torch --extra metrics --prerelease=allow
|
||||
```
|
||||
|
||||
在环境中运行 LLaMA-Factory:
|
||||
|
||||
```bash
|
||||
uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>Windows 用户指南</summary>
|
||||
|
||||
#### 安装 PyTorch
|
||||
|
||||
Windows 平台需要额外手动安装 GPU 版本的 PyTorch 依赖包,您可以参考[官方网站](https://pytorch.org/get-started/locally/)和以下命令安装并测试 PyTorch 是否正确安装。
|
||||
|
||||
```bash
|
||||
pip uninstall torch torchvision torchaudio
|
||||
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
|
||||
python -c "import torch; print(torch.cuda.is_available())"
|
||||
```
|
||||
|
||||
如果看到 `True` 则说明安装成功。
|
||||
|
||||
若遇到类似 `Can't pickle local object` 的报错,请设置 `dataloader_num_workers: 0`。
|
||||
|
||||
#### 安装 BitsAndBytes
|
||||
|
||||
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。
|
||||
|
||||
```bash
|
||||
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
||||
```
|
||||
|
||||
#### 安装 Flash Attention-2
|
||||
|
||||
如果要在 Windows 平台上开启 FlashAttention-2,请使用 [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) 中的脚本自行编译与安装。
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>昇腾 NPU 用户指南</summary>
|
||||
|
||||
在昇腾 NPU 设备上安装 LLaMA Factory 时,请升级 Python 到 3.10 及以上,并需要指定额外依赖项,使用 `pip install -e ".[torch-npu,metrics]"` 命令安装。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令:
|
||||
|
||||
```bash
|
||||
# 请替换 URL 为 CANN 版本和设备型号对应的 URL
|
||||
# 安装 CANN Toolkit
|
||||
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
|
||||
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
|
||||
|
||||
# 安装 CANN Kernels
|
||||
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
|
||||
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
|
||||
|
||||
# 设置环境变量
|
||||
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
||||
```
|
||||
|
||||
| 依赖项 | 至少 | 推荐 |
| ------------ | ------- | -------------- |
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
| torch | 2.1.0 | 2.4.0 |
| torch-npu | 2.1.0 | 2.4.0.post2 |
| deepspeed | 0.13.2 | 0.13.2 |
| vllm-ascend | - | 0.7.3 |

请使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定运算设备。
|
||||
|
||||
如果遇到无法正常推理的情况,请尝试设置 `do_sample: false`。
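以下为示意命令(设备编号请按实际环境调整,命令行覆盖参数的写法与后文 vLLM 部署一节相同):

```bash
# 指定 0 号 NPU 运行推理,并关闭采样
ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli chat examples/inference/llama3_lora_sft.yaml do_sample=false
```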
|
||||
|
||||
下载预构建 Docker 镜像:[32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
|
||||
|
||||
#### 安装 BitsAndBytes
|
||||
|
||||
如果要在 Ascend NPU 上进行基于 bitsandbytes 的 QLoRA 量化微调,请执行如下步骤:
|
||||
|
||||
1. 手动编译 bitsandbytes:请参考[安装文档](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU)完成 NPU 版的 bitsandbytes 安装,编译要求环境 cmake 版本不低于 3.22.1,g++ 版本不低于 12.x。
|
||||
|
||||
```bash
|
||||
# 从源码安装 bitsandbytes
|
||||
# 克隆 bitsandbytes 仓库, Ascend NPU 目前在 multi-backend-refactor 中支持
|
||||
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
|
||||
cd bitsandbytes/
|
||||
|
||||
# 安装依赖
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
# 安装编译工具依赖,该步骤在不同系统上命令有所不同,供参考
|
||||
apt-get install -y build-essential cmake
|
||||
|
||||
# 编译 & 安装
|
||||
cmake -DCOMPUTE_BACKEND=npu -S .
|
||||
make
|
||||
pip install .
|
||||
```
|
||||
|
||||
2. 安装 transformers 的 main 分支版本。
|
||||
|
||||
```bash
|
||||
git clone -b main https://github.com/huggingface/transformers.git
|
||||
cd transformers
|
||||
pip install .
|
||||
```
|
||||
|
||||
3. 在训练参数中设置 `double_quantization: false`,可参考[示例](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml)。
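以下为示意命令,直接运行上文引用的示例配置(按文中说明,该配置中应已包含 `double_quantization: false`):

```bash
# 运行仓库提供的 NPU QLoRA 示例配置
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
```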
|
||||
|
||||
</details>
|
||||
|
||||
### 数据准备
|
||||
|
||||
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope / Modelers 上的数据集或加载本地数据集。
|
||||
|
||||
> [!NOTE]
|
||||
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。
|
||||
|
||||
您也可以使用 **[Easy Dataset](https://github.com/ConardLi/easy-dataset)** 或 **[GraphGen](https://github.com/open-sciencelab/GraphGen)** 构建用于微调的合成数据。
|
||||
|
||||
### 快速开始
|
||||
|
||||
下面三行命令分别对 Llama3-8B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。
|
||||
|
||||
> [!TIP]
|
||||
> 使用 `llamafactory-cli help` 显示帮助信息。
|
||||
>
|
||||
> 遇到报错请先看[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)。
|
||||
|
||||
### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动)
|
||||
|
||||
```bash
|
||||
llamafactory-cli webui
|
||||
```
|
||||
|
||||
### 构建 Docker
|
||||
|
||||
CUDA 用户:
|
||||
|
||||
```bash
|
||||
cd docker/docker-cuda/
|
||||
docker compose up -d
|
||||
docker compose exec llamafactory bash
|
||||
```
|
||||
|
||||
昇腾 NPU 用户:
|
||||
|
||||
```bash
|
||||
cd docker/docker-npu/
|
||||
docker compose up -d
|
||||
docker compose exec llamafactory bash
|
||||
```
|
||||
|
||||
AMD ROCm 用户:
|
||||
|
||||
```bash
|
||||
cd docker/docker-rocm/
|
||||
docker compose up -d
|
||||
docker compose exec llamafactory bash
|
||||
```
|
||||
|
||||
<details><summary>不使用 Docker Compose 构建</summary>
|
||||
|
||||
CUDA 用户:
|
||||
|
||||
```bash
|
||||
docker build -f ./docker/docker-cuda/Dockerfile \
|
||||
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||
--build-arg EXTRAS=metrics \
|
||||
-t llamafactory:latest .
|
||||
|
||||
docker run -dit --ipc=host --gpus=all \
|
||||
-p 7860:7860 \
|
||||
-p 8000:8000 \
|
||||
--name llamafactory \
|
||||
llamafactory:latest
|
||||
|
||||
docker exec -it llamafactory bash
|
||||
```
|
||||
|
||||
昇腾 NPU 用户:
|
||||
|
||||
```bash
|
||||
docker build -f ./docker/docker-npu/Dockerfile \
|
||||
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||
--build-arg EXTRAS=torch-npu,metrics \
|
||||
-t llamafactory:latest .
|
||||
|
||||
docker run -dit --ipc=host \
|
||||
-v /usr/local/dcmi:/usr/local/dcmi \
|
||||
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
||||
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
||||
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
||||
-p 7860:7860 \
|
||||
-p 8000:8000 \
|
||||
--device /dev/davinci0 \
|
||||
--device /dev/davinci_manager \
|
||||
--device /dev/devmm_svm \
|
||||
--device /dev/hisi_hdc \
|
||||
--name llamafactory \
|
||||
llamafactory:latest
|
||||
|
||||
docker exec -it llamafactory bash
|
||||
```
|
||||
|
||||
AMD ROCm 用户:
|
||||
|
||||
```bash
|
||||
docker build -f ./docker/docker-rocm/Dockerfile \
|
||||
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||
--build-arg EXTRAS=metrics \
|
||||
-t llamafactory:latest .
|
||||
|
||||
docker run -dit --ipc=host \
|
||||
-p 7860:7860 \
|
||||
-p 8000:8000 \
|
||||
--device /dev/kfd \
|
||||
--device /dev/dri \
|
||||
--name llamafactory \
|
||||
llamafactory:latest
|
||||
|
||||
docker exec -it llamafactory bash
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>使用数据卷</summary>
|
||||
|
||||
您可以通过移除 Dockerfile 中 `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` 的注释来使用数据卷。
|
||||
|
||||
在运行容器(`docker run`)时使用参数 `-v ./hf_cache:/root/.cache/huggingface` 等来挂载数据卷,完整示例见下。各个数据卷的含义如下。
|
||||
|
||||
- `hf_cache`:使用宿主机的 Hugging Face 缓存文件夹。
|
||||
- `shared_data`:宿主机中存放数据集的文件夹路径。
|
||||
- `output`:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。
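
以下为挂载三个数据卷的示意命令(宿主机路径 `./shared_data` 与 `./output` 仅为假设,请按实际情况调整):

```bash
# 运行容器时挂载 Hugging Face 缓存、数据集目录与导出目录
docker run -dit --ipc=host --gpus=all \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./shared_data:/app/shared_data \
    -v ./output:/app/output \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest
```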
|
||||
|
||||
</details>
|
||||
|
||||
### 利用 vLLM 部署 OpenAI API
|
||||
|
||||
```bash
|
||||
API_PORT=8000 llamafactory-cli api examples/inference/llama3.yaml infer_backend=vllm vllm_enforce_eager=true
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> API 文档请查阅[这里](https://platform.openai.com/docs/api-reference/chat/create)。
|
||||
>
|
||||
> 示例:[图像理解](scripts/api_example/test_image.py) | [工具调用](scripts/api_example/test_toolcall.py)
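
服务启动后,可参考下面的示意请求验证接口是否可用(其中的模型名 `llama3` 仅为占位,请以实际部署为准):

```bash
# 调用 OpenAI 兼容的对话接口
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "llama3", "messages": [{"role": "user", "content": "你好"}]}'
```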
|
||||
|
||||
### 从魔搭社区下载
|
||||
|
||||
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
||||
|
||||
```bash
|
||||
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
||||
```
|
||||
|
||||
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。
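以下为示意命令(模型 ID 取自上文示例,配置文件与对话模板请按所用模型调整):

```bash
export USE_MODELSCOPE_HUB=1
llamafactory-cli chat examples/inference/llama3.yaml model_name_or_path=LLM-Research/Meta-Llama-3-8B-Instruct
```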
|
||||
|
||||
### 从魔乐社区下载
|
||||
|
||||
您也可以通过下述方法,使用魔乐社区下载数据集和模型。
|
||||
|
||||
```bash
|
||||
export USE_OPENMIND_HUB=1 # Windows 使用 `set USE_OPENMIND_HUB=1`
|
||||
```
|
||||
|
||||
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔乐社区](https://modelers.cn/models)查看所有可用的模型,例如 `TeleAI/TeleChat-7B-pt`。
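以下为示意命令(模型 ID 取自上文示例,配置文件与对话模板请按所用模型调整):

```bash
export USE_OPENMIND_HUB=1
llamafactory-cli chat examples/inference/llama3.yaml model_name_or_path=TeleAI/TeleChat-7B-pt
```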
|
||||
|
||||
### 使用 W&B 面板
|
||||
|
||||
若要使用 [Weights & Biases](https://wandb.ai) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
||||
|
||||
```yaml
|
||||
report_to: wandb
|
||||
run_name: test_run # 可选
|
||||
```
|
||||
|
||||
在启动训练任务时,将 `WANDB_API_KEY` 设置为[密钥](https://wandb.ai/authorize)来登录 W&B 账户。
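以下为示意命令(密钥为占位符,`report_to` 与 `run_name` 即上方 yaml 中的参数):

```bash
# 通过环境变量提供 W&B 密钥后启动训练
export WANDB_API_KEY=<your_api_key>
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml report_to=wandb run_name=test_run
```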
|
||||
|
||||
### 使用 SwanLab 面板
|
||||
|
||||
若要使用 [SwanLab](https://github.com/SwanHubX/SwanLab) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
||||
|
||||
```yaml
|
||||
use_swanlab: true
|
||||
swanlab_run_name: test_run # 可选
|
||||
```
|
||||
|
||||
在启动训练任务时,登录 SwanLab 账户有以下三种方式:
|
||||
|
||||
方式一:在 yaml 文件中添加 `swanlab_api_key=<your_api_key>` ,并设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
||||
方式二:将环境变量 `SWANLAB_API_KEY` 设置为你的 [API 密钥](https://swanlab.cn/settings)。
|
||||
方式三:启动前使用 `swanlab login` 命令完成登录。
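
以下为方式二与方式三的示意(API 密钥为占位符):

```bash
# 方式二:通过环境变量提供 API 密钥
export SWANLAB_API_KEY=<your_api_key>
# 方式三:启动训练前交互式登录
swanlab login
```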
|
||||
|
||||
## 使用了 LLaMA Factory 的项目
|
||||
|
||||
如果您有项目希望添加至下述列表,请通过邮件联系或者创建一个 PR。
|
||||
|
||||
<details><summary>点击显示</summary>
|
||||
|
||||
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
||||
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
||||
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
||||
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
||||
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
||||
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
||||
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
||||
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
||||
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
||||
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
||||
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
||||
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
||||
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
||||
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
||||
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
||||
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
||||
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
||||
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
||||
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
||||
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
||||
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
||||
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
||||
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
||||
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
||||
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
||||
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
||||
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
||||
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
||||
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
||||
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
||||
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
||||
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
||||
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
||||
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
||||
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
||||
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
||||
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
||||
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
||||
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
||||
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
||||
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
||||
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
||||
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
||||
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
||||
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
||||
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
||||
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
||||
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
||||
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
||||
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
||||
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
||||
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
||||
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
||||
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
||||
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
||||
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
||||
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
||||
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
||||
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
||||
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
||||
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
||||
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh's Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
||||
1. Li et al. Calibrating LLMs with Preference Optimization on Thought Trees for Generating Rationale in Science Question Scoring. 2024. [[arxiv]](https://arxiv.org/abs/2406.19949)
|
||||
1. Yang et al. Financial Knowledge Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2407.00365)
|
||||
1. Lin et al. DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging. 2024. [[arxiv]](https://arxiv.org/abs/2407.01470)
|
||||
1. Bako et al. Evaluating the Semantic Profiling Abilities of LLMs for Natural Language Utterances in Data Visualization. 2024. [[arxiv]](https://arxiv.org/abs/2407.06129)
|
||||
1. Huang et al. RoLoRA: Fine-tuning Rotated Outlier-free LLMs for Effective Weight-Activation Quantization. 2024. [[arxiv]](https://arxiv.org/abs/2407.08044)
|
||||
1. Jiang et al. LLM-Collaboration on Automatic Science Journalism for the General Audience. 2024. [[arxiv]](https://arxiv.org/abs/2407.09756)
|
||||
1. Inouye et al. Applied Auto-tuning on LoRA Hyperparameters. 2024. [[paper]](https://scholarcommons.scu.edu/cseng_senior/272/)
|
||||
1. Qi et al. Research on Tibetan Tourism Viewpoints information generation system based on LLM. 2024. [[arxiv]](https://arxiv.org/abs/2407.13561)
|
||||
1. Xu et al. Course-Correction: Safety Alignment Using Synthetic Preferences. 2024. [[arxiv]](https://arxiv.org/abs/2407.16637)
|
||||
1. Sun et al. LAMBDA: A Large Model Based Data Agent. 2024. [[arxiv]](https://arxiv.org/abs/2407.17535)
|
||||
1. Zhu et al. CollectiveSFT: Scaling Large Language Models for Chinese Medical Benchmark with Collective Instructions in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2407.19705)
|
||||
1. Yu et al. Correcting Negative Bias in Large Language Models through Negative Attention Score Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2408.00137)
|
||||
1. Xie et al. The Power of Personalized Datasets: Advancing Chinese Composition Writing for Elementary School through Targeted Model Fine-Tuning. IALP 2024. [[paper]](https://www.asianlp.sg/conferences/ialp2024/proceedings/papers/IALP2024_P055.pdf)
|
||||
1. Liu et al. Instruct-Code-Llama: Improving Capabilities of Language Model in Competition Level Code Generation by Online Judge Feedback. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_11)
|
||||
1. Wang et al. Cybernetic Sentinels: Unveiling the Impact of Safety Data Selection on Model Security in Supervised Fine-Tuning. ICIC 2024. [[paper]](https://link.springer.com/chapter/10.1007/978-981-97-5669-8_23)
|
||||
1. Xia et al. Understanding the Performance and Estimating the Cost of LLM Fine-Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2408.04693)
|
||||
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
|
||||
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
|
||||
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
|
||||
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
|
||||
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
|
||||
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
|
||||
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
|
||||
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
|
||||
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
|
||||
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
||||
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
|
||||
1. **[AutoRE](https://github.com/THUDM/AutoRE)**:基于大语言模型的文档级关系抽取系统。
|
||||
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**:在 Windows 主机上利用英伟达 RTX 设备进行大型语言模型微调的开发包。
|
||||
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**:一个低代码构建多 Agent 大模型应用的开发工具,支持基于 LLaMA Factory 的模型微调。
|
||||
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**:一个全链路 RAG 检索模型微调、推理和蒸馏代码库。[[blog]](https://zhuanlan.zhihu.com/p/987727357)
|
||||
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**:一个魔改后的代码库,通过 Ring Attention 支持长序列的 SFT 和 DPO 训练。
|
||||
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**:由 NovaSky AI 微调的低成本类 o1 长推理模型。
|
||||
1. **[WeClone](https://github.com/xming521/WeClone)**:从聊天记录创造数字分身的一站式解决方案。
|
||||
|
||||
</details>
|
||||
|
||||
## 协议
|
||||
|
||||
本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
|
||||
|
||||
使用模型权重时,请遵循对应的模型协议:[Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
||||
|
||||
## 引用
|
||||
|
||||
如果您觉得此项目有帮助,请考虑按照以下格式引用:
|
||||
|
||||
```bibtex
|
||||
@inproceedings{zheng2024llamafactory,
|
||||
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
||||
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
||||
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
||||
address={Bangkok, Thailand},
|
||||
publisher={Association for Computational Linguistics},
|
||||
year={2024},
|
||||
url={http://arxiv.org/abs/2403.13372}
|
||||
}
|
||||
```
|
||||
|
||||
## 致谢
|
||||
|
||||
本项目受益于 [PEFT](https://github.com/huggingface/peft)、[TRL](https://github.com/huggingface/trl)、[QLoRA](https://github.com/artidoro/qlora) 和 [FastChat](https://github.com/lm-sys/FastChat),感谢以上诸位作者的付出。
|
||||
|
||||
## Star History
|
||||
|
||||

|
||||
38  assets/alaya_new.svg  (Normal file — diff suppressed because one or more lines are too long; After Width: | Height: | Size: 47 KiB)

506  data/README.md
@@ -1,53 +1,475 @@
|
||||
Data format in `dataset_info.json`:
|
||||
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
|
||||
|
||||
The `dataset_info.json` file should be put in the `dataset_dir` directory. You can change `dataset_dir` to use another directory. The default value is `./data`.
|
||||
|
||||
Currently we support datasets in **alpaca** and **sharegpt** format. Allowed file types include json, jsonl, csv, parquet, arrow.
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"hf_hub_url": "the name of the dataset repository on the HuggingFace hub. (if specified, ignore below 3 arguments)",
|
||||
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore below 2 arguments)",
|
||||
"file_name": "the name of the dataset file in the this directory. (required if above are not specified)",
|
||||
"file_sha1": "the SHA-1 hash value of the dataset file. (optional)",
|
||||
"columns": {
|
||||
"prompt": "the name of the column in the datasets containing the prompts. (default: instruction)",
|
||||
"query": "the name of the column in the datasets containing the queries. (default: input)",
|
||||
"response": "the name of the column in the datasets containing the responses. (default: output)",
|
||||
"history": "the name of the column in the datasets containing the history of chat. (default: None)"
|
||||
}
|
||||
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
||||
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
||||
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name and cloud_file_name)",
|
||||
"cloud_file_name": "the name of the dataset file in s3/gcs cloud storage. (if specified, ignore file_name)",
|
||||
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
||||
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
||||
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
||||
"subset": "the name of the subset. (optional, default: None)",
|
||||
"split": "the name of dataset split to be used. (optional, default: train)",
|
||||
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
||||
"num_samples": "the number of samples in the dataset to be used. (optional, default: None)",
|
||||
"columns (optional)": {
|
||||
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
||||
"query": "the column name in the dataset containing the queries. (default: input)",
|
||||
"response": "the column name in the dataset containing the responses. (default: output)",
|
||||
"history": "the column name in the dataset containing the histories. (default: None)",
|
||||
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
||||
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
||||
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
||||
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
||||
"videos": "the column name in the dataset containing the videos inputs. (default: None)",
|
||||
"audios": "the column name in the dataset containing the audios inputs. (default: None)",
|
||||
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
||||
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
||||
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
||||
},
|
||||
"tags (optional, used for the sharegpt format)": {
|
||||
"role_tag": "the key in the message represents the identity. (default: from)",
|
||||
"content_tag": "the key in the message represents the content. (default: value)",
|
||||
"user_tag": "the value of the role_tag represents the user. (default: human)",
|
||||
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
|
||||
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
|
||||
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
|
||||
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`dataset_info.json` 中的数据集定义格式:
|
||||
## Alpaca Format
|
||||
|
||||
### Supervised Fine-Tuning Dataset
|
||||
|
||||
* [Example dataset](alpaca_en_demo.json)
|
||||
|
||||
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the user prompt, i.e. the user prompt will be `instruction\ninput`. The `output` column represents the model response.
|
||||
|
||||
For reasoning models, if the dataset contains chain-of-thought (CoT), the CoT needs to be placed in the model responses, such as `<think>cot</think>output`.
|
||||
|
||||
The `system` column will be used as the system prompt if specified.
|
||||
|
||||
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"hf_hub_url": "HuggingFace上的项目地址(若指定,则忽略下列三个参数)",
|
||||
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略下列两个参数)",
|
||||
"file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
|
||||
"file_sha1": "数据集文件的SHA-1哈希值(可选)",
|
||||
"columns": {
|
||||
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
||||
"query": "数据集代表请求的表头名称(默认:input)",
|
||||
"response": "数据集代表回答的表头名称(默认:output)",
|
||||
"history": "数据集代表历史对话的表头名称(默认:None)"
|
||||
}
|
||||
[
|
||||
{
|
||||
"instruction": "user instruction (required)",
|
||||
"input": "user input (optional)",
|
||||
"output": "model response (required)",
|
||||
"system": "system prompt (optional)",
|
||||
"history": [
|
||||
["user instruction in the first round (optional)", "model response in the first round (optional)"],
|
||||
["user instruction in the second round (optional)", "model response in the second round (optional)"]
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"columns": {
|
||||
"prompt": "instruction",
|
||||
"query": "input",
|
||||
"response": "output",
|
||||
"system": "system",
|
||||
"history": "history"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
部分预置数据集简介:
|
||||
> [!TIP]
|
||||
> If the model has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought (CoT), LLaMA-Factory will automatically add empty CoT to the data. When `enable_thinking` is `True` (slow thinking, by default), the empty CoT will be added to the model responses and loss computation will be considered; otherwise (fast thinking), it will be added to the user prompts and loss computation will be ignored. Please keep the `enable_thinking` parameter consistent during training and inference.
|
||||
>
|
||||
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.
|
||||
|
||||
| 数据集名称 | 规模 | 描述 |
| --- | --- | --- |
| [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) | 52k | 斯坦福大学开源的 Alpaca 数据集,训练了 Alpaca 这类早期基于 LLaMA 的模型 |
| [Stanford Alpaca (Chinese)](https://github.com/ymcui/Chinese-LLaMA-Alpaca) | 51k | 使用 ChatGPT 翻译的 Alpaca 数据集 |
| [GPT-4 Generated Data](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) | 100k+ | 基于 GPT-4 的 self-instruction 数据集 |
| [BELLE 2M](https://huggingface.co/datasets/BelleGroup/train_2M_CN) | 2m | 包含约 200 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的中文指令数据 |
| [BELLE 1M](https://huggingface.co/datasets/BelleGroup/train_1M_CN) | 1m | 包含约 100 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的中文指令数据 |
| [BELLE 0.5M](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN) | 500k | 包含约 50 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的中文指令数据 |
| [BELLE Dialogue 0.4M](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M) | 400k | 包含约 40 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的个性化角色对话数据,包含角色介绍 |
| [BELLE School Math 0.25M](https://huggingface.co/datasets/BelleGroup/school_math_0.25M) | 250k | 包含约 25 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的中文数学题数据,包含解题过程 |
| [BELLE Multiturn Chat 0.8M](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) | 800k | 包含约 80 万条由 [BELLE](https://github.com/LianjiaTech/BELLE) 项目生成的用户与助手的多轮对话 |
| [Guanaco Dataset](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) | 100k+ | 包含日文、简繁体中文、英文等多类数据,数据集原用于 Guanaco 模型训练 |
| [Firefly 1.1M](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M) | 1.1M | 中文对话大模型 firefly(流萤)的中文数据集,包含多个 NLP 任务 |
| [CodeAlpaca 20k](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k) | 20k | 英文代码生成任务数据集 |
| [Alpaca CoT](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT) | 6M | 用于微调的指令数据集集合 |
| [Web QA](https://huggingface.co/datasets/suolyer/webqa) | 36k | 百度知道汇集的中文问答数据集 |
| [UltraChat](https://github.com/thunlp/UltraChat) | 1.57M | 清华 NLP 发布的大规模多轮对话数据集 |
|
||||
### Pre-training Dataset
|
||||
|
||||
注:BELLE 数据集是由 ChatGPT 产生的数据集,不保证数据准确性,所有类 GPT 模型产生的 self-instruction 数据集均不能保证其准确性。
|
||||
- [Example dataset](c4_demo.jsonl)
|
||||
|
||||
In pre-training, only the `text` column will be used for model learning.
|
||||
|
||||
```json
|
||||
[
|
||||
{"text": "document"},
|
||||
{"text": "document"}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"columns": {
|
||||
"prompt": "text"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Preference Dataset
|
||||
|
||||
Preference datasets are used for reward modeling, DPO training, ORPO and SimPO training.
|
||||
|
||||
It requires a better response in the `chosen` column and a worse response in the `rejected` column.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "user instruction (required)",
|
||||
"input": "user input (optional)",
|
||||
"chosen": "chosen answer (required)",
|
||||
"rejected": "rejected answer (required)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"ranking": true,
|
||||
"columns": {
|
||||
"prompt": "instruction",
|
||||
"query": "input",
|
||||
"chosen": "chosen",
|
||||
"rejected": "rejected"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### KTO Dataset
|
||||
|
||||
An additional column `kto_tag` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||
|
||||
### Multimodal Image Dataset
|
||||
|
||||
An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||
|
||||
### Multimodal Video Dataset
|
||||
|
||||
An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||
|
||||
### Multimodal Audio Dataset
|
||||
|
||||
An additional column `audios` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
||||
|
||||
## Sharegpt Format
|
||||
|
||||
### Supervised Fine-Tuning Dataset
|
||||
|
||||
- [Example dataset](glaive_toolcall_en_demo.json)
|
||||
|
||||
Compared to the alpaca format, the sharegpt format allows the datasets to have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
|
||||
|
||||
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "user instruction"
|
||||
},
|
||||
{
|
||||
"from": "function_call",
|
||||
"value": "tool arguments"
|
||||
},
|
||||
{
|
||||
"from": "observation",
|
||||
"value": "tool result"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
}
|
||||
],
|
||||
"system": "system prompt (optional)",
|
||||
"tools": "tool description (optional)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"system": "system",
|
||||
"tools": "tools"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Pre-training Dataset
|
||||
|
||||
Not yet supported, please use the [alpaca](#alpaca-format) format.
|
||||
|
||||
### Preference Dataset
|
||||
|
||||
- [Example dataset](dpo_en_demo.json)
|
||||
|
||||
Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "user instruction"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
},
|
||||
{
|
||||
"from": "human",
|
||||
"value": "user instruction"
|
||||
}
|
||||
],
|
||||
"chosen": {
|
||||
"from": "gpt",
|
||||
"value": "chosen answer (required)"
|
||||
},
|
||||
"rejected": {
|
||||
"from": "gpt",
|
||||
"value": "rejected answer (required)"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"ranking": true,
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"chosen": "chosen",
|
||||
"rejected": "rejected"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### KTO Dataset
|
||||
|
||||
- [Example dataset](kto_en_demo.json)
|
||||
|
||||
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "user instruction"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
}
|
||||
],
|
||||
"kto_tag": "human feedback [true/false] (required)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"kto_tag": "kto_tag"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multimodal Image Dataset
|
||||
|
||||
- [Example dataset](mllm_demo.json)
|
||||
|
||||
Multimodal image datasets require an `images` column containing the paths to the input images.
|
||||
|
||||
The number of images should be identical to the number of `<image>` tokens in the conversations.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>user instruction"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
}
|
||||
],
|
||||
"images": [
|
||||
"image path (required)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"images": "images"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multimodal Video Dataset
|
||||
|
||||
- [Example dataset](mllm_video_demo.json)
|
||||
|
||||
Multimodal video datasets require a `videos` column containing the paths to the input videos.
|
||||
|
||||
The number of videos should be identical to the number of `<video>` tokens in the conversations.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<video>user instruction"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
}
|
||||
],
|
||||
"videos": [
|
||||
"video path (required)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"videos": "videos"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multimodal Audio Dataset
|
||||
|
||||
- [Example dataset](mllm_audio_demo.json)
|
||||
|
||||
Multimodal audio datasets require an `audios` column containing the paths to the input audios.
|
||||
|
||||
The number of audios should be identical to the number of `<audio>` tokens in the conversations.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<audio>user instruction"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "model response"
|
||||
}
|
||||
],
|
||||
"audios": [
|
||||
"audio path (required)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"audios": "audios"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### OpenAI Format
|
||||
|
||||
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "system prompt (optional)"
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "user instruction"
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "model response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "messages"
|
||||
},
|
||||
"tags": {
|
||||
"role_tag": "role",
|
||||
"content_tag": "content",
|
||||
"user_tag": "user",
|
||||
"assistant_tag": "assistant",
|
||||
"system_tag": "system"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
478  data/README_zh.md  (Normal file)
@@ -0,0 +1,478 @@
|
||||
[dataset_info.json](dataset_info.json) 包含了所有可用的数据集。如果您希望使用自定义数据集,请**务必**在 `dataset_info.json` 文件中添加*数据集描述*,并通过修改 `dataset: 数据集名称` 配置来使用数据集。
|
||||
|
||||
其中 `dataset_info.json` 文件应放置在 `dataset_dir` 目录下。您可以通过修改 `dataset_dir` 参数来使用其他目录。默认值为 `./data`。
|
||||
|
||||
目前我们支持 **alpaca** 格式和 **sharegpt** 格式的数据集。允许的文件类型包括 json、jsonl、csv、parquet 和 arrow。
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
||||
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
||||
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
||||
"file_name": "该目录下数据集文件夹或文件的名称(若上述参数未指定,则此项必需)",
|
||||
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
||||
"ranking": "是否为偏好数据集(可选,默认:False)",
|
||||
"subset": "数据集子集的名称(可选,默认:None)",
|
||||
"split": "所使用的数据集切分(可选,默认:train)",
|
||||
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
||||
"num_samples": "该数据集所使用的样本数量。(可选,默认:None)",
|
||||
"columns(可选)": {
|
||||
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
||||
"query": "数据集代表请求的表头名称(默认:input)",
|
||||
"response": "数据集代表回答的表头名称(默认:output)",
|
||||
"history": "数据集代表历史对话的表头名称(默认:None)",
|
||||
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
||||
"system": "数据集代表系统提示的表头名称(默认:None)",
|
||||
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
||||
"images": "数据集代表图像输入的表头名称(默认:None)",
|
||||
"videos": "数据集代表视频输入的表头名称(默认:None)",
|
||||
"audios": "数据集代表音频输入的表头名称(默认:None)",
|
||||
"chosen": "数据集代表更优回答的表头名称(默认:None)",
|
||||
"rejected": "数据集代表更差回答的表头名称(默认:None)",
|
||||
"kto_tag": "数据集代表 KTO 标签的表头名称(默认:None)"
|
||||
},
|
||||
"tags(可选,用于 sharegpt 格式)": {
|
||||
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
||||
"content_tag": "消息中代表文本内容的键名(默认:value)",
|
||||
"user_tag": "消息中代表用户的 role_tag(默认:human)",
|
||||
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
||||
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
||||
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
||||
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system column)"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Alpaca 格式
|
||||
|
||||
### 指令监督微调数据集
|
||||
|
||||
- [样例数据集](alpaca_zh_demo.json)
|
||||
|
||||
在指令监督微调时,`instruction` 列对应的内容会与 `input` 列对应的内容拼接后作为提示词,即提示词为 `instruction\ninput`。而 `output` 列对应的内容为模型回答。
|
||||
|
||||
对于推理类模型的微调,如果数据集包含思维链,则需要把思维链放在模型回答中,例如 `<think>cot</think>output`。
|
||||
|
||||
如果指定,`system` 列对应的内容将被作为系统提示词。
|
||||
|
||||
`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容**也会被用于模型学习**。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "用户指令(必填)",
|
||||
"input": "用户输入(选填)",
|
||||
"output": "模型回答(必填)",
|
||||
"system": "系统提示词(选填)",
|
||||
"history": [
|
||||
["第一轮指令(选填)", "第一轮回答(选填)"],
|
||||
["第二轮指令(选填)", "第二轮回答(选填)"]
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"columns": {
|
||||
"prompt": "instruction",
|
||||
"query": "input",
|
||||
"response": "output",
|
||||
"system": "system",
|
||||
"history": "history"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> 如果模型本身具备推理能力(如 Qwen3)而数据集不包含思维链,LLaMA-Factory 会自动为数据添加空思维链。当 `enable_thinking` 为 `True` 时(慢思考,默认),空思维链会添加到模型回答中并且计算损失,否则会添加到用户指令中并且不计算损失(快思考)。请在训练和推理时保持 `enable_thinking` 参数一致。
|
||||
>
|
||||
> 如果您希望训练包含思维链的数据时使用慢思考,训练不包含思维链的数据时使用快思考,可以设置 `enable_thinking` 为 `None`。但该功能较为复杂,请谨慎使用。
|
||||
|
||||
### 预训练数据集
|
||||
|
||||
- [样例数据集](c4_demo.jsonl)
|
||||
|
||||
在预训练时,只有 `text` 列中的内容会用于模型学习。
|
||||
|
||||
```json
|
||||
[
|
||||
{"text": "document"},
|
||||
{"text": "document"}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"columns": {
|
||||
"prompt": "text"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 偏好数据集
|
||||
|
||||
偏好数据集用于奖励模型训练、DPO 训练、ORPO 训练和 SimPO 训练。
|
||||
|
||||
它需要在 `chosen` 列中提供更优的回答,并在 `rejected` 列中提供更差的回答。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"instruction": "用户指令(必填)",
|
||||
"input": "用户输入(选填)",
|
||||
"chosen": "优质回答(必填)",
|
||||
"rejected": "劣质回答(必填)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"ranking": true,
|
||||
"columns": {
|
||||
"prompt": "instruction",
|
||||
"query": "input",
|
||||
"chosen": "chosen",
|
||||
"rejected": "rejected"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### KTO 数据集
|
||||
|
||||
KTO 数据集需要提供额外的 `kto_tag` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||
|
||||
### 多模态图像数据集
|
||||
|
||||
多模态图像数据集需要提供额外的 `images` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||
|
||||
### 多模态视频数据集
|
||||
|
||||
多模态视频数据集需要提供额外的 `videos` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||
|
||||
### 多模态音频数据集
|
||||
|
||||
多模态音频数据集需要提供额外的 `audios` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
||||
|
||||
## Sharegpt 格式
|
||||
|
||||
### 指令监督微调数据集
|
||||
|
||||
- [样例数据集](glaive_toolcall_zh_demo.json)
|
||||
|
||||
相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。
|
||||
|
||||
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "用户指令"
|
||||
},
|
||||
{
|
||||
"from": "function_call",
|
||||
"value": "工具参数"
|
||||
},
|
||||
{
|
||||
"from": "observation",
|
||||
"value": "工具结果"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
}
|
||||
],
|
||||
"system": "系统提示词(选填)",
|
||||
"tools": "工具描述(选填)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"system": "system",
|
||||
"tools": "tools"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 预训练数据集
|
||||
|
||||
尚不支持,请使用 [alpaca](#alpaca-格式) 格式。
|
||||
|
||||
### 偏好数据集
|
||||
|
||||
- [样例数据集](dpo_zh_demo.json)
|
||||
|
||||
Sharegpt 格式的偏好数据集同样需要在 `chosen` 列中提供更优的消息,并在 `rejected` 列中提供更差的消息。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "用户指令"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
},
|
||||
{
|
||||
"from": "human",
|
||||
"value": "用户指令"
|
||||
}
|
||||
],
|
||||
"chosen": {
|
||||
"from": "gpt",
|
||||
"value": "优质回答"
|
||||
},
|
||||
"rejected": {
|
||||
"from": "gpt",
|
||||
"value": "劣质回答"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"ranking": true,
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"chosen": "chosen",
|
||||
"rejected": "rejected"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### KTO 数据集
|
||||
|
||||
- [样例数据集](kto_en_demo.json)
|
||||
|
||||
KTO 数据集需要额外添加一个 `kto_tag` 列,包含 bool 类型的人类反馈。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "用户指令"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
}
|
||||
],
|
||||
"kto_tag": "人类反馈 [true/false](必填)"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"kto_tag": "kto_tag"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 多模态图像数据集
|
||||
|
||||
- [样例数据集](mllm_demo.json)
|
||||
|
||||
多模态图像数据集需要额外添加一个 `images` 列,包含输入图像的路径。
|
||||
|
||||
注意图片的数量必须与文本中所有 `<image>` 标记的数量严格一致。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image><image>用户指令"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
}
|
||||
],
|
||||
"images": [
|
||||
"图像路径(必填)",
|
||||
"图像路径(必填)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"images": "images"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 多模态视频数据集
|
||||
|
||||
- [样例数据集](mllm_video_demo.json)
|
||||
|
||||
多模态视频数据集需要额外添加一个 `videos` 列,包含输入视频的路径。
|
||||
|
||||
注意视频的数量必须与文本中所有 `<video>` 标记的数量严格一致。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<video><video>用户指令"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
}
|
||||
],
|
||||
"videos": [
|
||||
"视频路径(必填)",
|
||||
"视频路径(必填)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"videos": "videos"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 多模态音频数据集
|
||||
|
||||
- [样例数据集](mllm_audio_demo.json)
|
||||
|
||||
多模态音频数据集需要额外添加一个 `audios` 列,包含输入音频的路径。
|
||||
|
||||
注意音频的数量必须与文本中所有 `<audio>` 标记的数量严格一致。
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<audio><audio>用户指令"
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "模型回答"
|
||||
}
|
||||
],
|
||||
"audios": [
|
||||
"音频路径(必填)",
|
||||
"音频路径(必填)"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
||||
|
||||
```json
|
||||
"数据集名称": {
|
||||
"file_name": "data.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "conversations",
|
||||
"audios": "audios"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### OpenAI Format

The OpenAI format is simply a special case of the sharegpt format, in which the first message may be a system prompt.

```json
[
  {
    "messages": [
      {
        "role": "system",
        "content": "system prompt (optional)"
      },
      {
        "role": "user",
        "content": "user instruction"
      },
      {
        "role": "assistant",
        "content": "model response"
      }
    ]
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "messages"
  },
  "tags": {
    "role_tag": "role",
    "content_tag": "content",
    "user_tag": "user",
    "assistant_tag": "assistant",
    "system_tag": "system"
  }
}
```

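The `tags` block tells the loader which keys and role names to expect in the OpenAI-style messages. A rough sketch of the remapping it implies is shown below, with the tag values hard-coded for illustration instead of being read from `dataset_info.json`.

```python
import json

# user_tag / assistant_tag / system_tag mapped onto the internal sharegpt roles.
ROLE_MAP = {"user": "human", "assistant": "gpt", "system": "system"}

with open("data.json", encoding="utf-8") as f:
    samples = json.load(f)

for sample in samples:
    for message in sample["messages"]:        # columns.messages
        role = ROLE_MAP[message["role"]]      # role_tag
        content = message["content"]          # content_tag
        print(f"{role}: {content[:40]}")
```
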
@@ -1 +0,0 @@
3779ddbc040543ab1834ef216c983d6fcc06cc9a
@@ -1 +0,0 @@
fc9a6a3458caca2af8dafc6181773fe10c6d8657
@@ -1 +0,0 @@
25508714b7879a1e5a6764ba7f979a980f549f1a
@@ -1 +0,0 @@
7cb6a7d11455bddc3d495750a2392683d775b184
@@ -1,79 +1,82 @@
|
||||
import json
|
||||
import datasets
|
||||
from typing import Any, Dict, List
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import datasets
|
||||
|
||||
|
||||
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
||||
|
||||
_DESCRIPTION = "BELLE multiturn chat dataset."
|
||||
|
||||
_CITATION = """\
|
||||
@article{belle2023exploring,
|
||||
title={Exploring the Impact of Instruction Data Scaling on Large Language Models: An Empirical Study on Real-World Use Cases},
|
||||
title={Exploring the Impact of Instruction Data Scaling on Large Language Models},
|
||||
author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
|
||||
journal={arXiv preprint arXiv:2303.14742},
|
||||
year={2023}
|
||||
}
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M"
|
||||
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M"
|
||||
_LICENSE = "gpl-3.0"
|
||||
_URL = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
|
||||
_URL = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
|
||||
|
||||
|
||||
class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
||||
|
||||
VERSION = datasets.Version("0.0.0")
|
||||
|
||||
def _info(self) -> datasets.DatasetInfo:
|
||||
features = datasets.Features({
|
||||
"instruction": datasets.Value("string"),
|
||||
"output": datasets.Value("string"),
|
||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
||||
})
|
||||
def _info(self):
|
||||
features = datasets.Features(
|
||||
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION
|
||||
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
||||
file_path = dl_manager.download(_URL)
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepath": file_path
|
||||
}
|
||||
)
|
||||
]
|
||||
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
|
||||
|
||||
def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat with history
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
def _generate_examples(self, filepath: str):
|
||||
with open(filepath, encoding="utf-8") as f:
|
||||
for key, row in enumerate(f):
|
||||
data = json.loads(row)
|
||||
conversations = []
|
||||
prompt = data["instruction"].strip()
|
||||
response = data["output"].strip()
|
||||
|
||||
assist_idx = prompt.rfind("Assistant:")
|
||||
human_idx = prompt.rfind("Human:")
|
||||
query = prompt[human_idx+6:assist_idx].strip()
|
||||
query = prompt[human_idx + 6 : assist_idx].strip()
|
||||
prompt = prompt[:human_idx].strip()
|
||||
history = []
|
||||
conversations.insert(0, {"from": "gpt", "value": response})
|
||||
conversations.insert(0, {"from": "human", "value": query})
|
||||
|
||||
while prompt.rfind("Assistant:") != -1:
|
||||
assist_idx = prompt.rfind("Assistant:")
|
||||
human_idx = prompt.rfind("Human:")
|
||||
if human_idx != -1:
|
||||
old_query = prompt[human_idx+6:assist_idx].strip()
|
||||
old_resp = prompt[assist_idx+10:].strip()
|
||||
history.insert(0, (old_query, old_resp))
|
||||
old_query = prompt[human_idx + 6 : assist_idx].strip()
|
||||
old_resp = prompt[assist_idx + 10 :].strip()
|
||||
conversations.insert(0, {"from": "gpt", "value": old_resp})
|
||||
conversations.insert(0, {"from": "human", "value": old_query})
|
||||
else:
|
||||
break
|
||||
prompt = prompt[:human_idx].strip()
|
||||
|
||||
yield key, {
|
||||
"instruction": query,
|
||||
"output": response,
|
||||
"history": history
|
||||
}
|
||||
yield key, {"conversations": conversations}
|
||||
|
||||
300 data/c4_demo.jsonl Normal file
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
f5cb08305ff5dc9c17a09809c54c8c8834aadc70
@@ -1 +0,0 @@
aee47b7b443496e37808d7f34ef10403ff99bcc3
@@ -1,46 +0,0 @@
|
||||
import json
|
||||
import datasets
|
||||
from typing import Any, Dict, List
|
||||
|
||||
|
||||
_DESCRIPTION = "An example of dataset for LLaMA."
|
||||
_CITATION = ""
|
||||
_HOMEPAGE = ""
|
||||
_LICENSE = ""
|
||||
_URL = "examples.json"
|
||||
|
||||
|
||||
class ExampleDataset(datasets.GeneratorBasedBuilder):
|
||||
|
||||
VERSION = datasets.Version("0.0.0")
|
||||
|
||||
def _info(self) -> datasets.DatasetInfo:
|
||||
features = datasets.Features({
|
||||
"instruction": datasets.Value("string"),
|
||||
"input": datasets.Value("string"),
|
||||
"output": datasets.Value("string"),
|
||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
||||
})
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
||||
file_path = dl_manager.download(_URL)
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepath": file_path
|
||||
}
|
||||
)
|
||||
]
|
||||
|
||||
def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]:
|
||||
example_dataset = json.load(open(filepath, "r", encoding="utf-8"))
|
||||
for key, example in enumerate(example_dataset):
|
||||
yield key, example
|
||||
@@ -1,80 +1,85 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import datasets
|
||||
from typing import Any, Dict, List
|
||||
|
||||
|
||||
_DESCRIPTION = "Human preference data about helpfulness and harmlessness for ChatGLM."
|
||||
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
||||
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
|
||||
_CITATION = ""
|
||||
_HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf"
|
||||
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf"
|
||||
_LICENSE = "mit"
|
||||
_URL = "https://huggingface.co/datasets/Anthropic/hh-rlhf/resolve/main/"
|
||||
_URL = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf/resolve/main/"
|
||||
_URLS = {
|
||||
"train": [
|
||||
_URL + "harmless-base/train.jsonl.gz",
|
||||
_URL + "helpful-base/train.jsonl.gz",
|
||||
_URL + "helpful-online/train.jsonl.gz",
|
||||
_URL + "helpful-rejection-sampled/train.jsonl.gz"
|
||||
_URL + "helpful-rejection-sampled/train.jsonl.gz",
|
||||
],
|
||||
"test": [
|
||||
_URL + "harmless-base/test.jsonl.gz",
|
||||
_URL + "helpful-base/test.jsonl.gz",
|
||||
_URL + "helpful-online/test.jsonl.gz",
|
||||
_URL + "helpful-rejection-sampled/test.jsonl.gz"
|
||||
]
|
||||
_URL + "helpful-rejection-sampled/test.jsonl.gz",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
||||
|
||||
VERSION = datasets.Version("0.0.0")
|
||||
|
||||
def _info(self) -> datasets.DatasetInfo:
|
||||
features = datasets.Features({
|
||||
"instruction": datasets.Value("string"),
|
||||
"output": datasets.Sequence(datasets.Value("string")),
|
||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
||||
})
|
||||
features = datasets.Features(
|
||||
{
|
||||
"instruction": datasets.Value("string"),
|
||||
"chosen": datasets.Value("string"),
|
||||
"rejected": datasets.Value("string"),
|
||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
|
||||
}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION
|
||||
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
||||
file_path = dl_manager.download_and_extract(_URLS)
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepaths": file_path["train"]
|
||||
}
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TEST,
|
||||
gen_kwargs={
|
||||
"filepaths": file_path["test"]
|
||||
}
|
||||
)
|
||||
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_path["train"]}),
|
||||
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
|
||||
]
|
||||
|
||||
def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
|
||||
def _generate_examples(self, filepaths: list[str]):
|
||||
key = 0
|
||||
for filepath in filepaths:
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
with open(filepath, encoding="utf-8") as f:
|
||||
for row in f:
|
||||
data = json.loads(row)
|
||||
chosen = data["chosen"]
|
||||
rejected = data["rejected"]
|
||||
|
||||
assist_idx = rejected.rfind("\n\nAssistant: ")
|
||||
r_reject = rejected[assist_idx+13:].strip()
|
||||
r_reject = rejected[assist_idx + 13 :].strip()
|
||||
assist_idx = chosen.rfind("\n\nAssistant: ")
|
||||
r_accept = chosen[assist_idx+13:].strip()
|
||||
r_accept = chosen[assist_idx + 13 :].strip()
|
||||
|
||||
human_idx = chosen.rfind("\n\nHuman: ")
|
||||
query = chosen[human_idx+9:assist_idx].strip()
|
||||
query = chosen[human_idx + 9 : assist_idx].strip()
|
||||
prompt = chosen[:human_idx]
|
||||
history = []
|
||||
|
||||
@@ -82,16 +87,12 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
||||
assist_idx = prompt.rfind("\n\nAssistant: ")
|
||||
human_idx = prompt.rfind("\n\nHuman: ")
|
||||
if human_idx != -1:
|
||||
old_query = prompt[human_idx+9:assist_idx].strip()
|
||||
old_resp = prompt[assist_idx+13:].strip()
|
||||
old_query = prompt[human_idx + 9 : assist_idx].strip()
|
||||
old_resp = prompt[assist_idx + 13 :].strip()
|
||||
history.insert(0, (old_query, old_resp))
|
||||
else:
|
||||
break
|
||||
prompt = prompt[:human_idx]
|
||||
|
||||
yield key, {
|
||||
"instruction": query,
|
||||
"output": [r_accept, r_reject],
|
||||
"history": history
|
||||
}
|
||||
yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
|
||||
key += 1
|
||||
|
||||
BIN data/mllm_demo_data/1.mp3 Normal file (binary file not shown)
BIN data/mllm_demo_data/1.mp4 Normal file (binary file not shown)
BIN data/mllm_demo_data/2.avi Normal file (binary file not shown)
BIN data/mllm_demo_data/2.wav Normal file (binary file not shown)
BIN data/mllm_demo_data/3.flac Normal file (binary file not shown)
BIN data/mllm_demo_data/3.mp4 Normal file (binary file not shown)
BIN data/mllm_demo_data/4.mp3 Normal file (binary file not shown)
BIN data/mllm_demo_data/4.mp4 Normal file (binary file not shown)
@@ -1 +0,0 @@
274079ea921762be356de85b18f13fa60b7ba8cb
@@ -1 +0,0 @@
0a57fbc1d8cb08a8cd71c5eb8425cf59206ffed6
@@ -1,2 +0,0 @@
|
||||
{"id": 0,"title": "大卫·亨利","content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"}
|
||||
{"id": 1,"title": "大卫·亨利","content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"}
|
||||
@@ -1,13 +1,31 @@
|
||||
import json
|
||||
import datasets
|
||||
from typing import Any, Dict, List
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import datasets
|
||||
|
||||
|
||||
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
||||
|
||||
_DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
|
||||
|
||||
_CITATION = """\
|
||||
@misc{UltraChat,
|
||||
author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and Qin, Yujia and Liu, Zhiyuan and Sun, Maosong and Zhou, Bowen},
|
||||
author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and others},
|
||||
title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
|
||||
year = {2023},
|
||||
publisher = {GitHub},
|
||||
@@ -16,61 +34,41 @@ _CITATION = """\
|
||||
}
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://huggingface.co/datasets/stingning/ultrachat"
|
||||
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
|
||||
_LICENSE = "cc-by-nc-4.0"
|
||||
_BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"
|
||||
_BASE_DATA_URL = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl"
|
||||
|
||||
|
||||
class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
||||
|
||||
class UltraChat(datasets.GeneratorBasedBuilder):
|
||||
VERSION = datasets.Version("0.0.0")
|
||||
|
||||
def _info(self) -> datasets.DatasetInfo:
|
||||
features = datasets.Features({
|
||||
"instruction": datasets.Value("string"),
|
||||
"output": datasets.Value("string"),
|
||||
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
|
||||
})
|
||||
def _info(self):
|
||||
features = datasets.Features(
|
||||
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION
|
||||
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
||||
file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(9)] # multiple shards
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepaths": file_paths
|
||||
}
|
||||
)
|
||||
]
|
||||
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
||||
file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
|
||||
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]
|
||||
|
||||
def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
|
||||
def _generate_examples(self, filepaths: list[str]):
|
||||
for filepath in filepaths:
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
with open(filepath, encoding="utf-8") as f:
|
||||
for row in f:
|
||||
try:
|
||||
data = json.loads(row)
|
||||
except:
|
||||
except Exception:
|
||||
continue
|
||||
key = data["id"]
|
||||
content = data["data"]
|
||||
key: int = data["id"]
|
||||
content: list[str] = data["data"]
|
||||
if len(content) % 2 == 1:
|
||||
content.pop(-1)
|
||||
if len(content) < 2:
|
||||
continue
|
||||
|
||||
query = content[-2]
|
||||
response = content[-1]
|
||||
history = [[content[2*i], content[2*i+1]] for i in range(len(content) // 2 - 1)]
|
||||
|
||||
yield key, {
|
||||
"instruction": query,
|
||||
"output": response,
|
||||
"history": history
|
||||
}
|
||||
conversations = [
|
||||
{"from": "human" if i % 2 == 0 else "gpt", "value": content[i]} for i in range(len(content))
|
||||
]
|
||||
yield key, {"conversations": conversations}
|
||||
|
||||
File diff suppressed because one or more lines are too long
66 docker/docker-cuda/Dockerfile Normal file
@@ -0,0 +1,66 @@
|
||||
# https://hub.docker.com/r/hiyouga/pytorch/tags
|
||||
ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=metrics
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG HTTP_PROXY=""
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Rebuild flash attention
|
||||
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
|
||||
pip uninstall -y ninja && \
|
||||
pip install --no-cache-dir ninja && \
|
||||
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||
fi
|
||||
|
||||
# Set up volumes
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
55 docker/docker-cuda/Dockerfile.base Normal file
@@ -0,0 +1,55 @@
|
||||
# Start from the pytorch official image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
|
||||
# https://hub.docker.com/r/pytorch/pytorch/tags
|
||||
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
|
||||
# Define installation arguments
|
||||
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
|
||||
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
||||
|
||||
# Set apt source
|
||||
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
|
||||
{ \
|
||||
echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
|
||||
} > /etc/apt/sources.list
|
||||
|
||||
# Install systemctl and wget
|
||||
RUN apt-get update && \
|
||||
apt-get install -y -o Dpkg::Options::="--force-confdef" systemd wget && \
|
||||
apt-get clean
|
||||
|
||||
# Install git and vim
|
||||
RUN apt-get update && \
|
||||
apt-get install -y git vim && \
|
||||
apt-get clean
|
||||
|
||||
# Install gcc and g++
|
||||
RUN apt-get update && \
|
||||
apt-get install -y gcc g++ && \
|
||||
apt-get clean
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install flash-attn-2.7.4.post1 (cxx11abi=False)
|
||||
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
|
||||
pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
|
||||
|
||||
# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
|
||||
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
|
||||
pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
25 docker/docker-cuda/docker-compose.yml Normal file
@@ -0,0 +1,25 @@
|
||||
services:
|
||||
llamafactory:
|
||||
build:
|
||||
dockerfile: ./docker/docker-cuda/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: metrics
|
||||
container_name: llamafactory
|
||||
ports:
|
||||
- "7860:7860"
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: "all"
|
||||
capabilities: [ gpu ]
|
||||
restart: unless-stopped
|
||||
58 docker/docker-npu/Dockerfile Normal file
@@ -0,0 +1,58 @@
|
||||
# https://hub.docker.com/r/ascendai/cann/tags
|
||||
ARG BASE_IMAGE=ascendai/cann:8.0.0-910b-ubuntu22.04-py3.11
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=torch-npu,metrics
|
||||
ARG HTTP_PROXY=""
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Set up volumes
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
28 docker/docker-npu/docker-compose.yml Normal file
@@ -0,0 +1,28 @@
|
||||
services:
|
||||
llamafactory:
|
||||
build:
|
||||
dockerfile: ./docker/docker-npu/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: torch-npu,metrics
|
||||
container_name: llamafactory
|
||||
volumes:
|
||||
- /usr/local/dcmi:/usr/local/dcmi
|
||||
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
|
||||
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
|
||||
- /etc/ascend_install.info:/etc/ascend_install.info
|
||||
ports:
|
||||
- "7860:7860"
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
devices:
|
||||
- /dev/davinci0
|
||||
- /dev/davinci_manager
|
||||
- /dev/devmm_svm
|
||||
- /dev/hisi_hdc
|
||||
restart: unless-stopped
|
||||
71 docker/docker-rocm/Dockerfile Normal file
@@ -0,0 +1,71 @@
|
||||
# https://hub.docker.com/r/rocm/pytorch/tags
|
||||
ARG BASE_IMAGE=rocm/pytorch:rocm6.4.1_ubuntu22.04_py3.10_pytorch_release_2.6.0
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=metrics
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG HTTP_PROXY=""
|
||||
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.3
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Reinstall pytorch rocm
|
||||
RUN pip uninstall -y torch torchvision torchaudio && \
|
||||
pip install --no-cache-dir --pre torch torchvision torchaudio --index-url "${PYTORCH_INDEX}"
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Rebuild flash attention
|
||||
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
|
||||
pip uninstall -y ninja && \
|
||||
pip install --no-cache-dir ninja && \
|
||||
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||
fi
|
||||
|
||||
# Set up volumes
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
21 docker/docker-rocm/docker-compose.yml Normal file
@@ -0,0 +1,21 @@
|
||||
services:
|
||||
llamafactory:
|
||||
build:
|
||||
dockerfile: ./docker/docker-rocm/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: metrics
|
||||
container_name: llamafactory
|
||||
ports:
|
||||
- "7860:7860"
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
restart: unless-stopped
|
||||
163 evaluation/ceval/ceval.py Normal file
@@ -0,0 +1,163 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import datasets
|
||||
import pandas as pd
|
||||
|
||||
|
||||
_CITATION = """\
|
||||
@article{huang2023ceval,
|
||||
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
|
||||
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and others},
|
||||
journal={arXiv preprint arXiv:2305.08322},
|
||||
year={2023}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
C-Eval is a comprehensive Chinese evaluation suite for foundation models.
|
||||
It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://cevalbenchmark.com"
|
||||
|
||||
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
||||
|
||||
_URL = "ceval.zip"
|
||||
|
||||
task_list = [
|
||||
"computer_network",
|
||||
"operating_system",
|
||||
"computer_architecture",
|
||||
"college_programming",
|
||||
"college_physics",
|
||||
"college_chemistry",
|
||||
"advanced_mathematics",
|
||||
"probability_and_statistics",
|
||||
"discrete_mathematics",
|
||||
"electrical_engineer",
|
||||
"metrology_engineer",
|
||||
"high_school_mathematics",
|
||||
"high_school_physics",
|
||||
"high_school_chemistry",
|
||||
"high_school_biology",
|
||||
"middle_school_mathematics",
|
||||
"middle_school_biology",
|
||||
"middle_school_physics",
|
||||
"middle_school_chemistry",
|
||||
"veterinary_medicine",
|
||||
"college_economics",
|
||||
"business_administration",
|
||||
"marxism",
|
||||
"mao_zedong_thought",
|
||||
"education_science",
|
||||
"teacher_qualification",
|
||||
"high_school_politics",
|
||||
"high_school_geography",
|
||||
"middle_school_politics",
|
||||
"middle_school_geography",
|
||||
"modern_chinese_history",
|
||||
"ideological_and_moral_cultivation",
|
||||
"logic",
|
||||
"law",
|
||||
"chinese_language_and_literature",
|
||||
"art_studies",
|
||||
"professional_tour_guide",
|
||||
"legal_professional",
|
||||
"high_school_chinese",
|
||||
"high_school_history",
|
||||
"middle_school_history",
|
||||
"civil_servant",
|
||||
"sports_science",
|
||||
"plant_protection",
|
||||
"basic_medicine",
|
||||
"clinical_medicine",
|
||||
"urban_and_rural_planner",
|
||||
"accountant",
|
||||
"fire_engineer",
|
||||
"environmental_impact_assessment_engineer",
|
||||
"tax_accountant",
|
||||
"physician",
|
||||
]
|
||||
|
||||
|
||||
class CevalConfig(datasets.BuilderConfig):
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
|
||||
|
||||
|
||||
class Ceval(datasets.GeneratorBasedBuilder):
|
||||
BUILDER_CONFIGS = [
|
||||
CevalConfig(
|
||||
name=task_name,
|
||||
)
|
||||
for task_name in task_list
|
||||
]
|
||||
|
||||
def _info(self):
|
||||
features = datasets.Features(
|
||||
{
|
||||
"id": datasets.Value("int32"),
|
||||
"question": datasets.Value("string"),
|
||||
"A": datasets.Value("string"),
|
||||
"B": datasets.Value("string"),
|
||||
"C": datasets.Value("string"),
|
||||
"D": datasets.Value("string"),
|
||||
"answer": datasets.Value("string"),
|
||||
"explanation": datasets.Value("string"),
|
||||
}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION,
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager):
|
||||
data_dir = dl_manager.download_and_extract(_URL)
|
||||
task_name = self.config.name
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TEST,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "test", f"{task_name}_test.csv"),
|
||||
},
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.VALIDATION,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "val", f"{task_name}_val.csv"),
|
||||
},
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "dev", f"{task_name}_dev.csv"),
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
def _generate_examples(self, filepath):
|
||||
df = pd.read_csv(filepath, encoding="utf-8")
|
||||
for i, instance in enumerate(df.to_dict(orient="records")):
|
||||
if "answer" not in instance.keys():
|
||||
instance["answer"] = ""
|
||||
if "explanation" not in instance.keys():
|
||||
instance["explanation"] = ""
|
||||
yield i, instance
|
||||
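Assuming the `ceval.zip` archive sits next to the `ceval.py` script added above, one way to load a single subject locally might look like the following; recent versions of `datasets` require `trust_remote_code=True` for script-based datasets, and script loading has been removed entirely in `datasets` 3.x, so treat this as a sketch rather than a guaranteed API.

```python
from datasets import load_dataset

# "computer_network" is one of the task names registered in task_list above.
ceval = load_dataset("evaluation/ceval/ceval.py", name="computer_network", trust_remote_code=True)
print(ceval["test"][0]["question"])
```
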
170 evaluation/cmmlu/cmmlu.py Normal file
@@ -0,0 +1,170 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import datasets
|
||||
import pandas as pd
|
||||
|
||||
|
||||
_CITATION = """\
|
||||
@article{li2023cmmlu,
|
||||
title={CMMLU: Measuring massive multitask language understanding in Chinese},
|
||||
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and others},
|
||||
journal={arXiv preprint arXiv:2306.09212},
|
||||
year={2023}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge
|
||||
and reasoning abilities of LLMs within the Chinese language and cultural context.
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://github.com/haonan-li/CMMLU"
|
||||
|
||||
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
||||
|
||||
_URL = "cmmlu.zip"
|
||||
|
||||
task_list = [
|
||||
"agronomy",
|
||||
"anatomy",
|
||||
"ancient_chinese",
|
||||
"arts",
|
||||
"astronomy",
|
||||
"business_ethics",
|
||||
"chinese_civil_service_exam",
|
||||
"chinese_driving_rule",
|
||||
"chinese_food_culture",
|
||||
"chinese_foreign_policy",
|
||||
"chinese_history",
|
||||
"chinese_literature",
|
||||
"chinese_teacher_qualification",
|
||||
"clinical_knowledge",
|
||||
"college_actuarial_science",
|
||||
"college_education",
|
||||
"college_engineering_hydrology",
|
||||
"college_law",
|
||||
"college_mathematics",
|
||||
"college_medical_statistics",
|
||||
"college_medicine",
|
||||
"computer_science",
|
||||
"computer_security",
|
||||
"conceptual_physics",
|
||||
"construction_project_management",
|
||||
"economics",
|
||||
"education",
|
||||
"electrical_engineering",
|
||||
"elementary_chinese",
|
||||
"elementary_commonsense",
|
||||
"elementary_information_and_technology",
|
||||
"elementary_mathematics",
|
||||
"ethnology",
|
||||
"food_science",
|
||||
"genetics",
|
||||
"global_facts",
|
||||
"high_school_biology",
|
||||
"high_school_chemistry",
|
||||
"high_school_geography",
|
||||
"high_school_mathematics",
|
||||
"high_school_physics",
|
||||
"high_school_politics",
|
||||
"human_sexuality",
|
||||
"international_law",
|
||||
"journalism",
|
||||
"jurisprudence",
|
||||
"legal_and_moral_basis",
|
||||
"logical",
|
||||
"machine_learning",
|
||||
"management",
|
||||
"marketing",
|
||||
"marxist_theory",
|
||||
"modern_chinese",
|
||||
"nutrition",
|
||||
"philosophy",
|
||||
"professional_accounting",
|
||||
"professional_law",
|
||||
"professional_medicine",
|
||||
"professional_psychology",
|
||||
"public_relations",
|
||||
"security_study",
|
||||
"sociology",
|
||||
"sports_science",
|
||||
"traditional_chinese_medicine",
|
||||
"virology",
|
||||
"world_history",
|
||||
"world_religions",
|
||||
]
|
||||
|
||||
|
||||
class CMMLUConfig(datasets.BuilderConfig):
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(version=datasets.Version("1.0.1"), **kwargs)
|
||||
|
||||
|
||||
class CMMLU(datasets.GeneratorBasedBuilder):
|
||||
BUILDER_CONFIGS = [
|
||||
CMMLUConfig(
|
||||
name=task_name,
|
||||
)
|
||||
for task_name in task_list
|
||||
]
|
||||
|
||||
def _info(self):
|
||||
features = datasets.Features(
|
||||
{
|
||||
"question": datasets.Value("string"),
|
||||
"A": datasets.Value("string"),
|
||||
"B": datasets.Value("string"),
|
||||
"C": datasets.Value("string"),
|
||||
"D": datasets.Value("string"),
|
||||
"answer": datasets.Value("string"),
|
||||
}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION,
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager):
|
||||
data_dir = dl_manager.download_and_extract(_URL)
|
||||
task_name = self.config.name
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TEST,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
|
||||
},
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
def _generate_examples(self, filepath):
|
||||
df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
|
||||
for i, instance in enumerate(df.to_dict(orient="records")):
|
||||
question = instance.pop("Question", "")
|
||||
answer = instance.pop("Answer", "")
|
||||
instance["question"] = question
|
||||
instance["answer"] = answer
|
||||
yield i, instance
|
||||
163 evaluation/mmlu/mmlu.py Normal file
@@ -0,0 +1,163 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import datasets
|
||||
import pandas as pd
|
||||
|
||||
|
||||
_CITATION = """\
|
||||
@article{hendryckstest2021,
|
||||
title={Measuring Massive Multitask Language Understanding},
|
||||
author={Dan Hendrycks and Collin Burns and others},
|
||||
journal={Proceedings of the International Conference on Learning Representations (ICLR)},
|
||||
year={2021}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart,
|
||||
Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://github.com/hendrycks/test"
|
||||
|
||||
_LICENSE = "MIT"
|
||||
|
||||
_URL = "mmlu.zip"
|
||||
|
||||
task_list = [
|
||||
"high_school_european_history",
|
||||
"business_ethics",
|
||||
"clinical_knowledge",
|
||||
"medical_genetics",
|
||||
"high_school_us_history",
|
||||
"high_school_physics",
|
||||
"high_school_world_history",
|
||||
"virology",
|
||||
"high_school_microeconomics",
|
||||
"econometrics",
|
||||
"college_computer_science",
|
||||
"high_school_biology",
|
||||
"abstract_algebra",
|
||||
"professional_accounting",
|
||||
"philosophy",
|
||||
"professional_medicine",
|
||||
"nutrition",
|
||||
"global_facts",
|
||||
"machine_learning",
|
||||
"security_studies",
|
||||
"public_relations",
|
||||
"professional_psychology",
|
||||
"prehistory",
|
||||
"anatomy",
|
||||
"human_sexuality",
|
||||
"college_medicine",
|
||||
"high_school_government_and_politics",
|
||||
"college_chemistry",
|
||||
"logical_fallacies",
|
||||
"high_school_geography",
|
||||
"elementary_mathematics",
|
||||
"human_aging",
|
||||
"college_mathematics",
|
||||
"high_school_psychology",
|
||||
"formal_logic",
|
||||
"high_school_statistics",
|
||||
"international_law",
|
||||
"high_school_mathematics",
|
||||
"high_school_computer_science",
|
||||
"conceptual_physics",
|
||||
"miscellaneous",
|
||||
"high_school_chemistry",
|
||||
"marketing",
|
||||
"professional_law",
|
||||
"management",
|
||||
"college_physics",
|
||||
"jurisprudence",
|
||||
"world_religions",
|
||||
"sociology",
|
||||
"us_foreign_policy",
|
||||
"high_school_macroeconomics",
|
||||
"computer_security",
|
||||
"moral_scenarios",
|
||||
"moral_disputes",
|
||||
"electrical_engineering",
|
||||
"astronomy",
|
||||
"college_biology",
|
||||
]
|
||||
|
||||
|
||||
class MMLUConfig(datasets.BuilderConfig):
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
|
||||
|
||||
|
||||
class MMLU(datasets.GeneratorBasedBuilder):
|
||||
BUILDER_CONFIGS = [
|
||||
MMLUConfig(
|
||||
name=task_name,
|
||||
)
|
||||
for task_name in task_list
|
||||
]
|
||||
|
||||
def _info(self):
|
||||
features = datasets.Features(
|
||||
{
|
||||
"question": datasets.Value("string"),
|
||||
"A": datasets.Value("string"),
|
||||
"B": datasets.Value("string"),
|
||||
"C": datasets.Value("string"),
|
||||
"D": datasets.Value("string"),
|
||||
"answer": datasets.Value("string"),
|
||||
}
|
||||
)
|
||||
return datasets.DatasetInfo(
|
||||
description=_DESCRIPTION,
|
||||
features=features,
|
||||
homepage=_HOMEPAGE,
|
||||
license=_LICENSE,
|
||||
citation=_CITATION,
|
||||
)
|
||||
|
||||
def _split_generators(self, dl_manager):
|
||||
data_dir = dl_manager.download_and_extract(_URL)
|
||||
task_name = self.config.name
|
||||
return [
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TEST,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "data", "test", f"{task_name}_test.csv"),
|
||||
},
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.VALIDATION,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "data", "val", f"{task_name}_val.csv"),
|
||||
},
|
||||
),
|
||||
datasets.SplitGenerator(
|
||||
name=datasets.Split.TRAIN,
|
||||
gen_kwargs={
|
||||
"filepath": os.path.join(data_dir, "data", "dev", f"{task_name}_dev.csv"),
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
def _generate_examples(self, filepath):
|
||||
df = pd.read_csv(filepath, header=None)
|
||||
df.columns = ["question", "A", "B", "C", "D", "answer"]
|
||||
|
||||
yield from enumerate(df.to_dict(orient="records"))
|
||||
292 examples/README.md Normal file
@@ -0,0 +1,292 @@
|
||||
We provide diverse examples of fine-tuning LLMs.
|
||||
|
||||
Make sure to execute these commands in the `LLaMA-Factory` directory.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [LoRA Fine-Tuning](#lora-fine-tuning)
|
||||
- [QLoRA Fine-Tuning](#qlora-fine-tuning)
|
||||
- [Full-Parameter Fine-Tuning](#full-parameter-fine-tuning)
|
||||
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
|
||||
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
|
||||
- [Extras](#extras)
|
||||
|
||||
Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to choose computing devices.
|
||||
|
||||
By default, LLaMA-Factory uses all visible computing devices.
|
||||
|
||||
Basic usage:
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
Advanced usage:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
|
||||
learning_rate=1e-5 \
|
||||
logging_steps=1
|
||||
```
|
||||
|
||||
```bash
|
||||
bash examples/train_lora/llama3_lora_sft.sh
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### LoRA Fine-Tuning
|
||||
|
||||
#### (Continuous) Pre-Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### DPO/ORPO/SimPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### Multimodal DPO/ORPO/SimPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### Reward Modeling
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
|
||||
```
|
||||
|
||||
#### PPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
|
||||
```
|
||||
|
||||
#### KTO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
|
||||
```
|
||||
|
||||
#### Preprocess Dataset
|
||||
|
||||
This is useful for large datasets; set `tokenized_path` in the config to load the preprocessed dataset.
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||
```
|
||||
|
||||
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
|
||||
|
||||
```bash
|
||||
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with Ray on 4 GPUs
|
||||
|
||||
```bash
|
||||
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||
```
|
||||
|
||||
### QLoRA Fine-Tuning
|
||||
|
||||
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on Ascend NPU
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||
```
|
||||
|
||||
### Full-Parameter Fine-Tuning
|
||||
|
||||
#### Supervised Fine-Tuning on Single Node
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Elastic and Fault-Tolerant Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
To launch an elastic job that tolerates up to `MAX_RESTARTS` restarts on failure, run the following on at least `MIN_NNODES` and at most `MAX_NNODES` nodes. `RDZV_ID` should be set to a unique job id (shared by all nodes participating in the job). See also [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html).
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
|
||||
```
|
||||
|
||||
### Merging LoRA Adapters and Quantization
|
||||
|
||||
#### Merge LoRA Adapters
|
||||
|
||||
Note: DO NOT use quantized model or `quantization_bit` when merging LoRA adapters.
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Quantizing Model using AutoGPTQ
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||
```
|
||||
|
||||
#### Save Ollama modelfile
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
### Inferring LoRA Fine-Tuned Models
|
||||
|
||||
#### Evaluation using vLLM's Multi-GPU Inference
|
||||
|
||||
```bash
|
||||
python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
|
||||
python scripts/eval_bleu_rouge.py generated_predictions.jsonl
|
||||
```
|
||||
|
||||
#### Use CLI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Use Web UI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Launch OpenAI-style API
|
||||
|
||||
```bash
|
||||
llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
### Extras
|
||||
|
||||
#### Full-Parameter Fine-Tuning using GaLore
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using APOLLO
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using BAdam
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using Adam-mini
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using Muon
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LoRA+ Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### PiSSA Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Mixture-of-Depths Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LLaMA-Pro Fine-Tuning
|
||||
|
||||
```bash
|
||||
bash examples/extras/llama_pro/expand.sh
|
||||
llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
||||
```
|
||||
|
||||
#### FSDP+QLoRA Fine-Tuning
|
||||
|
||||
```bash
|
||||
bash examples/extras/fsdp_qlora/train.sh
|
||||
```
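For reference, `examples/extras/fsdp_qlora/train.sh` wraps the following `accelerate launch` call; adjust `num_processes` in `examples/accelerate/fsdp_config.yaml` to match the total number of GPUs:

```bash
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
  --config_file examples/accelerate/fsdp_config.yaml \
  src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
```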
|
||||
292
examples/README_zh.md
Normal file
292
examples/README_zh.md
Normal file
@@ -0,0 +1,292 @@
|
||||
We provide a diverse set of example scripts for fine-tuning large language models.

Make sure to execute the following commands in the `LLaMA-Factory` directory.
|
||||
|
||||
## Table of Contents

- [LoRA Fine-Tuning](#lora-fine-tuning)
- [QLoRA Fine-Tuning](#qlora-fine-tuning)
- [Full-Parameter Fine-Tuning](#full-parameter-fine-tuning)
- [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
- [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
- [Extras](#extras)
|
||||
|
||||
Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to select the computing devices.

By default, LLaMA-Factory uses all visible computing devices.

Basic usage:
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
Advanced usage:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
|
||||
learning_rate=1e-5 \
|
||||
logging_steps=1
|
||||
```
|
||||
|
||||
```bash
|
||||
bash examples/train_lora/llama3_lora_sft.sh
|
||||
```
|
||||
|
||||
## Examples

### LoRA Fine-Tuning

#### (Continuous) Pre-Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### DPO/ORPO/SimPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### Multimodal DPO/ORPO/SimPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### Reward Modeling
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
|
||||
```
|
||||
|
||||
#### PPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
|
||||
```
|
||||
|
||||
#### KTO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
|
||||
```
|
||||
|
||||
#### Preprocess Dataset

This is helpful for large datasets: set `tokenized_path` in the config to load the preprocessed dataset (a usage sketch follows the command below).
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||
```
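A hedged sketch of the two-step workflow (the `key=value` override mirrors the advanced usage shown above; whether a single run both writes and reuses the cache may depend on the implementation):

```bash
# Step 1: tokenize once; tokenized_path in this config caches the result
llamafactory-cli train examples/train_lora/llama3_preprocess.yaml

# Step 2: point subsequent training runs at the cached dataset
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
  tokenized_path=saves/llama3-8b/dataset/sft
```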
|
||||
|
||||
#### Evaluating on MMLU/CMMLU/C-Eval Benchmarks
|
||||
|
||||
```bash
|
||||
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
### Elastic and Fault-Tolerant Supervised Fine-Tuning on Multiple Nodes

To launch an elastic and fault-tolerant multi-node fine-tuning job, run the following command on each node. The number of nodes may range from `MIN_NNODES` to `MAX_NNODES`, and each node is allowed to restart up to `MAX_RESTARTS` times on failure. `RDZV_ID` should be set to a unique job ID shared by all nodes participating in the job. For more information, see the official [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html) documentation.
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Distributing GPU Memory Evenly Using DeepSpeed ZeRO-3
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||
```
|
||||
|
||||
#### Fine-Tuning on 4 GPUs Using Ray
|
||||
|
||||
```bash
|
||||
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||
```
|
||||
|
||||
### QLoRA Fine-Tuning

#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on NPU
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4-bit AWQ Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 2-bit AQLM Quantization
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||
```
|
||||
|
||||
### Full-Parameter Fine-Tuning

#### Supervised Fine-Tuning on a Single Node
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
|
||||
```
|
||||
|
||||
### Merging LoRA Adapters and Quantization

#### Merge LoRA Adapters

Note: DO NOT use a quantized model or the `quantization_bit` option when merging LoRA adapters.
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Quantizing Model Using AutoGPTQ
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||
```
|
||||
|
||||
### Save Ollama Modelfile
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
### Inferring LoRA Fine-Tuned Models

#### Evaluation Using vLLM's Multi-GPU Inference
|
||||
|
||||
```bash
|
||||
python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
|
||||
python scripts/eval_bleu_rouge.py generated_predictions.jsonl
|
||||
```
|
||||
|
||||
#### Use CLI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Use Web UI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Launch OpenAI-style API
|
||||
|
||||
```bash
|
||||
llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
### Extras

#### Full-Parameter Fine-Tuning Using GaLore
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning Using APOLLO
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning Using BAdam
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning Using Adam-mini
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning Using Muon
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LoRA+ Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### PiSSA Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Mixture-of-Depths Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LLaMA-Pro Fine-Tuning
|
||||
|
||||
```bash
|
||||
bash examples/extras/llama_pro/expand.sh
|
||||
llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
|
||||
```
|
||||
|
||||
#### FSDP+QLoRA Fine-Tuning
|
||||
|
||||
```bash
|
||||
bash examples/extras/fsdp_qlora/train.sh
|
||||
```
|
||||
25
examples/accelerate/fsdp_config.yaml
Normal file
25
examples/accelerate/fsdp_config.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16 # or fp16
|
||||
num_machines: 1 # the number of nodes
|
||||
num_processes: 2 # the number of GPUs in all nodes
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
25
examples/accelerate/fsdp_config_offload.yaml
Normal file
25
examples/accelerate/fsdp_config_offload.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: true # offload may affect training speed
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16 # or fp16
|
||||
num_machines: 1 # the number of nodes
|
||||
num_processes: 2 # the number of GPUs in all nodes
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
43
examples/extras/adam_mini/qwen2_full_sft.yaml
Normal file
43
examples/extras/adam_mini/qwen2_full_sft.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_adam_mini: true
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: qwen
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2-1_5b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
48
examples/extras/apollo/llama3_full_sft.yaml
Normal file
48
examples/extras/apollo/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_apollo: true
|
||||
apollo_layerwise: true # choices: [true, false], use false for DDP training
|
||||
apollo_target: all
|
||||
apollo_rank: 128
|
||||
apollo_scale: 32.0
|
||||
apollo_scale_type: channel
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 1 # use 1 for layerwise apollo
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
46
examples/extras/badam/llama3_full_sft.yaml
Normal file
46
examples/extras/badam/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_badam: true
|
||||
badam_mode: layer
|
||||
badam_switch_mode: ascending
|
||||
badam_switch_interval: 50
|
||||
badam_verbose: 2
|
||||
# deepspeed: examples/deepspeed/ds_z3_config.json
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
45
examples/extras/fsdp_qlora/llama3_lora_sft.yaml
Normal file
45
examples/extras/fsdp_qlora/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
quantization_bit: 4
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
6
examples/extras/fsdp_qlora/train.sh
Normal file
6
examples/extras/fsdp_qlora/train.sh
Normal file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
|
||||
# DO NOT use GPTQ/AWQ model in FSDP+QLoRA
|
||||
|
||||
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
|
||||
--config_file examples/accelerate/fsdp_config.yaml \
|
||||
src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
|
||||
47
examples/extras/galore/llama3_full_sft.yaml
Normal file
47
examples/extras/galore/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_galore: true
|
||||
galore_layerwise: true # choices: [true, false], use false for DDP training
|
||||
galore_target: all
|
||||
galore_rank: 128
|
||||
galore_scale: 2.0
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 1 # use 1 for layerwise galore
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
6
examples/extras/llama_pro/expand.sh
Normal file
6
examples/extras/llama_pro/expand.sh
Normal file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
python scripts/llama_pro.py \
|
||||
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
|
||||
--output_dir models/llama3-8b-pro \
|
||||
--num_expand 8
|
||||
45
examples/extras/llama_pro/llama3_freeze_sft.yaml
Normal file
45
examples/extras/llama_pro/llama3_freeze_sft.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
### model
|
||||
model_name_or_path: models/llama3-8b-pro
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: freeze
|
||||
freeze_trainable_layers: 8
|
||||
freeze_trainable_modules: all
|
||||
use_llama_pro: true
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b-pro/freeze/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
45
examples/extras/loraplus/llama3_lora_sft.yaml
Normal file
45
examples/extras/loraplus/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
loraplus_lr_ratio: 16.0
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
44
examples/extras/mod/llama3_full_sft.yaml
Normal file
44
examples/extras/mod/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
mixture_of_depths: convert
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b-mod/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
optim: paged_adamw_8bit
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
43
examples/extras/muon/qwen2_full_sft.yaml
Normal file
43
examples/extras/muon/qwen2_full_sft.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_muon: true
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: qwen
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2-1_5b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
31
examples/extras/nlg_eval/llama3_lora_predict.yaml
Normal file
31
examples/extras/nlg_eval/llama3_lora_predict.yaml
Normal file
@@ -0,0 +1,31 @@
|
||||
# The batch generation can be SLOW using this config.
|
||||
# For faster inference, we recommend using `scripts/vllm_infer.py`.
|
||||
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_predict: true
|
||||
finetuning_type: lora
|
||||
|
||||
### dataset
|
||||
eval_dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 50
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/predict
|
||||
overwrite_output_dir: true
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### eval
|
||||
per_device_eval_batch_size: 1
|
||||
predict_with_generate: true
|
||||
ddp_timeout: 180000000
|
||||
5
examples/extras/pissa/init.sh
Normal file
5
examples/extras/pissa/init.sh
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
python scripts/pissa_init.py \
|
||||
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
|
||||
--output_dir models/llama3-8b-pissa
|
||||
47
examples/extras/pissa/llama3_lora_sft.yaml
Normal file
47
examples/extras/pissa/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
pissa_init: true
|
||||
pissa_iter: 16
|
||||
pissa_convert: true
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
4
examples/inference/llama3.yaml
Normal file
4
examples/inference/llama3.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
template: llama3
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
4
examples/inference/llama3_full_sft.yaml
Normal file
4
examples/inference/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
model_name_or_path: saves/llama3-8b/full/sft
|
||||
template: llama3
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
5
examples/inference/llama3_lora_sft.yaml
Normal file
5
examples/inference/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
template: llama3
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
4
examples/inference/qwen2_5vl.yaml
Normal file
4
examples/inference/qwen2_5vl.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
template: qwen2_vl
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
10
examples/merge_lora/llama3_full_sft.yaml
Normal file
10
examples/merge_lora/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
### model
|
||||
model_name_or_path: saves/llama3-8b/full/sft
|
||||
template: llama3
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: output/llama3_full_sft
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
|
||||
12
examples/merge_lora/llama3_gptq.yaml
Normal file
12
examples/merge_lora/llama3_gptq.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
template: llama3
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: output/llama3_gptq
|
||||
export_quantization_bit: 4
|
||||
export_quantization_dataset: data/c4_demo.jsonl
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
|
||||
13
examples/merge_lora/llama3_lora_sft.yaml
Normal file
13
examples/merge_lora/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
|
||||
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
template: llama3
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: output/llama3_lora_sft
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
|
||||
13
examples/merge_lora/qwen2_5vl_lora_sft.yaml
Normal file
13
examples/merge_lora/qwen2_5vl_lora_sft.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
|
||||
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
adapter_name_or_path: saves/qwen2_5vl-7b/lora/sft
|
||||
template: qwen2_vl
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: output/qwen2_5vl_lora_sft
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
|
||||
45
examples/train_full/llama3_full_sft.yaml
Normal file
45
examples/train_full/llama3_full_sft.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 2
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: alpaca_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
49
examples/train_full/qwen2_5vl_full_sft.yaml
Normal file
49
examples/train_full/qwen2_5vl_full_sft.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
image_max_pixels: 262144
|
||||
video_max_pixels: 16384
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
freeze_vision_tower: true
|
||||
freeze_multi_modal_projector: true
|
||||
freeze_language_model: false
|
||||
deepspeed: examples/deepspeed/ds_z3_config.json
|
||||
|
||||
### dataset
|
||||
dataset: mllm_demo,identity,alpaca_en_demo
|
||||
template: qwen2_vl
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2_5vl-7b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 2
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
48
examples/train_lora/llama3_lora_dpo.yaml
Normal file
48
examples/train_lora/llama3_lora_dpo.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: dpo
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
pref_beta: 0.1
|
||||
pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
|
||||
|
||||
### dataset
|
||||
dataset: dpo_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/dpo
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 5.0e-6
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: dpo_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
19
examples/train_lora/llama3_lora_eval.yaml
Normal file
19
examples/train_lora/llama3_lora_eval.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
finetuning_type: lora
|
||||
|
||||
### dataset
|
||||
task: mmlu_test # choices: [mmlu_test, ceval_validation, cmmlu_test]
|
||||
template: fewshot
|
||||
lang: en
|
||||
n_shot: 5
|
||||
|
||||
### output
|
||||
save_dir: saves/llama3-8b/lora/eval
|
||||
|
||||
### eval
|
||||
batch_size: 4
|
||||
44
examples/train_lora/llama3_lora_kto.yaml
Normal file
44
examples/train_lora/llama3_lora_kto.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: kto
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
pref_beta: 0.1
|
||||
|
||||
### dataset
|
||||
dataset: kto_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/kto
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 5.0e-6
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
43
examples/train_lora/llama3_lora_ppo.yaml
Normal file
43
examples/train_lora/llama3_lora_ppo.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
reward_model: saves/llama3-8b/lora/reward
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: ppo
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/ppo
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### generate
|
||||
max_new_tokens: 512
|
||||
top_k: 0
|
||||
top_p: 0.9
|
||||
45
examples/train_lora/llama3_lora_pretrain.yaml
Normal file
45
examples/train_lora/llama3_lora_pretrain.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: pt
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: c4_demo
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/pretrain
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: c4_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
46
examples/train_lora/llama3_lora_reward.yaml
Normal file
46
examples/train_lora/llama3_lora_reward.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: rm
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: dpo_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/reward
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: dpo_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
36
examples/train_lora/llama3_lora_sft.sh
Normal file
36
examples/train_lora/llama3_lora_sft.sh
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -x
|
||||
|
||||
MODEL_PATH=meta-llama/Meta-Llama-3-8B-Instruct
|
||||
|
||||
llamafactory-cli train \
|
||||
--model_name_or_path ${MODEL_PATH} \
|
||||
--trust_remote_code \
|
||||
--stage sft \
|
||||
--do_train \
|
||||
--finetuning_type lora \
|
||||
--lora_rank 8 \
|
||||
--lora_target all \
|
||||
--dataset identity,alpaca_en_demo \
|
||||
--template llama3 \
|
||||
--cutoff_len 2048 \
|
||||
--max_samples 1000 \
|
||||
--overwrite_cache \
|
||||
--preprocessing_num_workers 16 \
|
||||
--dataloader_num_workers 4 \
|
||||
--output_dir saves/llama3-8b/lora/sft \
|
||||
--logging_steps 10 \
|
||||
--save_steps 500 \
|
||||
--plot_loss \
|
||||
--overwrite_output_dir \
|
||||
--save_only_model false \
|
||||
--report_to none \
|
||||
--per_device_train_batch_size 1 \
|
||||
--gradient_accumulation_steps 8 \
|
||||
--learning_rate 1e-4 \
|
||||
--num_train_epochs 3.0 \
|
||||
--lr_scheduler_type cosine \
|
||||
--warmup_ratio 0.1 \
|
||||
--bf16 \
|
||||
--ddp_timeout 180000000
|
||||
46
examples/train_lora/llama3_lora_sft.yaml
Normal file
46
examples/train_lora/llama3_lora_sft.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: alpaca_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
47
examples/train_lora/llama3_lora_sft_ds3.yaml
Normal file
47
examples/train_lora/llama3_lora_sft_ds3.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 2
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: alpaca_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
61
examples/train_lora/llama3_lora_sft_ray.yaml
Normal file
61
examples/train_lora/llama3_lora_sft_ray.yaml
Normal file
@@ -0,0 +1,61 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct # or use local absolute path
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
dataset_dir: REMOTE:llamafactory/demo_data # or use local absolute path
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: tmp_dir
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### ray
|
||||
ray_run_name: llama3_8b_sft_lora
|
||||
ray_storage_path: ./saves
|
||||
ray_num_workers: 4 # Number of GPUs to use.
|
||||
placement_strategy: PACK
|
||||
resources_per_worker:
|
||||
GPU: 1
|
||||
# ray_init_kwargs:
|
||||
# runtime_env:
|
||||
# env_vars:
|
||||
# <YOUR-ENV-VAR-HERE>: "<YOUR-ENV-VAR-HERE>"
|
||||
# pip:
|
||||
# - emoji
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: alpaca_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
23
examples/train_lora/llama3_preprocess.yaml
Normal file
23
examples/train_lora/llama3_preprocess.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
tokenized_path: saves/llama3-8b/dataset/sft
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
overwrite_output_dir: true
|
||||
49
examples/train_lora/llama4_lora_sft_ds3.yaml
Normal file
49
examples/train_lora/llama4_lora_sft_ds3.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
# pip install git+https://github.com/hiyouga/transformers.git@llama4_train
|
||||
|
||||
### model
|
||||
model_name_or_path: meta-llama/Llama-4-Scout-17B-16E-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
|
||||
|
||||
### dataset
|
||||
dataset: mllm_demo,identity,alpaca_en_demo
|
||||
template: llama4
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama4-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 2
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# eval_dataset: alpaca_en_demo
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
49
examples/train_lora/qwen2_5vl_lora_dpo.yaml
Normal file
49
examples/train_lora/qwen2_5vl_lora_dpo.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
image_max_pixels: 262144
|
||||
video_max_pixels: 16384
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: dpo
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
pref_beta: 0.1
|
||||
pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
|
||||
|
||||
### dataset
|
||||
dataset: rlhf_v
|
||||
template: qwen2_vl
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2_5vl-7b/lora/dpo
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 5.0e-6
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
47
examples/train_lora/qwen2_5vl_lora_sft.yaml
Normal file
47
examples/train_lora/qwen2_5vl_lora_sft.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
image_max_pixels: 262144
|
||||
video_max_pixels: 16384
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: mllm_demo,identity,alpaca_en_demo # video: mllm_video_demo
|
||||
template: qwen2_vl
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2_5vl-7b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
resume_from_checkpoint: null
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
44
examples/train_qlora/llama3_lora_sft_aqlm.yaml
Normal file
44
examples/train_qlora/llama3_lora_sft_aqlm.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
### model
|
||||
model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
44
examples/train_qlora/llama3_lora_sft_awq.yaml
Normal file
44
examples/train_qlora/llama3_lora_sft_awq.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
### model
|
||||
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-4
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||