Compare commits: `datasets-r` ... `main-base` (484 commits)

| Author | SHA1 | Date |
|---|---|---|

[Commit table omitted: 484 commits from `ff939d8a64` through `a546ca2813`; the Author and Date columns were empty in this export, leaving only abbreviated SHA1s.]
`.github/FUNDING.yml` (6 changes)

```diff
@@ -1,13 +1,13 @@
 # These are supported funding model platforms

-github: OpenAccess-AI-Collective # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+github: [winglian, OpenAccess-AI-Collective] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: axolotl_ai # Replace with a single Ko-fi username
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username
 issuehunt: # Replace with a single IssueHunt username
 otechie: # Replace with a single Otechie username
 lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
-custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
+custom: ['https://quickchart.io/qr?text=bitcoin%3Abc1qxlgwlqwfea5s2cxm42xqsfmwjct0rj8w8ea5np&size=480&centerImageUrl=https%3A%2F%2Fupload.wikimedia.org%2Fwikipedia%2Fcommons%2Fthumb%2F4%2F46%2FBitcoin.svg%2F64px-Bitcoin.svg.png'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
`.github/ISSUE_TEMPLATE/bug-report.yaml` (8 changes)

```diff
@@ -53,6 +53,14 @@ body:
     validations:
       required: true

+  - type: textarea
+    id: config
+    attributes:
+      label: Config yaml
+      description: |
+        Please attach the config yaml!
+      render: yaml
+
   - type: textarea
     id: possible-solution
     attributes:
```
(file header lost in extraction; the content matches a pull-request template)

```diff
@@ -20,3 +20,8 @@
 ## Types of changes

 <!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
+
+## Social Handles (Optional)
+
+<!-- Thanks for submitting a bugfix or enhancement. -->
+<!-- We'd love to show our thanks to you on Twitter & Discord if you provide your handle -->
```
`.github/workflows/base.yml` (26 changes)

```diff
@@ -1,29 +1,31 @@
 name: ci-cd-base

 on:
-  push:
-    branches:
-      - "main-base"
-      - "dev-base"
+  workflow_dispatch:

 jobs:
   build-base:
     if: github.repository_owner == 'OpenAccess-AI-Collective'
     # this job needs to be run on self-hosted GPU runners...
-    runs-on: self-hosted
+    runs-on: axolotl-gpu-runner
     strategy:
       fail-fast: false
       matrix:
         include:
-          - cuda: "118"
-            cuda_version: 11.8.0
-            python_version: "3.9"
-            pytorch: 2.0.1
-            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
           - cuda: "118"
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.1
+            pytorch: 2.1.2
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
+          - cuda: "121"
+            cuda_version: 12.1.0
+            python_version: "3.10"
+            pytorch: 2.1.2
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
+          - cuda: "121"
+            cuda_version: 12.1.0
+            python_version: "3.11"
+            pytorch: 2.1.2
             torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
     steps:
       - name: Checkout
@@ -46,7 +48,7 @@ jobs:
           context: .
           file: ./docker/Dockerfile-base
          push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
```
`.github/workflows/docs.yml` (new file, 31 lines)

```diff
@@ -0,0 +1,31 @@
+name: Publish Docs
+on:
+  push:
+    branches:
+      - main
+
+permissions:
+  contents: write
+  pages: write
+
+jobs:
+  build-deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+      - name: Set up Quarto
+        uses: quarto-dev/quarto-actions/setup@v2
+      - name: Setup Python
+        uses: actions/setup-python@v3
+        with:
+          python-version: '3.10'
+      - name: install dependencies
+        run: |
+          python3 -m pip install jupyter
+      - name: Publish to GitHub Pages (and render)
+        uses: quarto-dev/quarto-actions/publish@v2
+        with:
+          target: gh-pages
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
`.github/workflows/lint.yml` (new file, 22 lines)

```diff
@@ -0,0 +1,22 @@
+name: lint
+on:
+  # check on PRs, and manual triggers
+  pull_request:
+    paths:
+      - '**.py'
+      - 'requirements.txt'
+      - '.github/workflows/*.yml'
+      - "*.md"
+  workflow_dispatch:
+
+jobs:
+  pre-commit:
+    name: pre-commit
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+          cache: 'pip' # caching pip dependencies
+      - uses: pre-commit/action@v3.0.0
```
`.github/workflows/main.yml` (82 changes)

```diff
@@ -4,96 +4,116 @@ on:
   push:
     branches:
       - "main"
+  workflow_dispatch:

 jobs:
   build-axolotl:
-    if: github.repository_owner == 'OpenAccess-AI-Collective'
-    # this job needs to be run on self-hosted GPU runners...
+    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]]') && github.repository_owner == 'OpenAccess-AI-Collective' }}
     strategy:
       fail-fast: false
       matrix:
         include:
-          - cuda: 118
-            cuda_version: 11.8.0
-            python_version: "3.9"
-            pytorch: 2.0.1
-            axolotl_extras:
           - cuda: 118
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.1
+            pytorch: 2.1.2
             axolotl_extras:
+            axolotl_args: "--extra-index-url https://download.pytorch.org/whl/cu118"
+            is_latest: true
+          - cuda: 121
+            cuda_version: 12.1.0
+            python_version: "3.10"
+            pytorch: 2.1.2
+            axolotl_extras:
+          - cuda: 121
+            cuda_version: 12.1.0
+            python_version: "3.11"
+            pytorch: 2.1.2
+            axolotl_extras:
-    runs-on: self-hosted
+    runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Docker metadata
         id: metadata
-        uses: docker/metadata-action@v3
+        uses: docker/metadata-action@v5
         with:
           images: winglian/axolotl
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Build
-        uses: docker/build-push-action@v4
+      # guidance for testing before pushing: https://docs.docker.com/build/ci/github-actions/test-before-push/
+      - name: Build and export to Docker
+        uses: docker/build-push-action@v5
         with:
           context: .
           build-args: |
             BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}
             CUDA=${{ matrix.cuda }}
+            PYTORCH_VERSION=${{ matrix.pytorch }}
+            AXOLOTL_ARGS=${{ matrix.axolotl_args }}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: |
+            ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
           labels: ${{ steps.metadata.outputs.labels }}

   build-axolotl-runpod:
     needs: build-axolotl
-    if: github.repository_owner == 'OpenAccess-AI-Collective'
+    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]]') && github.repository_owner == 'OpenAccess-AI-Collective' }}
     # this job needs to be run on self-hosted GPU runners...
     strategy:
       matrix:
         include:
-          - cuda: 118
-            cuda_version: 11.8.0
-            python_version: "3.9"
-            pytorch: 2.0.1
-            axolotl_extras:
           - cuda: 118
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.1
+            pytorch: 2.1.2
             axolotl_extras:
             is_latest: true
-    runs-on: self-hosted
+          - cuda: 121
+            cuda_version: 12.1.0
+            python_version: "3.10"
+            pytorch: 2.1.2
+            axolotl_extras:
+          - cuda: 121
+            cuda_version: 12.1.0
+            python_version: "3.11"
+            pytorch: 2.1.2
+            axolotl_extras:
+    runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Docker metadata
         id: metadata
-        uses: docker/metadata-action@v3
+        uses: docker/metadata-action@v5
         with:
-          images: winglian/axolotl-runpod
+          images: winglian/axolotl-cloud
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
       - name: Build
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           context: .
           build-args: |
             BASE_TAG=${{ github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
             CUDA=${{ matrix.cuda }}
-          file: ./docker/Dockerfile-runpod
+          file: ./docker/Dockerfile-cloud
           push: ${{ github.event_name != 'pull_request' }}
           tags: |
             ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            winglian/axolotl-runpod:main-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
             ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
+            ${{ (matrix.is_latest) && format('{0}-latest', 'winglian/axolotl-runpod:main') || '' }}
           labels: ${{ steps.metadata.outputs.labels }}
```
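The conditional pieces of the `tags:` expressions above (`… != '' && '-' || ''` and `format('{0}-latest', …)`) can be hard to read. Below is a minimal Python sketch of the equivalent string logic; the `base_tag` value and matrix entries are made-up examples standing in for `steps.metadata.outputs.tags` and the workflow matrix, not actual workflow outputs.

```python
# Sketch of how the workflow composes Docker image tags from the matrix.
def build_tags(base_tag: str, matrix: dict) -> list[str]:
    extras = matrix.get("axolotl_extras") or ""
    # mirrors: -py..-cu..-..${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
    suffix = f"-py{matrix['python_version']}-cu{matrix['cuda']}-{matrix['pytorch']}"
    suffix += f"-{extras}" if extras else ""
    tags = [f"{base_tag}{suffix}"]
    if matrix.get("is_latest"):
        # mirrors: ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
        tags.append(f"{base_tag}-latest")
    return tags

print(build_tags("winglian/axolotl:main", {
    "python_version": "3.10", "cuda": 121, "pytorch": "2.1.2",
    "axolotl_extras": "", "is_latest": True,
}))
# -> ['winglian/axolotl:main-py3.10-cu121-2.1.2', 'winglian/axolotl:main-latest']
```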
`.github/workflows/pre-commit.yml` (deleted, 16 lines)

```diff
@@ -1,16 +0,0 @@
-name: pre-commit
-
-on:
-  pull_request:
-  push:
-
-jobs:
-  pre-commit:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: "3.9"
-          cache: 'pip' # caching pip dependencies
-      - uses: pre-commit/action@v3.0.0
```
`.github/workflows/pypi.yml` (new file, 45 lines)

```diff
@@ -0,0 +1,45 @@
+name: publish pypi
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  pypi-publish:
+    name: Upload release to PyPI
+    runs-on: ubuntu-latest
+    environment:
+      name: pypi
+      url: https://pypi.org/p/axolotl
+    permissions:
+      id-token: write  # IMPORTANT: this permission is mandatory for trusted publishing
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+
+      - name: Install dependencies
+        run: |
+          pip3 install wheel
+          pip3 install -e .
+          pip3 install -r requirements-tests.txt
+
+      - name: Extract tag name
+        id: tag
+        run: echo ::set-output name=TAG_NAME::$(echo $GITHUB_REF | cut -d / -f 3)
+
+      - name: Update version in setup.py
+        run: |
+          sed -i -E 's/version="([0-9.]+)",/version="${{ steps.tag.outputs.TAG_NAME }}",/g' setup.py
+
+      - name: Build a binary wheel
+        run: |
+          python setup.py sdist bdist_wheel
+
+      - name: Publish package distributions to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
```
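The "Extract tag name" and "Update version in setup.py" steps together rewrite the package version from the pushed git ref before building the wheel. Here is a minimal Python sketch of that string manipulation; the ref and `setup.py` snippet are illustrative examples, not taken from a real run.

```python
import re

def version_from_ref(github_ref: str) -> str:
    # mirrors: $(echo $GITHUB_REF | cut -d / -f 3) for refs like "refs/tags/0.4.0"
    return github_ref.split("/")[2]

def bump_setup_py(source: str, tag: str) -> str:
    # mirrors: sed -i -E 's/version="([0-9.]+)",/version="<TAG>",/g' setup.py
    return re.sub(r'version="[0-9.]+",', f'version="{tag}",', source)

setup_py = 'setup(\n    name="axolotl",\n    version="0.3.0",\n)'
tag = version_from_ref("refs/tags/0.4.0")  # hypothetical pushed tag
print(bump_setup_py(setup_py, tag))        # now contains version="0.4.0",
```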
`.github/workflows/tests.yml` (82 changes)

```diff
@@ -1,16 +1,40 @@
-name: PyTest
+name: Tests
 on:
+  # check on push/merge to main, PRs, and manual triggers
   push:
+    branches:
+      - "main"
+    paths:
+      - '**.py'
+      - 'requirements.txt'
+      - '.github/workflows/*.yml'
   pull_request:
+    paths:
+      - '**.py'
+      - 'requirements.txt'
+      - '.github/workflows/*.yml'
+  workflow_dispatch:

 jobs:
-  test:
+  pre-commit:
+    name: pre-commit
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+          cache: 'pip' # caching pip dependencies
+      - uses: pre-commit/action@v3.0.0
+
+  pytest:
+    name: PyTest
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.9", "3.10"]
+        python_version: ["3.10", "3.11"]
-    timeout-minutes: 10
+    timeout-minutes: 20

     steps:
       - name: Check out repository code
@@ -24,9 +48,53 @@ jobs:
       - name: Install dependencies
         run: |
-          pip install -e .
-          pip install -r requirements-tests.txt
+          pip3 install -U -e .
+          pip3 install -r requirements-tests.txt

       - name: Run tests
         run: |
-          pytest tests/
+          pytest --ignore=tests/e2e/ tests/
+
+  docker-e2e-tests:
+    if: github.repository_owner == 'OpenAccess-AI-Collective'
+    # this job needs to be run on self-hosted GPU runners...
+    runs-on: [self-hosted, modal]
+    timeout-minutes: 60
+    needs: [pre-commit, pytest]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - cuda: 118
+            cuda_version: 11.8.0
+            python_version: "3.10"
+            pytorch: 2.1.2
+            axolotl_args: "--extra-index-url https://download.pytorch.org/whl/cu118"
+            num_gpus: 1
+          - cuda: 121
+            cuda_version: 12.1.0
+            python_version: "3.10"
+            pytorch: 2.1.2
+            num_gpus: 1
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+      - name: Install Modal
+        run: |
+          python -m pip install --upgrade pip
+          pip install modal jinja2
+      - name: Update env vars
+        run: |
+          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
+          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
+          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
+          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
+      - name: Run tests job on Modal
+        run: |
+          modal run cicd.tests
```
`.gitignore` (14 changes)

```diff
@@ -1,5 +1,8 @@
 **/axolotl.egg-info
 configs
+last_run_prepared/
+.vscode
+_site/

 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -161,3 +164,14 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
+
+# WandB
+# wandb creates a folder to store logs for training runs
+wandb
+
+# Runs
+lora-out/*
+qlora-out/*
+mlruns/*
+
+/.quarto/
```
`.isort.cfg` (1 change)

```diff
@@ -1,2 +1,3 @@
 [settings]
 profile=black
+known_third_party=wandb
```
`.mypy.ini` (14 changes)

```diff
@@ -1,5 +1,5 @@
 [mypy]
+plugins = pydantic.mypy
 exclude = venv

 [mypy-alpaca_lora_4bit.*]
@@ -8,6 +8,12 @@ ignore_missing_imports = True
 [mypy-axolotl.monkeypatch.*]
 ignore_errors = True

+[mypy-axolotl.models.mixtral.*]
+ignore_errors = True
+
+[mypy-axolotl.models.phi.*]
+ignore_errors = True
+
 [mypy-flash_attn.*]
 ignore_missing_imports = True

@@ -20,9 +26,15 @@ ignore_missing_imports = True
 [mypy-peft]
 ignore_missing_imports = True

+[mypy-wandb]
+ignore_missing_imports = True
+
 [mypy-bitsandbytes]
 ignore_missing_imports = True

+[mypy-requests]
+ignore_missing_imports = True
+
 [mypy-datasets]
 ignore_missing_imports = True
```
`.pre-commit-config.yaml` (1 change)

```diff
@@ -31,6 +31,7 @@ repos:
         additional_dependencies:
           [
             'types-PyYAML',
+            'pydantic>=2.5.3',
           ]
   - repo: https://github.com/PyCQA/bandit
     rev: 1.7.5
```
`.vscode/README.md` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+See [docs/debugging.md](../docs/debugging.md) for guidance on how to modify these files to debug axolotl with VSCode.
```
`.vscode/launch.json` (new file, 34 lines)

```diff
@@ -0,0 +1,34 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Debug axolotl prompt - sharegpt",
+            "type": "python",
+            "module": "accelerate.commands.launch",
+            "request": "launch",
+            "args": [
+                "-m", "axolotl.cli.train", "dev_sharegpt.yml",
+                // The flags below simplify debugging by overriding the axolotl config
+                // with the debugging tips above. Modify as needed.
+                "--dataset_processes=1", // limits data preprocessing to one process
+                "--max_steps=1", // limits training to just one step
+                "--batch_size=1", // minimizes batch size
+                "--micro_batch_size=1", // minimizes batch size
+                "--val_set_size=0", // disables validation
+                "--sample_packing=False", // disables sample packing which is necessary for small datasets
+                "--eval_sample_packing=False", // disables sample packing on eval set
+                "--dataset_prepared_path=temp_debug/axolotl_outputs/data", // send data outputs to a temp folder
+                "--output_dir=temp_debug/axolotl_outputs/model" // send model outputs to a temp folder
+            ],
+            "console": "integratedTerminal", // show output in the integrated terminal
+            "cwd": "${workspaceFolder}/devtools", // set working directory to devtools from the root of the project
+            "justMyCode": true, // step through only axolotl code
+            "env": {"CUDA_VISIBLE_DEVICES": "0", // Since we aren't doing distributed training, we need to limit to one GPU
+                    "HF_HOME": "${workspaceFolder}/devtools/temp_debug/.hf-cache"}, // send HF cache to a temp folder
+            "preLaunchTask": "cleanup-for-dataprep", // delete temp folders (see below)
+        }
+    ]
+}
```
`.vscode/tasks.json` (new file, 27 lines)

```diff
@@ -0,0 +1,27 @@
+//this file is used by launch.json
+{
+    "version": "2.0.0",
+    "tasks": [
+        // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder
+        {
+            "label": "delete-outputs",
+            "type": "shell",
+            "command": "rm -rf temp_debug/axolotl_outputs",
+            "options":{ "cwd": "${workspaceFolder}/devtools"},
+            "problemMatcher": []
+        },
+        // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder
+        {
+            "label": "delete-temp-hf-dataset-cache",
+            "type": "shell",
+            "command": "rm -rf temp_debug/.hf-cache/datasets",
+            "options":{ "cwd": "${workspaceFolder}/devtools"},
+            "problemMatcher": []
+        },
+        // this task combines the two tasks above
+        {
+            "label": "cleanup-for-dataprep",
+            "dependsOn": ["delete-outputs", "delete-temp-hf-dataset-cache"],
+        }
+    ]
+}
```
`_quarto.yml` (new file, 51 lines)

```diff
@@ -0,0 +1,51 @@
+project:
+  type: website
+
+website:
+  title: "Axolotl"
+  description: "Fine-tuning"
+  favicon: favicon.jpg
+  navbar:
+    title: Axolotl
+    background: dark
+    pinned: false
+    collapse: false
+    tools:
+      - icon: twitter
+        href: https://twitter.com/axolotl_ai
+      - icon: github
+        href: https://github.com/OpenAccess-AI-Collective/axolotl/
+      - icon: discord
+        href: https://discord.gg/7m9sfhzaf3
+
+  sidebar:
+    pinned: true
+    collapse-level: 2
+    style: docked
+    contents:
+      - text: Home
+        href: index.qmd
+      - section: "How-To Guides"
+        contents:
+          # TODO Edit folder structure after we have more docs.
+          - docs/debugging.qmd
+          - docs/multipack.qmd
+          - docs/fdsp_qlora.qmd
+          - docs/input_output.qmd
+          - docs/rlhf.qmd
+          - docs/nccl.qmd
+          - docs/mac.qmd
+          - docs/multi-node.qmd
+      - section: "Reference"
+        contents:
+          - docs/config.qmd
+          - docs/faq.qmd
+
+
+
+
+format:
+  html:
+    theme: materia
+    css: styles.css
+    toc: true
```
`cicd/Dockerfile.jinja` (new file, 39 lines)

```diff
@@ -0,0 +1,39 @@
+FROM winglian/axolotl-base:{{ BASE_TAG }}
+
+ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
+ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
+ENV CUDA="{{ CUDA }}"
+ENV BNB_CUDA_VERSION="{{ CUDA }}"
+ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
+ENV GITHUB_REF="{{ GITHUB_REF }}"
+ENV GITHUB_SHA="{{ GITHUB_SHA }}"
+
+RUN apt-get update && \
+    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev
+
+WORKDIR /workspace
+
+RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
+
+WORKDIR /workspace/axolotl
+
+RUN git fetch origin +$GITHUB_REF && \
+    git checkout FETCH_HEAD
+
+# If AXOLOTL_EXTRAS is set, append it in brackets
+RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
+        pip install -e .[deepspeed,flash-attn,mamba-ssm,galore,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+    else \
+        pip install -e .[deepspeed,flash-attn,mamba-ssm,galore] $AXOLOTL_ARGS; \
+    fi
+
+# So we can test the Docker image
+RUN pip install pytest
+
+# fix so that git fetch/pull from remote works
+RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
+    git config --get remote.origin.fetch
+
+# helper for huggingface-login cli
+RUN git config --global credential.helper store
```
`cicd/cicd.sh` (new executable file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+pytest --ignore=tests/e2e/ /workspace/axolotl/tests/
+pytest /workspace/axolotl/tests/e2e/patched/
+pytest --ignore=tests/e2e/patched/ /workspace/axolotl/tests/e2e/
```
`cicd/tests.py` (new file, 75 lines)

```diff
@@ -0,0 +1,75 @@
+"""
+modal application to run axolotl gpu tests in Modal
+"""
+import os
+import pathlib
+import tempfile
+
+import jinja2
+import modal
+from jinja2 import select_autoescape
+from modal import Image, Stub
+
+cicd_path = pathlib.Path(__file__).parent.resolve()
+
+template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
+template_env = jinja2.Environment(
+    loader=template_loader, autoescape=select_autoescape()
+)
+df_template = template_env.get_template("Dockerfile.jinja")
+
+df_args = {
+    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
+    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
+    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.0.1"),
+    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.10-cu118-2.0.1"),
+    "CUDA": os.environ.get("CUDA", "118"),
+    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
+    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
+}
+
+dockerfile_contents = df_template.render(**df_args)
+
+temp_dir = tempfile.mkdtemp()
+with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
+    f.write(dockerfile_contents)
+
+cicd_image = (
+    Image.from_dockerfile(
+        pathlib.Path(temp_dir) / "Dockerfile",
+        force_build=True,
+        gpu="A10G",
+    )
+    .env(df_args)
+    .pip_install("fastapi==0.110.0", "pydantic==2.6.3")
+)
+
+stub = Stub("Axolotl CI/CD", secrets=[])
+
+
+N_GPUS = int(os.environ.get("N_GPUS", 1))
+GPU_CONFIG = modal.gpu.A10G(count=N_GPUS)
+
+
+def run_cmd(cmd: str, run_folder: str):
+    import subprocess  # nosec
+
+    # Propagate errors from subprocess.
+    if exit_code := subprocess.call(cmd.split(), cwd=run_folder):  # nosec
+        exit(exit_code)  # pylint: disable=consider-using-sys-exit
+
+
+@stub.function(
+    image=cicd_image,
+    gpu=GPU_CONFIG,
+    timeout=45 * 60,
+    cpu=8.0,
+    memory=131072,
+)
+def cicd_pytest():
+    run_cmd("./cicd/cicd.sh", "/workspace/axolotl")
+
+
+@stub.local_entrypoint()
+def main():
+    cicd_pytest.remote()
```
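As wired up in tests.yml above, the CI job exports `BASE_TAG`, `PYTORCH_VERSION`, `AXOLOTL_ARGS`, `CUDA`, and `N_GPUS` before invoking `modal run cicd.tests`. A minimal sketch of reproducing that invocation locally, assuming an installed and authenticated Modal CLI; the tag values are illustrative:

```python
# Sketch: drive the same Modal entrypoint the CI workflow uses.
import os
import subprocess

os.environ.update({
    "BASE_TAG": "main-base-py3.10-cu121-2.1.2",  # illustrative; built by base.yml
    "PYTORCH_VERSION": "2.1.2",
    "CUDA": "121",
    "N_GPUS": "1",
})
# Equivalent to the workflow step `modal run cicd.tests`.
subprocess.run(["modal", "run", "cicd.tests"], check=True)
```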
(deleted file; name lost in extraction)

```diff
@@ -1,46 +0,0 @@
-{
-  "zero_optimization": {
-    "stage": 2,
-    "offload_optimizer": {
-      "device": "cpu"
-    },
-    "contiguous_gradients": true,
-    "overlap_comm": true
-  },
-  "bf16": {
-    "enabled": "auto"
-  },
-  "fp16": {
-    "enabled": "auto",
-    "auto_cast": false,
-    "loss_scale": 0,
-    "initial_scale_power": 32,
-    "loss_scale_window": 1000,
-    "hysteresis": 2,
-    "min_loss_scale": 1
-  },
-  "optimizer": {
-    "type": "AdamW",
-    "params": {
-      "lr": "auto",
-      "betas": [
-        0.9,
-        0.999
-      ],
-      "eps": 1e-8,
-      "weight_decay": "auto"
-    }
-  },
-  "scheduler": {
-    "type": "WarmupDecayLR",
-    "params": {
-      "warmup_min_lr": "auto",
-      "warmup_max_lr": "auto",
-      "warmup_num_steps": "auto",
-      "total_num_steps": "auto"
-    }
-  },
-  "train_batch_size": "auto",
-  "train_micro_batch_size_per_gpu": "auto",
-  "wall_clock_breakdown": false
-}
```
`deepspeed_configs/zero1.json` (new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+{
+  "zero_optimization": {
+    "stage": 1,
+    "overlap_comm": true
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "fp16": {
+    "enabled": "auto",
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 32,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "wall_clock_breakdown": false
+}
```
`deepspeed_configs/zero2.json` (new file, 27 lines)

```diff
@@ -0,0 +1,27 @@
+{
+  "zero_optimization": {
+    "stage": 2,
+    "offload_optimizer": {
+      "device": "cpu"
+    },
+    "contiguous_gradients": true,
+    "overlap_comm": true
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "fp16": {
+    "enabled": "auto",
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 32,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "wall_clock_breakdown": false
+}
```
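The `"auto"` placeholders in these configs are resolved by the Hugging Face Trainer's DeepSpeed integration from the corresponding training arguments rather than set by hand. A minimal sketch of pointing a Trainer at one of the files added here, assuming `transformers` and `deepspeed` are installed and the config path exists; the argument values are illustrative:

```python
# Sketch: hand one of the new ZeRO configs to the HF Trainer, which fills
# every "auto" field (batch sizes, clipping, precision) from its own args.
# Model/dataset wiring is elided; the config path is from this diff.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=1,              # -> "train_micro_batch_size_per_gpu"
    gradient_accumulation_steps=4,              # -> "gradient_accumulation_steps"
    bf16=True,                                  # -> "bf16": {"enabled": "auto"}
    deepspeed="deepspeed_configs/zero2.json",   # config added in this diff
)
```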
`deepspeed_configs/zero3.json` (2 hunks)

```diff
@@ -1,14 +1,6 @@
 {
   "zero_optimization": {
     "stage": 3,
-    "offload_optimizer": {
-      "device": "cpu",
-      "pin_memory": true
-    },
-    "offload_param": {
-      "device": "cpu",
-      "pin_memory": true
-    },
     "overlap_comm": true,
     "contiguous_gradients": true,
     "sub_group_size": 0,
@@ -31,23 +23,8 @@
     "hysteresis": 2,
     "min_loss_scale": 1
   },
-  "optimizer": {
-    "type": "AdamW",
-    "params": {
-      "lr": "auto",
-      "betas": "auto",
-      "eps": 1e-8,
-      "weight_decay": "auto"
-    }
-  },
-  "scheduler": {
-    "type": "WarmupLR",
-    "params": {
-      "warmup_min_lr": "auto",
-      "warmup_max_lr": "auto",
-      "warmup_num_steps": "auto"
-    }
-  },
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
   "train_batch_size": "auto",
   "train_micro_batch_size_per_gpu": "auto",
   "wall_clock_breakdown": false
```
`deepspeed_configs/zero3_bf16.json` (new file, 31 lines)

```diff
@@ -0,0 +1,31 @@
+{
+  "zero_optimization": {
+    "stage": 3,
+    "overlap_comm": true,
+    "contiguous_gradients": true,
+    "sub_group_size": 0,
+    "reduce_bucket_size": "auto",
+    "stage3_prefetch_bucket_size": "auto",
+    "stage3_param_persistence_threshold": "auto",
+    "stage3_max_live_parameters": 0,
+    "stage3_max_reuse_distance": 0,
+    "stage3_gather_16bit_weights_on_model_save": true
+  },
+  "bf16": {
+    "enabled": true
+  },
+  "fp16": {
+    "enabled": "auto",
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 32,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "wall_clock_breakdown": false
+}
```
`devtools/README.md` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+This directory contains example config files that might be useful for debugging. Please see [docs/debugging.qmd](../docs/debugging.qmd) for more information.
```
`devtools/dev_sharegpt.yml` (new file, 48 lines)

```diff
@@ -0,0 +1,48 @@
+# Example config for debugging the sharegpt prompt format
+base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+
+load_in_8bit: true
+load_in_4bit: false
+
+datasets:
+  - path: philschmid/guanaco-sharegpt-style
+    type: sharegpt
+    shards: 10
+val_set_size: 0
+output_dir: temp_debug/axolotl_outputs/model
+dataset_prepared_path: temp_debug/axolotl_outputs/data
+dataset_processes: 1
+
+sequence_len: 4096
+sample_packing: false
+pad_to_sequence_len: true
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+micro_batch_size: 1
+num_epochs: 1
+max_steps: 10
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: false
+fp16: true
+tf32: false
+
+gradient_checkpointing: true
+logging_steps: 1
+flash_attention: true
+
+warmup_steps: 10
+weight_decay: 0.0
```
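Since this config drives the launch.json debug profile above, a quick sanity check that the YAML parses and that the debug-friendly settings are in place takes a few lines of Python; a sketch, assuming PyYAML is installed and the path is relative to the repo root:

```python
# Sketch: parse the debug config and confirm the settings the debug
# workflow relies on.
import yaml

with open("devtools/dev_sharegpt.yml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

assert cfg["sample_packing"] is False  # packing disabled for tiny debug datasets
assert cfg["dataset_processes"] == 1   # single-process preprocessing for stepping
print(cfg["base_model"], cfg["max_steps"])
# -> TinyLlama/TinyLlama-1.1B-Chat-v1.0 10
```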
`docker-compose.yaml` (5 changes)

```diff
@@ -9,6 +9,11 @@ services:
       - ~/.cache/huggingface/:/root/.cache/huggingface/
     # set environment variables
     environment:
+      # Set environment variables
+      - GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
+      - GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
+      - GIT_COMMITTER_NAME=${GIT_COMMITTER_NAME}
+      - GIT_COMMITTER_EMAIL=${GIT_COMMITTER_EMAIL}
      - WANDB_API_KEY=${WANDB_API_KEY}
     deploy:
       resources:
```
`docker/Dockerfile` (1 hunk)

```diff
@@ -3,26 +3,34 @@ FROM winglian/axolotl-base:$BASE_TAG

 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
 ARG AXOLOTL_EXTRAS=""
+ARG AXOLOTL_ARGS=""
 ARG CUDA="118"
 ENV BNB_CUDA_VERSION=$CUDA
+ARG PYTORCH_VERSION="2.1.2"
+
+ENV PYTORCH_VERSION=$PYTORCH_VERSION

 RUN apt-get update && \
-    apt-get install -y vim curl
+    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev

 WORKDIR /workspace

 RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git

+WORKDIR /workspace/axolotl
+
 # If AXOLOTL_EXTRAS is set, append it in brackets
-RUN cd axolotl && \
-    if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        pip install -e .[flash-attn,gptq,$AXOLOTL_EXTRAS]; \
+RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
+        pip install -e .[deepspeed,flash-attn,mamba-ssm,galore,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
-        pip install -e .[flash-attn,gptq]; \
+        pip install -e .[deepspeed,flash-attn,mamba-ssm,galore] $AXOLOTL_ARGS; \
     fi

+# So we can test the Docker image
+RUN pip install pytest
+
 # fix so that git fetch/pull from remote works
-RUN cd axolotl && \
-    git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
+RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
     git config --get remote.origin.fetch

 # helper for huggingface-login cli
```
@@ -7,22 +7,22 @@ FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION a

 ENV PATH="/root/miniconda3/bin:${PATH}"

-ARG PYTHON_VERSION="3.9"
-ARG PYTORCH_VERSION="2.0.1"
+ARG PYTHON_VERSION="3.10"
+ARG PYTORCH_VERSION="2.1.2"
 ARG CUDA="118"
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"

 ENV PYTHON_VERSION=$PYTHON_VERSION
+ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST

-RUN apt-get update
-RUN apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/*
-RUN wget \
+RUN apt-get update \
+    && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/* \
+    && wget \
         https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
     && mkdir /root/.conda \
     && bash Miniconda3-latest-Linux-x86_64.sh -b \
-    && rm -f Miniconda3-latest-Linux-x86_64.sh
-RUN conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"
+    && rm -f Miniconda3-latest-Linux-x86_64.sh \
+    && conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"

 ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"

@@ -31,49 +31,7 @@ WORKDIR /workspace
 RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
     python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} --extra-index-url https://download.pytorch.org/whl/cu$CUDA

-FROM base-builder AS deepspeed-builder
-
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
-
-WORKDIR /workspace
-
-RUN git clone https://github.com/microsoft/DeepSpeed.git && \
-    cd DeepSpeed && \
-    MAX_CONCURRENCY=8 DS_BUILD_SPARSE_ATTN=0 DS_BUILD_OPS=1 python3 setup.py bdist_wheel
-
-FROM base-builder AS bnb-builder
-
-WORKDIR /workspace
-ARG CUDA="118"
-ENV CUDA=$CUDA
-
-RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
-    cd bitsandbytes && \
-    CUDA_VERSION=$CUDA make cuda11x && \
-    python setup.py bdist_wheel
-
-FROM base-builder
-
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
-ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
-
-# recompile apex
-RUN python3 -m pip uninstall -y apex
-RUN git clone https://github.com/NVIDIA/apex
-# `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
-RUN cd apex && MAX_JOBS=1 python3 -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./
-
-RUN mkdir -p /workspace/builds
-COPY --from=bnb-builder /workspace/bitsandbytes /workspace/builds/bitsandbytes
-
-RUN mkdir -p /workspace/wheels/bitsandbytes
-COPY --from=deepspeed-builder /workspace/DeepSpeed/dist/deepspeed-*.whl wheels
-COPY --from=bnb-builder /workspace/bitsandbytes/dist/bitsandbytes-*.whl wheels
-COPY --from=bnb-builder /workspace/bitsandbytes/bitsandbytes/libbitsandbytes*.so wheels/bitsandbytes
-
-RUN pip3 install wheels/deepspeed-*.whl
-RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
-RUN git lfs install --skip-repo
-RUN pip3 install awscli && \
+RUN git lfs install --skip-repo && \
+    pip3 install awscli && \
 # The base image ships with `pydantic==1.8.2` which is not working
     pip3 install -U --no-cache-dir pydantic==1.10.10
@@ -4,15 +4,24 @@ FROM winglian/axolotl:$BASE_TAG

 ENV HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
 ENV HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
 ENV TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
+ENV HF_HOME="/workspace/data/huggingface-cache/hub"
+ENV HF_HUB_ENABLE_HF_TRANSFER="1"

-COPY scripts/runpod-entrypoint.sh /root/runpod-entrypoint.sh
+EXPOSE 8888
+EXPOSE 22

+COPY scripts/cloud-entrypoint.sh /root/cloud-entrypoint.sh
+COPY scripts/motd /etc/motd

+RUN pip install jupyterlab notebook ipywidgets && \
+    jupyter lab clean
 RUN apt install --yes --no-install-recommends openssh-server tmux && \
     mkdir -p ~/.ssh && \
     chmod 700 ~/.ssh && \
     printf "\n[[ -z \"\$TMUX\" ]] && { tmux attach-session -t ssh_tmux || tmux new-session -s ssh_tmux; exit; }\n" >> ~/.bashrc && \
-    chmod +x /workspace/axolotl/scripts/runpod-entrypoint.sh && \
-    chmod +x /root/runpod-entrypoint.sh
+    printf "[ ! -z \"\$TERM\" -a -r /etc/motd ] && cat /etc/motd\n" >> ~/.bashrc && \
+    chmod +x /workspace/axolotl/scripts/cloud-entrypoint.sh && \
+    chmod +x /root/cloud-entrypoint.sh

-ENTRYPOINT ["/root/runpod-entrypoint.sh"]
+ENTRYPOINT ["/root/cloud-entrypoint.sh"]
 CMD ["sleep", "infinity"]
docker/Dockerfile-tests (new file)
@@ -0,0 +1,41 @@
ARG BASE_TAG=main-base
FROM winglian/axolotl-base:$BASE_TAG

ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
ARG AXOLOTL_EXTRAS=""
ARG AXOLOTL_ARGS=""
ARG CUDA="118"
ENV BNB_CUDA_VERSION=$CUDA
ARG PYTORCH_VERSION="2.1.2"
ARG GITHUB_REF="main"

ENV PYTORCH_VERSION=$PYTORCH_VERSION

RUN apt-get update && \
    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev

WORKDIR /workspace

RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git

WORKDIR /workspace/axolotl

RUN git fetch origin +$GITHUB_REF && \
    git checkout FETCH_HEAD

# If AXOLOTL_EXTRAS is set, append it in brackets
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
        pip install -e .[deepspeed,flash-attn,mamba-ssm,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \
        pip install -e .[deepspeed,flash-attn,mamba-ssm] $AXOLOTL_ARGS; \
    fi

# So we can test the Docker image
RUN pip install pytest

# fix so that git fetch/pull from remote works
RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
    git config --get remote.origin.fetch

# helper for huggingface-login cli
RUN git config --global credential.helper store
docs/.gitignore (new file, vendored)
@@ -0,0 +1,2 @@
/.quarto/
_site/
docs/config.qmd (new file)
@@ -0,0 +1,17 @@
---
title: Config options
description: A complete list of all configuration options.
---

```{python}
#|echo: false
#|output: asis
import re

# Regex pattern to match the README's "All yaml options" block, including its code fence
pattern = r'<details[^>]*id="all-yaml-options"[^>]*>.*?<summary>All yaml options.*?```yaml(.*?)```.*?</details>'

with open('../README.md', 'r') as f:
    doc = f.read()
match = re.search(pattern, doc, re.DOTALL)
print("```yaml", match.group(1).strip(), "```", sep="\n")
```
docs/debugging.qmd (new file)
@@ -0,0 +1,245 @@
---
title: Debugging
description: How to debug Axolotl
---

This document provides some tips and tricks for debugging Axolotl. It also provides an example configuration for debugging with VSCode. A good debugging setup is essential to understanding how Axolotl code works behind the scenes.

## Table of Contents

- [General Tips](#general-tips)
- [Debugging with VSCode](#debugging-with-vscode)
  - [Background](#background)
  - [Configuration](#configuration)
  - [Customizing your debugger](#customizing-your-debugger)
  - [Video Tutorial](#video-tutorial)
- [Debugging With Docker](#debugging-with-docker)
  - [Setup](#setup)
  - [Attach To Container](#attach-to-container)
  - [Video - Attaching To Docker On Remote Host](#video---attaching-to-docker-on-remote-host)

## General Tips

While debugging it's helpful to simplify your test scenario as much as possible. Here are some tips for doing so (a consolidated config sketch follows the list):

> [!Important]
> All of these tips are incorporated into the [example configuration](#configuration) for debugging with VSCode below.

1. **Make sure you are using the latest version of axolotl**: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from `main`.
2. **Eliminate concurrency**: Restrict the number of processes to 1 for both training and data preprocessing:
    - Set `CUDA_VISIBLE_DEVICES` to a single GPU, e.g. `export CUDA_VISIBLE_DEVICES=0`.
    - Set `dataset_processes: 1` in your axolotl config or run the training command with `--dataset_processes=1`.
3. **Use a small dataset**: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure `sample_packing: False` and `eval_sample_packing: False` to avoid errors. If you are in a pinch and don't have time to construct a small dataset but want to use one from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training). For example, to shard the dataset into 20 pieces, add the following to your axolotl config:
    ```yaml
    dataset:
      ...
      shards: 20
    ```
4. **Use a small model**: A good example of a small model is [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0).
5. **Minimize iteration time**: Make sure the training loop finishes as fast as possible, with these settings:
    - `micro_batch_size: 1`
    - `max_steps: 1`
    - `val_set_size: 0`
6. **Clear caches:** Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.
    - Data preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in `dataset_prepared_path:` in your axolotl config. If you didn't set this value, the default is `last_run_prepared`.
    - HF Hub: If you are debugging data preprocessing, you should clear the relevant [HuggingFace cache](https://huggingface.co/docs/datasets/cache) by deleting the appropriate `~/.cache/huggingface/datasets/...` folder(s).
    - **The recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.**
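Taken together, the overrides above boil down to a handful of config values. As a rough sketch (assembled from the tips in this list; adjust paths and values to your own setup), the debug-relevant portion of an axolotl config might look like:

```yaml
# keep runs small, serial, and easy to clean up while debugging
dataset_processes: 1       # tip 2: eliminate concurrency in preprocessing
sample_packing: false      # tip 3: needed for very small datasets
eval_sample_packing: false
micro_batch_size: 1        # tip 5: minimize iteration time
max_steps: 1
val_set_size: 0
dataset_prepared_path: temp_debug/axolotl_outputs/data  # tip 6: a temp folder you can delete between runs
output_dir: temp_debug/axolotl_outputs/model
```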
## Debugging with VSCode

### Background

The below example shows how to configure VSCode to debug data preprocessing of the `sharegpt` format. This is the format used when you have the following in your axolotl config:

```yaml
datasets:
  - path: <path to your sharegpt formatted dataset> # example on HF Hub: philschmid/guanaco-sharegpt-style
    type: sharegpt
```

> [!Important]
> If you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files [.vscode/launch.json](../.vscode/launch.json) and [.vscode/tasks.json](../.vscode/tasks.json) for an example configuration.

> [!Tip]
> If you prefer to watch a video rather than read, you can skip to the [video tutorial](#video-tutorial) below (but doing both is recommended).

### Setup

Make sure you have an [editable install](https://setuptools.pypa.io/en/latest/userguide/development_mode.html) of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:

```bash
pip3 install packaging
pip3 install -e '.[flash-attn,deepspeed]'
```

#### Remote Hosts

If you are developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this [remote - SSH guide](https://code.visualstudio.com/docs/remote/ssh). You can also see the video below on [Docker and Remote SSH debugging](#video---attaching-to-docker-on-remote-host).

### Configuration

The easiest way to get started is to modify the [.vscode/launch.json](../.vscode/launch.json) file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.

For example, to mimic the command `cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_sharegpt.yml`, you would use the below configuration[^1]. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to `devtools` and set the `env` variable `HF_HOME` to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.

```jsonc
// .vscode/launch.json
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug axolotl prompt - sharegpt",
            "type": "python",
            "module": "accelerate.commands.launch",
            "request": "launch",
            "args": [
                "-m", "axolotl.cli.train", "dev_sharegpt.yml",
                // The flags below simplify debugging by overriding the axolotl config
                // with the debugging tips above. Modify as needed.
                "--dataset_processes=1",       // limits data preprocessing to one process
                "--max_steps=1",               // limits training to just one step
                "--batch_size=1",              // minimizes batch size
                "--micro_batch_size=1",        // minimizes batch size
                "--val_set_size=0",            // disables validation
                "--sample_packing=False",      // disables sample packing, which is necessary for small datasets
                "--eval_sample_packing=False", // disables sample packing on the eval set
                "--dataset_prepared_path=temp_debug/axolotl_outputs/data", // send data outputs to a temp folder
                "--output_dir=temp_debug/axolotl_outputs/model"            // send model outputs to a temp folder
            ],
            "console": "integratedTerminal",      // show output in the integrated terminal
            "cwd": "${workspaceFolder}/devtools", // set working directory to devtools from the root of the project
            "justMyCode": true,                   // step through only axolotl code
            "env": {"CUDA_VISIBLE_DEVICES": "0",  // since we aren't doing distributed training, limit to one GPU
                    "HF_HOME": "${workspaceFolder}/devtools/temp_debug/.hf-cache"}, // send HF cache to a temp folder
            "preLaunchTask": "cleanup-for-dataprep", // delete temp folders (see below)
        }
    ]
}
```

**Additional notes about this configuration:**

- The argument `justMyCode` is set to `true` so that you step through only the axolotl code. If you want to step into dependencies, set this to `false`.
- The `preLaunchTask`: `cleanup-for-dataprep` is defined in [.vscode/tasks.json](../.vscode/tasks.json) and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:
  - `./devtools/temp_debug/axolotl_outputs`
  - `./devtools/temp_debug/.hf-cache/datasets`

> [!Tip]
> You may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the `tasks.json` file depending on your use case.

Below is the [.vscode/tasks.json](../.vscode/tasks.json) file that defines the `cleanup-for-dataprep` task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task, `cleanup-for-dataprep`, is a composite task that combines the two. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the `preLaunchTask` argument of the `launch.json` file.

```jsonc
// .vscode/tasks.json
// this file is used by launch.json
{
    "version": "2.0.0",
    "tasks": [
        // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder
        {
            "label": "delete-outputs",
            "type": "shell",
            "command": "rm -rf temp_debug/axolotl_outputs",
            "options": { "cwd": "${workspaceFolder}/devtools" },
            "problemMatcher": []
        },
        // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder
        {
            "label": "delete-temp-hf-dataset-cache",
            "type": "shell",
            "command": "rm -rf temp_debug/.hf-cache/datasets",
            "options": { "cwd": "${workspaceFolder}/devtools" },
            "problemMatcher": []
        },
        // this task combines the two tasks above
        {
            "label": "cleanup-for-dataprep",
            "dependsOn": ["delete-outputs", "delete-temp-hf-dataset-cache"],
        }
    ]
}
```

### Customizing your debugger

Your debugging use case may differ from the example above. The easiest thing to do is to put your own axolotl config in the `devtools` folder and modify the `launch.json` file to use your config. You may also want to modify the `preLaunchTask` to delete different folders, or not delete anything at all.

### Video Tutorial

The following video tutorial walks through the above configuration and demonstrates how to debug with VSCode (click the image below to watch):

<div style="text-align: center; line-height: 0;">

<a href="https://youtu.be/xUUB11yeMmc" target="_blank"
title="How to debug Axolotl (for fine tuning LLMs)"><img
src="https://i.ytimg.com/vi/xUUB11yeMmc/maxresdefault.jpg"
style="border-radius: 10px; display: block; margin: auto;" width="560" height="315" /></a>

<figcaption style="font-size: smaller;"><a href="https://hamel.dev">Hamel Husain's</a> tutorial: <a href="https://www.youtube.com/watch?v=xUUB11yeMmc">Debugging Axolotl w/VSCode</a></figcaption>

</div>
<br>

## Debugging With Docker

Using [official Axolotl Docker images](https://hub.docker.com/r/winglian/axolotl/tags) is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.

### Setup

On the host that is running axolotl (e.g. if you are using a remote host), clone the axolotl repo and change your current directory to the root:

```bash
git clone https://github.com/OpenAccess-AI-Collective/axolotl
cd axolotl
```

> [!Tip]
> If you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.

Next, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:[^2]

```bash
docker run --privileged --gpus '"all"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src="${PWD}",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface winglian/axolotl:main-py3.10-cu118-2.0.1
```

> [!Tip]
> To understand which containers are available, see the [Docker section of the README](../README.md#docker) and the [DockerHub repo](https://hub.docker.com/r/winglian/axolotl/tags). For details of how the Docker containers are built, see axolotl's [Docker CI builds](../.github/workflows/main.yml).

You will now be in the container. Next, perform an editable install of Axolotl:

```bash
pip3 install packaging
pip3 install -e '.[flash-attn,deepspeed]'
```

### Attach To Container

Next, if you are using a remote host, [remote into this host with VSCode](https://code.visualstudio.com/docs/remote/ssh). If you are using a local host, you can skip this step.

Next, select `Dev Containers: Attach to Running Container...` using the command palette (`CMD + SHIFT + P`) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. Any changes you make to the code will be reflected both in the container and on the host.

Now you are ready to debug as described above (see [Debugging with VSCode](#debugging-with-vscode)).

### Video - Attaching To Docker On Remote Host

Here is a short video that demonstrates how to attach to a Docker container on a remote host:

<div style="text-align: center; line-height: 0;">

<a href="https://youtu.be/0AuoR7QnHR0" target="_blank"
title="Debugging Axolotl Part 2: Attaching to Docker on a Remote Host"><img
src="https://i.ytimg.com/vi/0AuoR7QnHR0/hqdefault.jpg"
style="border-radius: 10px; display: block; margin: auto;" width="560" height="315" /></a>

<figcaption style="font-size: smaller;"><a href="https://hamel.dev">Hamel Husain's</a> tutorial: <a href="https://youtu.be/0AuoR7QnHR0">Debugging Axolotl Part 2: Attaching to Docker on a Remote Host</a></figcaption>

</div>
<br>

[^1]: The config actually mimics the command `CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/sharegpt.yml`, but this is the same thing.

[^2]: Many of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. You can read more about these flags [here](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html).
docs/faq.qmd (new file)
@@ -0,0 +1,21 @@
---
title: FAQ
description: Frequently asked questions
---

**Q: The trainer stopped and hasn't progressed in several minutes.**

> A: Usually this is an issue with the GPUs communicating with each other. See the [NCCL doc](nccl.qmd).

**Q: Exitcode -9**

> A: This usually happens when you run out of system RAM.

**Q: Exitcode -7 while using deepspeed**

> A: Try upgrading deepspeed with: `pip install -U deepspeed`

**Q: AttributeError: 'DummyOptim' object has no attribute 'step'**

> A: You may be using deepspeed with a single GPU. Please don't set `deepspeed:` in the yaml or on the CLI.
docs/fsdp_qlora.qmd (new file)
@@ -0,0 +1,43 @@
---
title: FSDP + QLoRA
description: Use FSDP with QLoRA to fine-tune large LLMs on consumer GPUs.
format:
  html:
    toc: true
---

## Background

Using FSDP with QLoRA is essential for **fine-tuning larger (70b+ parameter) LLMs on consumer GPUs.** For example, you can use FSDP + QLoRA to train a 70b model on two 24GB GPUs[^1].

Below, we describe how to use this feature in Axolotl.

## Usage

To enable `QLoRA` with `FSDP`, you need to perform the following steps (a rough sketch of the combined result follows the list):

> [!Tip]
> See the [example config](#example-config) file in addition to reading these instructions.

1. Set `adapter: qlora` in your axolotl config file.
2. Enable FSDP in your axolotl config, as [described here](https://github.com/OpenAccess-AI-Collective/axolotl?tab=readme-ov-file#fsdp).
3. Use one of the supported model types: `llama`, `mistral` or `mixtral`.
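Putting the three steps together, a minimal sketch might look like the following. The `base_model` value and the `fsdp`/`fsdp_config` entries here are illustrative assumptions (the FSDP block mirrors the one shown in the multi-node doc); the maintained example config referenced below is authoritative.

```yaml
base_model: meta-llama/Llama-2-70b-hf  # assumption: any supported llama/mistral/mixtral model
adapter: qlora                         # step 1
load_in_4bit: true
fsdp:                                  # step 2: enable FSDP
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
```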
## Example Config

[examples/llama-2/qlora-fsdp.yml](../examples/llama-2/qlora-fsdp.yml) contains an example of how to enable QLoRA + FSDP in axolotl.

## References

- [PR #1378](https://github.com/OpenAccess-AI-Collective/axolotl/pull/1378) enabling QLoRA with FSDP in Axolotl.
- [Blog post](https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html) from the [Answer.AI](https://www.answer.ai/) team describing the work that enabled QLoRA with FSDP.
- Related HuggingFace PRs enabling FSDP + QLoRA:
  - Accelerate [PR#2544](https://github.com/huggingface/accelerate/pull/2544)
  - Transformers [PR#29587](https://github.com/huggingface/transformers/pull/29587)
  - TRL [PR#1416](https://github.com/huggingface/trl/pull/1416)
  - PEFT [PR#1550](https://github.com/huggingface/peft/pull/1550)

[^1]: This was enabled by [this work](https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html) from the Answer.AI team.
docs/images/4d-mask.png (new binary file, not shown; 239 KiB)
docs/input_output.qmd (new file)
@@ -0,0 +1,263 @@
---
title: Template-free prompt construction
description: "Template-free prompt construction with the `input_output` format"
---

<!-- TOC -->

- [Background](#background)
  - [Masking Inputs](#masking-inputs)
  - [You may not want prompt templates](#you-may-not-want-prompt-templates)
  - [The `input_output` format](#the-input_output-format)
- [Usage](#usage)
  - [1. Prepare Data](#1-prepare-data)
  - [2. Use `type: input_output`](#2-use-type-input_output)
  - [3. Check the prompts](#3-check-the-prompts)

<!-- /TOC -->

<a id="markdown-background" name="background"></a>

## Background

<a id="markdown-masking-inputs" name="masking-inputs"></a>

### Masking Inputs

One of the most popular features of [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) is setting the following configuration value:

```yaml
train_on_inputs: false
```

If you declare a [dataset format](https://github.com/OpenAccess-AI-Collective/axolotl?tab=readme-ov-file#dataset) such as `alpaca` or `chatml`, axolotl knows what is an input (i.e. human) vs. an output (i.e. the assistant) and masks the input labels so that your model can focus on predicting the outputs only.

<a id="markdown-you-may-not-want-prompt-templates" name="you-may-not-want-prompt-templates"></a>

### You may not want prompt templates

However, there are many situations where you don't want to use one of these formats or templates (I usually don't!). This is because they can:

- Add unnecessary boilerplate to your prompts.
- Create artifacts like special delimiters `<|im_start|>` that can quickly become footguns if you don't include them correctly at inference time.
- Enforce a *chat* interface when you do not want one. Sometimes you just want to fine-tune a model to a very specific task and do NOT want multi-turn conversations, roles, etc.
- Limit you to only certain roles that the template allows.

<a id="markdown-the-inputoutput-format" name="the-inputoutput-format"></a>

### The `input_output` format

You can construct your prompts without a template by using the `input_output` format, by setting `type: input_output` in your configuration file like this:

**config.yml**

```yaml
train_on_inputs: false # Mask segments of your data
datasets:
  - path: output.jsonl
    type: input_output # use template free prompt construction
```

Unlike `type: completion`, which is also template-free, `type: input_output` allows you to mask segments of your text. More details on how this works are described below.

<a id="markdown-usage" name="usage"></a>

## Usage

This is how you can use the `input_output` format:

<a id="markdown-1-prepare-data" name="1-prepare-data"></a>

### 1. Prepare Data

To use the `input_output` format, collect your data in the following format into a jsonl file (below is the first row from the file `output.jsonl`, pretty printed):

```bash
$ head -n1 output.jsonl | python -m json.tool

{
    "segments": [
        {
            "label": true,
            "text": "<s>Hello\n"
        },
        {
            "label": true,
            "text": "hi there!. "
        },
        {
            "label": false,
            "text": "goodbye "
        },
        {
            "label": true,
            "text": "farewell</s>"
        }
    ]
}
```

Set `label: false` when you want to mask a segment of text so that the model isn't trained on it. Some things to keep in mind:

> [!IMPORTANT]
> 1. **EOS, BOS, spaces, newlines etc. are entirely up to you. Axolotl concatenates all the segments as-is.** The tokenizer doesn't add anything additional. Notice how I added spaces, newlines, `<s>` (BOS), and `</s>` (EOS) myself.
> 2. Make sure you check the materialized output to validate that the prompt is getting assembled how you like.

<a id="markdown-2-use-type-inputoutput" name="2-use-type-inputoutput"></a>

### 2. Use `type: input_output`

Let's materialize data with our `output.jsonl` file by setting `type: input_output` in our axolotl config:

```yaml
# training_config.yaml
base_model: mistralai/Mistral-7B-v0.1
data_seed: 49
seed: 49

datasets:
  - path: output.jsonl
    type: input_output
val_set_size: 0.1

sequence_len: 896
sample_packing: false

micro_batch_size: 2
gradient_accumulation_steps: 3
eval_batch_size: 2
num_epochs: 1
learning_rate: 0.0002

train_on_inputs: false
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
```

You can use the following command to materialize your data. The `--debug` flag will print the tokens along with the labels so you can verify that the correct items are being ignored:

```bash
$ python -m axolotl.cli.preprocess training_config.yaml --debug

...
[2024-03-05 23:36:46,969] [INFO] [axolotl.check_example_labels:35] [PID:607731] [RANK:0] <s>(1, 1) Hello(22557, 22557)
(13, 13) hi(12014, 12014) there(736, 736) !(28808, 28808) .(28723, 28723)  (28705, 28705) good(-100, 1179) bye(-100, 17664)  (-100, 28705) fare(19111, 19111) well(5458, 5458) </s>(2, 2)
```

The format is `decoded_token`(`label`, `token_id`); for example, `<s>(1, 1)` means that the token is `<s>`, the label is `1` and the token_id is `1`. When the label is `-100`, that token is ignored for training.

<a id="markdown-3-check-the-prompts" name="3-check-the-prompts"></a>

### 3. Check the prompts

Here is another way to check the materialized output:

```python
from transformers import AutoTokenizer
from datasets import load_from_disk
import yaml

directory = !ls last_run_prepared/  # (IPython) list the prepared dataset folder
with open('training_config.yaml', 'r') as f:
    cfg = yaml.safe_load(f)
model_id = cfg['base_model']
tok = AutoTokenizer.from_pretrained(model_id)
ds = load_from_disk(f'last_run_prepared/{directory[0]}/')
```

```python
>>> row = ds[0]
>>> print(tok.decode(row['input_ids']))
<s> Hello
hi there!. goodbye farewell</s>
```

We can check that the right tokens are ignored by comparing the labels to each token:

```python
import pandas as pd
pd.DataFrame([{'token': tok.decode(i), 'label': l, 'id': i} for i, l in
              zip(row['input_ids'], row['labels'])])
```

|    | token  | label | id    |
|----|--------|-------|-------|
| 0  | \<s\>  | 1     | 1     |
| 1  | Hello  | 22557 | 22557 |
| 2  | \\n    | 13    | 13    |
| 3  | hi     | 12014 | 12014 |
| 4  | there  | 736   | 736   |
| 5  | !      | 28808 | 28808 |
| 6  | .      | 28723 | 28723 |
| 7  |        | 28705 | 28705 |
| 8  | good   | -100  | 1179  |
| 9  | bye    | -100  | 17664 |
| 10 |        | -100  | 28705 |
| 11 | fare   | 19111 | 19111 |
| 12 | well   | 5458  | 5458  |
| 13 | \</s\> | 2     | 2     |

If we look at the input data, the above table seems correct! (The jsonl version is repeated below for reference):

```bash
$ head -n1 output.jsonl | python -m json.tool

{
    "segments": [
        {
            "label": true,
            "text": "<s>Hello\n"
        },
        {
            "label": true,
            "text": "hi there!. "
        },
        {
            "label": false,
            "text": "goodbye "
        },
        {
            "label": true,
            "text": "farewell</s>"
        }
    ]
}
```
docs/mac.qmd (new file)
@@ -0,0 +1,22 @@
---
title: Mac M-series
description: Mac M-series support
---

Currently Axolotl on Mac is only partially usable; many of Axolotl's dependencies, including PyTorch, do not support MPS or have incomplete support.

Current support:

- [x] Support for all models
- [x] Full training of models
- [x] LoRA training
- [x] Sample packing
- [ ] FP16 and BF16 (awaiting AMP support for MPS in PyTorch)
- [ ] Tri Dao's flash-attn (until it is supported, use `sdp_attention` as an alternative; see the sketch after this list)
- [ ] xformers
- [ ] bitsandbytes (meaning no 4/8-bit loading and no bnb optimizers)
- [ ] qlora
- [ ] DeepSpeed
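For instance, to fall back to PyTorch's scaled-dot-product attention in place of flash-attn, a config sketch (using the same attention keys that appear in this repo's example configs) would be:

```yaml
flash_attention: false  # Tri Dao's flash-attn is not yet available on MPS
sdp_attention: true     # PyTorch SDPA as the alternative
```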
Untested:

- FSDP
docs/multi-node.qmd (new file)
@@ -0,0 +1,48 @@
---
title: Multi Node
description: How to use Axolotl on multiple machines
---

You will need to create a configuration for accelerate, either by using `accelerate config` and following the instructions, or you can use one of the presets below:

~/.cache/huggingface/accelerate/default_config.yaml
```yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
machine_rank: 0 # Set to 0 for the main machine, increment by one for other machines
main_process_ip: 10.0.0.4 # Set to the main machine's IP
main_process_port: 5000
main_training_function: main
mixed_precision: bf16
num_machines: 2 # Change to the number of machines
num_processes: 4 # The total number of GPUs (for example: if you have 2 machines with 4 GPUs each, put 8)
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
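On every other machine the accelerate config is identical except for the rank; for example, the second machine's `default_config.yaml` would differ only in:

```yaml
machine_rank: 1 # 0 on the main machine, 1 on the second, and so on
```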
Configure your model to use FSDP with, for example:
```yaml
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_offload_params: true
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
```

## Machine configuration

On each machine you need a copy of Axolotl; we suggest using the same commit to ensure compatibility.

You will also need to have the same configuration file for your model on each machine.

On the main machine only, make sure the port you set as `main_process_port` is open in TCP and reachable by the other machines.

All you have to do now is launch using accelerate as you usually would on each machine; the processes will start once you have launched accelerate on every machine.
docs/multipack.qmd (new file)
@@ -0,0 +1,76 @@
---
title: Multipack (Sample Packing)
description: Multipack is a technique to pack multiple sequences into a single batch to increase training throughput.
---

## Visualization of Multipack with Flash Attention

Because Flash Attention simply drops the attention mask, we do not need to construct a 4d attention mask. We only need to concatenate the sequences into a single batch and let flash attention know where each new sequence begins.

4k context, bsz = 4; each character represents 256 tokens; X represents a padding token.

```
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
[[ A A A A A A A A A A A ]
   B B B B B B ]
   C C C C C C C ]
   D D D D ]]

[[ E E E E E E E E ]
 [ F F F F ]
 [ G G G ]
 [ H H H H ]]

[[ I I I ]
 [ J J J ]
 [ K K K K K]
 [ L L L ]]
```

After padding to the longest input in each step:
```
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
[[ A A A A A A A A A A A ]
   B B B B B B X X X X X X ]
   C C C C C C C X X X X ]
   D D D D X X X X X X X ]]

[[ E E E E E E E E ]
 [ F F F F X X X X ]
 [ G G G X X X X X ]
 [ H H H H X X X X ]]

[[ I I I X X ]
 [ J J J X X ]
 [ K K K K K ]
 [ L L L X X ]]
```

With packing (note it's the same effective number of tokens per step, but a true bsz of 1):
```
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
[[ A A A A A A A A A A A B B B B B
   B C C C C C C C D D D D E E E E
   E E E E F F F F F G G G H H H H
   I I I J J J J K K K K K L L L X ]]
```

cu_seqlens:
```
[[ 0, 11, 17, 24, 28, 36, 41, 44, 48, 51, 55, 60, 64]]
```

## Multipack without Flash Attention

Multipack can still be achieved without Flash Attention, but with lower packing efficiency, as without flash attention we are not able to join multiple batches into a single batch due to context-length limits. We can use either PyTorch's Scaled Dot Product Attention implementation or the native PyTorch attention implementation along with [4d attention masks](https://github.com/huggingface/transformers/pull/27539) to pack sequences together and avoid cross attention.

<img src="./images/4d-mask.png" alt="axolotl" width="800">
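In axolotl, multipack is switched on from the training config. A minimal sketch using keys that appear throughout the example configs in this changeset:

```yaml
sequence_len: 4096
sample_packing: true       # enable multipack
pad_to_sequence_len: true  # pad packed batches out to the full context length
flash_attention: true      # preferred path; see the 4d-mask fallback above
```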
docs/nccl.qmd (new file)
@@ -0,0 +1,49 @@
---
title: NCCL
description: Troubleshooting NCCL issues
---

NVIDIA NCCL is a library to facilitate and optimize multi-GPU communication operations, such as broadcast, all-gather, reduce, all-reduce, etc. Broadly, NCCL configuration is highly environment-specific and is configured via several [environment variables](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/env.html). A common NCCL-related problem occurs when a long-running operation times out, causing the training process to abort:

```text
Watchdog caught collective operation timeout: WorkNCCL(SeqNum=42, OpType=ALLGATHER, Timeout(ms)=1800000) ran for 1806948 milliseconds before timing out.
```

Often, this timeout will happen after 30 minutes (the default setting) and is accompanied by below-average power consumption with near 100% GPU utilization before the error is raised. Nvidia recommends [disabling PCI access control services (ACS)](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#pci-access-control-services-acs) as a possible solution if this is available to you.

Forcing cross-GPU communication via [NVLink](https://en.wikipedia.org/wiki/NVLink) may help without increasing timeouts. To verify that your configuration is leveraging NVLink, run the following command:

```shell
nvidia-smi nvlink --status
```

To force NCCL to use NVLink, simply set this in the environment:

```shell
export NCCL_P2P_LEVEL=NVL
```

If NVLink is not available in your environment, there are other options for `NCCL_P2P_LEVEL` in the table below:

| NCCL_P2P_LEVEL | Description |
| -------------- | ----------- |
| PIX | P2P data transfers through no more than a single PCIe bridge. Faster data transfer rates than paths involving multiple bridges, but slower compared to direct GPU-to-GPU communication. |
| PXB | P2P data transfers through multiple PCIe bridges, but not going through the PCIe Host Bridge; this path involves a complex routing process, potentially incurring a moderate level of latency. |
| PHB | P2P data transfers over PCIe and through a PCIe Host Bridge, typically involving the CPU, which can facilitate direct memory access but might introduce additional latency compared to more direct paths (e.g. PIX, NVL). |

To validate that acceptable data transfer speeds exist for your training job, running [NCCL Tests](https://github.com/NVIDIA/nccl-tests/blob/master/README.md) can help pinpoint bottlenecks, for example:

```shell
./build/all_reduce_perf -b 8 -e 128M -f 2 -g 3
```

It can be useful when debugging NCCL communication timeouts to activate additional logging in both PyTorch and NCCL:

```shell
export NCCL_DEBUG=INFO
export NCCL_DEBUG_SUBSYS=ALL
export TORCH_DISTRIBUTED_DEBUG=INFO
export TORCHELASTIC_ERROR_FILE=/PATH/TO/torcherror.log
```

Finally, if you believe your training job needs more time, you can increase the timeout past 30 minutes by setting the `ddp_timeout` value in the Axolotl configuration. See [PyTorch init_process_group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for documentation on this value.
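For example, assuming `ddp_timeout` maps through to HuggingFace's `TrainingArguments.ddp_timeout` (which takes seconds), raising the limit to an hour would look like:

```yaml
ddp_timeout: 3600 # raise the collective timeout from the 30-minute default
```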
docs/rlhf.qmd (new file)
@@ -0,0 +1,72 @@
---
title: "RLHF (Beta)"
description: "Reinforcement Learning from Human Feedback is a method whereby a language model is optimized from data using human feedback."
---

### Overview

Reinforcement Learning from Human Feedback is a method whereby a language model is optimized from data using human feedback. Various methods include, but are not limited to:

- Proximal Policy Optimization (PPO) (not yet supported in axolotl)
- Direct Preference Optimization (DPO)
- Identity Preference Optimization (IPO)

### RLHF using Axolotl

> [!IMPORTANT]
> This is a BETA feature and many features are not fully implemented. You are encouraged to open new PRs to improve the integration and functionality.

The various RL training methods are implemented in trl and wrapped via axolotl. Below are examples of how you can use various preference datasets to train models that use ChatML.

#### DPO
```yaml
rl: dpo
datasets:
  - path: Intel/orca_dpo_pairs
    split: train
    type: chatml.intel
  - path: argilla/ultrafeedback-binarized-preferences
    split: train
    type: chatml.argilla
```

#### IPO
```yaml
rl: ipo
```
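The doc stops at the `rl` flag for IPO; assuming IPO consumes the same preference-dataset types as the DPO example above (both methods come from trl), a fuller sketch might be:

```yaml
rl: ipo
datasets:
  - path: argilla/ultrafeedback-binarized-preferences
    split: train
    type: chatml.argilla
```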
#### ORPO

Paper: https://arxiv.org/abs/2403.07691

```yaml
rl: orpo
orpo_alpha: 0.1
remove_unused_columns: false

chat_template: chatml
datasets:
  - path: argilla/ultrafeedback-binarized-preferences-cleaned
    type: orpo.chat_template
```

#### Using local dataset files
```yaml
datasets:
  - ds_type: json
    data_files:
      - orca_rlhf.jsonl
    split: train
    type: chatml.intel
```

#### TRL auto-unwrap for PEFT

TRL supports auto-unwrapping PEFT models, so a reference model does not need to be loaded separately, reducing the VRAM needed. This is on by default. To turn it off, pass the following config:

```yaml
# load ref model when adapter training.
rl_adapter_ref_model: true
```
examples/cerebras/btlm-ft.yml (new file)
@@ -0,0 +1,89 @@
base_model: cerebras/btlm-3b-8k-base
model_type: AutoModelForCausalLM
tokenizer_type: GPT2Tokenizer
trust_remote_code: true
tokenizer_use_fast: true
tokenizer_legacy: true

load_in_8bit: false
load_in_4bit: false
strict: false
push_dataset_to_hub:
hf_use_auth_token: true
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_prepared_run
val_set_size: 0.05

adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
sample_packing: false
sample_packing_eff_est:
sample_packing_seq_len_multiplier:
total_num_tokens:

lora_r:
lora_alpha:
lora_dropout:
lora_target_modules:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

output_dir: btlm-out
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch
adam_beta2: 0.95
adam_eps: 0.000000001
max_grad_norm: 1.0

torchdistx_path:
lr_scheduler: cosine
lr_quadratic_warmup: true
learning_rate: 0.000085
train_on_inputs: true
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1

xformers_attention:
flash_attention: true
sdp_attention:
flash_optimum:

gptq_groupsize:
gptq_model_v1:

warmup_steps: 32
evals_per_epoch: 4
saves_per_epoch: 1
save_total_limit:

debug:
deepspeed:
weight_decay: 0.1
special_tokens:
  pad_token: "<|endoftext|>"
fsdp:
# - full_shard
# - auto_wrap
fsdp_config:
# fsdp_state_dict_type: FULL_STATE_DICT
# fsdp_transformer_layer_cls_to_wrap: BTLMBlock
@@ -1,5 +1,4 @@
 base_model: cerebras/Cerebras-GPT-1.3B
-base_model_config: cerebras/Cerebras-GPT-1.3B
 load_in_8bit: false
 load_in_4bit: true
 strict: false
@@ -7,12 +6,11 @@ push_dataset_to_hub:
 datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
-max_packed_sequence_len: 2048
 lora_r: 16
 lora_alpha: 32
 lora_dropout: 0.05
@@ -25,7 +23,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
 batch_size: 4
@@ -37,8 +35,8 @@ lr_scheduler: cosine
 learning_rate: 0.0002
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: true
 gradient_checkpointing: true
 early_stopping_patience:
@@ -50,8 +48,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.1
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
-base_model_config: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
-
 load_in_8bit: true
 load_in_4bit: false
@@ -11,12 +9,13 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./lora-out

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 adapter: lora
 lora_model_dir:
@@ -29,20 +28,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -52,10 +51,11 @@ local_rank:
 logging_steps: 1
 xformers_attention:
 flash_attention: true
+s2_attention:

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-13b-hf
-base_model_config: codellama/CodeLlama-13b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
-
 load_in_8bit: false
 load_in_4bit: true
@@ -11,15 +9,16 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
 lora_model_dir:

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 lora_r: 32
 lora_alpha: 16
@@ -31,20 +30,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: paged_adamw_32bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -56,8 +55,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
-base_model_config: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true
-
 load_in_8bit: true
 load_in_4bit: false
@@ -11,12 +9,13 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./lora-out

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 adapter: lora
 lora_model_dir:
@@ -29,20 +28,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -52,10 +51,11 @@ local_rank:
 logging_steps: 1
 xformers_attention:
 flash_attention: true
+s2_attention:

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
|
|||||||
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-34b-hf
-base_model_config: codellama/CodeLlama-34b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
@@ -11,15 +9,16 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
 lora_model_dir:

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 lora_r: 32
 lora_alpha: 16
@@ -31,20 +30,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: paged_adamw_32bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -56,8 +55,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
-base_model_config: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
@@ -11,12 +9,13 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./lora-out

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 adapter: lora
 lora_model_dir:
@@ -29,20 +28,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -52,10 +51,11 @@ local_rank:
 logging_steps: 1
 xformers_attention:
 flash_attention: true
+s2_attention:

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,8 +1,6 @@
 base_model: codellama/CodeLlama-7b-hf
-base_model_config: codellama/CodeLlama-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: CodeLlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
@@ -11,15 +9,16 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
 lora_model_dir:

-sequence_len: 100000
+sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 lora_r: 32
 lora_alpha: 16
@@ -31,20 +30,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: paged_adamw_32bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -56,8 +55,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
examples/colab-notebooks/colab-axolotl-example.ipynb (new file, 216 lines)
@@ -0,0 +1,216 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "AKjdG7tbTb-n"
   },
   "source": [
    "# Example notebook for running Axolotl on Google Colab"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "RcbNpOgWRcii"
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "# Check that a GPU is available; a T4 (free tier) is enough to run this notebook\n",
    "assert (torch.cuda.is_available()==True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "h3nLav8oTRA5"
   },
   "source": [
    "## Install Axolotl and dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "3c3yGAwnOIdi",
    "outputId": "e3777b5a-40ef-424f-e181-62dfecd1dd01"
   },
   "outputs": [],
   "source": [
    "!pip install torch==\"2.1.2\"\n",
    "!pip install -e git+https://github.com/OpenAccess-AI-Collective/axolotl#egg=axolotl\n",
    "!pip install flash-attn==\"2.5.0\"\n",
    "!pip install deepspeed==\"0.13.1\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "BW2MFr7HTjub"
   },
   "source": [
    "## Create a YAML config file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "9pkF2dSoQEUN"
   },
   "outputs": [],
   "source": [
    "import yaml\n",
    "\n",
    "# Your YAML string\n",
    "yaml_string = \"\"\"\n",
    "base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T\n",
    "model_type: LlamaForCausalLM\n",
    "tokenizer_type: LlamaTokenizer\n",
    "is_llama_derived_model: true\n",
    "\n",
    "load_in_8bit: false\n",
    "load_in_4bit: true\n",
    "strict: false\n",
    "\n",
    "datasets:\n",
    "  - path: mhenrichsen/alpaca_2k_test\n",
    "    type: alpaca\n",
    "dataset_prepared_path:\n",
    "val_set_size: 0.05\n",
    "output_dir: ./qlora-out\n",
    "\n",
    "adapter: qlora\n",
    "lora_model_dir:\n",
    "\n",
    "sequence_len: 1096\n",
    "sample_packing: true\n",
    "pad_to_sequence_len: true\n",
    "\n",
    "lora_r: 32\n",
    "lora_alpha: 16\n",
    "lora_dropout: 0.05\n",
    "lora_target_modules:\n",
    "lora_target_linear: true\n",
    "lora_fan_in_fan_out:\n",
    "\n",
    "wandb_project:\n",
    "wandb_entity:\n",
    "wandb_watch:\n",
    "wandb_name:\n",
    "wandb_log_model:\n",
    "\n",
    "mlflow_experiment_name: colab-example\n",
    "\n",
    "gradient_accumulation_steps: 1\n",
    "micro_batch_size: 1\n",
    "num_epochs: 4\n",
    "max_steps: 20\n",
    "optimizer: paged_adamw_32bit\n",
    "lr_scheduler: cosine\n",
    "learning_rate: 0.0002\n",
    "\n",
    "train_on_inputs: false\n",
    "group_by_length: false\n",
    "bf16: false\n",
    "fp16: true\n",
    "tf32: false\n",
    "\n",
    "gradient_checkpointing: true\n",
    "early_stopping_patience:\n",
    "resume_from_checkpoint:\n",
    "local_rank:\n",
    "logging_steps: 1\n",
    "xformers_attention:\n",
    "flash_attention: false\n",
    "\n",
    "warmup_steps: 10\n",
    "evals_per_epoch:\n",
    "saves_per_epoch:\n",
    "debug:\n",
    "deepspeed:\n",
    "weight_decay: 0.0\n",
    "fsdp:\n",
    "fsdp_config:\n",
    "special_tokens:\n",
    "\n",
    "\"\"\"\n",
    "\n",
    "# Convert the YAML string to a Python dictionary\n",
    "yaml_dict = yaml.safe_load(yaml_string)\n",
    "\n",
    "# Specify your file path\n",
    "file_path = 'test_axolotl.yaml'\n",
    "\n",
    "# Write the YAML file\n",
    "with open(file_path, 'w') as file:\n",
    "    yaml.dump(yaml_dict, file)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "bidoj8YLTusD"
   },
   "source": [
    "## Launch the training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ydTI2Jk2RStU",
    "outputId": "d6d0df17-4b53-439c-c802-22c0456d301b"
   },
   "outputs": [],
   "source": [
    "# By using the !, the command is executed as a bash command\n",
    "!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Play with inference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# By using the !, the command is executed as a bash command\n",
    "!accelerate launch -m axolotl.cli.inference /content/test_axolotl.yaml \\\n",
    "    --qlora_model_dir=\"./qlora-out\" --gradio"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
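The notebook validates the config by round-tripping it through `yaml.safe_load` before training. The same check can be run from a shell before committing GPU time; a minimal sketch, assuming the config was written to `test_axolotl.yaml` as in the notebook above:

```shell
# Sketch: fail fast on YAML syntax errors; test_axolotl.yaml is the path used by the notebook
python -c "import yaml; yaml.safe_load(open('test_axolotl.yaml')); print('config parses')"
```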
@@ -1,8 +1,8 @@
 base_model: tiiuae/falcon-7b
-base_model_config: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

 load_in_8bit: true
 load_in_4bit: false
 gptq: false
@@ -11,8 +11,8 @@ push_dataset_to_hub:
 datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca:chat
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 adapter: lora
 lora_model_dir:
 sequence_len: 2048
@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./falcon-7b
 batch_size: 2
@@ -38,8 +38,8 @@ lr_scheduler: cosine
 learning_rate: 0.00003
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: true
 gradient_checkpointing: true
 early_stopping_patience:
@@ -51,8 +51,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 40
-eval_steps: 5
-save_steps: 43
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -60,5 +60,5 @@ fsdp:
 fsdp_config:
 special_tokens:
   pad_token: "<|endoftext|>"
-  bos_token: ">>ABSTRACT<<"
+  bos_token: "<|endoftext|>"
   eos_token: "<|endoftext|>"
@@ -1,11 +1,11 @@
 # 1b: tiiuae/falcon-rw-1b
 # 40b: tiiuae/falcon-40b
 base_model: tiiuae/falcon-7b
-base_model_config: tiiuae/falcon-7b
 # required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

 load_in_8bit: false
 # enable 4bit for QLoRA
 load_in_4bit: true
@@ -17,8 +17,8 @@ datasets:
     data_files:
       - Chain-of-Thought/formatted_cot_data/gsm8k_train.json
     type: "alpaca:chat"
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 # enable QLoRA
 adapter: qlora
 lora_model_dir:
@@ -40,7 +40,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out

@@ -53,7 +53,7 @@ output_dir: ./qlora-out
 # decrease if OOM, increase for max VRAM utilization
 micro_batch_size: 1
 gradient_accumulation_steps: 2
-num_epochs: 3
+num_epochs: 4
 # Optimizer for QLoRA
 optimizer: paged_adamw_32bit
 torchdistx_path:
@@ -64,8 +64,8 @@ lr_scheduler: cosine
 learning_rate: 0.0002
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: true
 gradient_checkpointing: true
 # stop training after this many evaluation losses have increased in a row
@@ -80,8 +80,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-eval_steps: 5
-save_steps: 10
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.000001
@@ -89,5 +89,5 @@ fsdp:
 fsdp_config:
 special_tokens:
   pad_token: "<|endoftext|>"
-  bos_token: ">>ABSTRACT<<"
+  bos_token: "<|endoftext|>"
   eos_token: "<|endoftext|>"
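The `# decrease if OOM, increase for max VRAM utilization` comment above concerns `micro_batch_size`; the batch the optimizer effectively steps on is micro_batch_size × gradient_accumulation_steps × number of GPUs. A minimal sketch of the arithmetic for this config, where the GPU count is an assumption:

```shell
# Effective batch size for the falcon qlora settings above (micro_batch_size=1, gradient_accumulation_steps=2)
NUM_GPUS=2  # hypothetical world size; replace with your GPU count
echo $((1 * 2 * NUM_GPUS))  # prints 4 on a two-GPU node
```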
@@ -1,8 +1,8 @@
 base_model: tiiuae/falcon-7b
-base_model_config: tiiuae/falcon-7b
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

 load_in_8bit: false
 load_in_4bit: false
 gptq: false
@@ -11,8 +11,8 @@ push_dataset_to_hub:
 datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca:chat
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 adapter:
 lora_model_dir:
 sequence_len: 2048
@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./falcon-7b
 batch_size: 2
@@ -38,8 +38,8 @@ lr_scheduler: cosine
 learning_rate: 0.00003
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: true
 gradient_checkpointing: true
 early_stopping_patience:
@@ -51,8 +51,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 40
-eval_steps: 5
-save_steps: 43
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -60,5 +60,5 @@ fsdp:
 fsdp_config:
 special_tokens:
   pad_token: "<|endoftext|>"
-  bos_token: ">>ABSTRACT<<"
+  bos_token: "<|endoftext|>"
   eos_token: "<|endoftext|>"
examples/gemma/qlora.yml (new file, 65 lines)
@@ -0,0 +1,65 @@
# use google/gemma-7b if you have access
base_model: mhenrichsen/gemma-7b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

# huggingface repo
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
val_set_size: 0.1
output_dir: ./out

adapter: qlora
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true

sequence_len: 4096
sample_packing: false
pad_to_sequence_len: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:


gradient_accumulation_steps: 3
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
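The new gemma config can presumably be launched with the same entrypoint the other examples in this changeset use; a sketch, not stated in the gemma file itself:

```shell
# Launch pattern borrowed from the llama-2 and mistral READMEs in this same changeset
accelerate launch -m axolotl.cli.train examples/gemma/qlora.yml
```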
@@ -1,5 +1,4 @@
 base_model: EleutherAI/gpt-j-6b
-base_model_config: EleutherAI/gpt-j-6b
 load_in_8bit: false
 load_in_4bit: true
 strict: false
@@ -7,8 +6,8 @@ push_dataset_to_hub:
 datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
@@ -22,7 +21,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
 gradient_accumulation_steps: 2
@@ -34,8 +33,8 @@ lr_scheduler: cosine
 learning_rate: 0.0001
 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: true
 gradient_checkpointing: true
 early_stopping_patience:
@@ -47,8 +46,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.1
@@ -1,12 +1,11 @@
 base_model: huggyllama/llama-7b
-base_model_config: huggyllama/llama-7b
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: false
 datasets:
   - path: openaccess-ai-collective/jeopardy
     type: jeopardy
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
 val_set_size: 0.02
 adapter:
 lora_model_dir:
@@ -20,19 +19,19 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./jeopardy-bot-7b
 gradient_accumulation_steps: 1
 micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 torchdistx_path:
 lr_scheduler: cosine
 learning_rate: 0.00003
 train_on_inputs: false
 group_by_length: false
-bf16: true
+bf16: auto
 tf32: true
 early_stopping_patience:
 resume_from_checkpoint:
@@ -43,8 +42,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-eval_steps: 110
-save_steps: 660
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.1
|||||||
@@ -9,12 +9,16 @@ gradient_accumulation_steps: 2
|
|||||||
micro_batch_size: 1
|
micro_batch_size: 1
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
accelerate launch scripts/finetune.py examples/llama-2/qlora.yml
|
accelerate launch -m axolotl.cli.train examples/llama-2/qlora.yml
|
||||||
|
|
||||||
```
|
```
|
||||||
or
|
or
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
accelerate launch scripts/finetune.py examples/llama-2/lora.yml
|
accelerate launch -m axolotl.cli.train examples/llama-2/lora.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
To launch a full finetuning with 16-bit precision:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
accelerate launch -m axolotl.cli.train examples/llama-2/fft_optimized.yml
|
||||||
```
|
```
|
||||||
|
|||||||
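For inference after either run, the colab notebook added in this changeset uses the `axolotl.cli.inference` entrypoint pointed at the adapter output directory. A sketch adapted to the lora example; the `--lora_model_dir` flag is an assumption by analogy with the notebook's `--qlora_model_dir`:

```shell
# Sketch: gradio inference against the lora run's output directory (flag assumed by analogy)
accelerate launch -m axolotl.cli.inference examples/llama-2/lora.yml \
    --lora_model_dir="./lora-out" --gradio
```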
examples/llama-2/fft_optimized.yml (new file, 68 lines)
@@ -0,0 +1,68 @@
base_model: NousResearch/Llama-2-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_qkv: false
flash_attn_fuse_mlp: true

warmup_steps: 100
evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed: #deepspeed_configs/zero2.json # multi-gpu only
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
@@ -1,8 +1,6 @@
 base_model: TheBloke/Llama-2-7B-GPTQ
-base_model_config: TheBloke/Llama-2-7B-GPTQ
-is_llama_derived_model: false
 gptq: true
-gptq_bits: 4
+gptq_disable_exllama: true
 model_type: AutoModelForCausalLM
 tokenizer_type: LlamaTokenizer
 tokenizer_use_fast: true
@@ -15,8 +13,8 @@ hf_use_auth_token: true
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 adapter: lora
 lora_model_dir:
 sequence_len: 4096
@@ -33,12 +31,12 @@ lora_target_linear:
 lora_fan_in_fan_out:
 wandb_project:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./model-out
 gradient_accumulation_steps: 1
 micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_torch
 adam_beta2: 0.95
 adam_eps: 0.00001
@@ -62,11 +60,9 @@ xformers_attention:
 flash_attention:
 sdp_attention:
 flash_optimum:
-gptq_groupsize:
-gptq_model_v1:
 warmup_steps: 100
-eval_steps:
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.1
examples/llama-2/loftq.yml (new file, 69 lines)
@@ -0,0 +1,69 @@
base_model: NousResearch/Llama-2-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
peft:
  loftq_config:
    loftq_bits: 4

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
s2_attention:

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
@@ -1,8 +1,6 @@
-base_model: meta-llama/Llama-2-7b-hf
-base_model_config: meta-llama/Llama-2-7b-hf
+base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: true
 load_in_4bit: false
@@ -11,12 +9,13 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 adapter: lora
 lora_model_dir:
@@ -29,20 +28,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -52,16 +51,16 @@ local_rank:
 logging_steps: 1
 xformers_attention:
 flash_attention: true
+s2_attention:

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
 fsdp:
 fsdp_config:
 special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
examples/llama-2/qlora-fsdp.yml (new file, 76 lines)
@@ -0,0 +1,76 @@
base_model: NousResearch/Llama-2-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: yahma/alpaca-cleaned
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./qlora-out

adapter: qlora
lora_model_dir:

sequence_len: 512
sample_packing: false
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 4
num_epochs: 4
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
  - full_shard
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
  fsdp_state_dict_type: SHARDED_STATE_DICT
special_tokens:
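The fsdp section above shards the model with `full_shard` and wraps `LlamaDecoderLayer`, so this config only does useful work under a multi-process launch. A sketch of such a launch; the process count is an assumption matching a two-GPU node:

```shell
# Sketch: multi-GPU FSDP launch; --num_processes is standard accelerate, 2 is a hypothetical GPU count
accelerate launch --num_processes 2 -m axolotl.cli.train examples/llama-2/qlora-fsdp.yml
```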
@@ -1,8 +1,6 @@
-base_model: meta-llama/Llama-2-7b-hf
-base_model_config: meta-llama/Llama-2-7b-hf
+base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
@@ -11,8 +9,8 @@ strict: false
 datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
@@ -20,6 +18,7 @@ lora_model_dir:

 sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 lora_r: 32
 lora_alpha: 16
@@ -31,20 +30,20 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 2
-num_epochs: 3
+num_epochs: 4
 optimizer: paged_adamw_32bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -56,14 +55,12 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-eval_steps: 20
-save_steps:
+evals_per_epoch: 4
+eval_table_size:
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
 fsdp:
 fsdp_config:
 special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
@@ -1,8 +1,7 @@
-base_model: meta-llama/Llama-2-7b-hf
-base_model_config: meta-llama/Llama-2-7b-hf
+base_model: NousResearch/Llama-2-7b-hf
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true

 load_in_8bit: false
 load_in_4bit: true
@@ -11,8 +10,8 @@ strict: false
 datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
 output_dir: ./relora-out

 adapter: qlora
@@ -20,6 +19,7 @@ lora_model_dir:

 sequence_len: 4096
 sample_packing: true
+pad_to_sequence_len: true

 lora_r: 8
 lora_alpha: 16
@@ -35,20 +35,20 @@ relora_cpu_offload: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4
 micro_batch_size: 4
-num_epochs: 3
+num_epochs: 4
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
 learning_rate: 0.0002

 train_on_inputs: false
 group_by_length: false
-bf16: true
-fp16: false
+bf16: auto
+fp16:
 tf32: false

 gradient_checkpointing: true
@@ -60,8 +60,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-eval_steps: 20
-save_steps: 50
+evals_per_epoch: 4
+saves_per_epoch: 1
 debug:
 deepspeed:
 weight_decay: 0.0
examples/mamba/config.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
base_model: state-spaces/mamba-2.8b
model_type: MambaLMHeadModel
tokenizer_type: AutoTokenizer
tokenizer_config: EleutherAI/gpt-neox-20b

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.0
output_dir: ./out

sequence_len: 2048
sample_packing: false
pad_to_sequence_len: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 2
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 5e-5

train_on_inputs: false
group_by_length: true

bf16: auto
fp16:
tf32: true

gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention:

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
tokens:
save_safetensors: False
examples/mistral/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
**Mistral 7B** is a language model with a total of 7.3 billion parameters, showcasing a notable performance across a variety of benchmarks.

Fine Tune:
```shell
accelerate launch -m axolotl.cli.train examples/mistral/config.yml
```

If you run into CUDA OOM, use deepspeed with config zero2.json:
```shell
accelerate launch -m axolotl.cli.train examples/mistral/config.yml --deepspeed deepspeed_configs/zero2.json
```
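If zero2 still runs out of memory, the same `--deepspeed` flag accepts a stage-3 config; this sketch assumes a `deepspeed_configs/zero3.json` ships alongside the zero2 file referenced above:

```shell
# Sketch: escalate from ZeRO stage 2 to stage 3 (zero3.json assumed to exist in the repo)
accelerate launch -m axolotl.cli.train examples/mistral/config.yml --deepspeed deepspeed_configs/zero3.json
```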
examples/mistral/config.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./out

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
examples/mistral/lora-mps.yml (new file, 79 lines)
@@ -0,0 +1,79 @@
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0
output_dir: ./lora-out
eval_sample_packing: false

adapter: lora
lora_model_dir:

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16: false
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: false
sdp_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_table_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
examples/mistral/lora.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./lora-out

adapter: lora
lora_model_dir:

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
74 examples/mistral/mixtral-qlora-fsdp.yml Normal file
@@ -0,0 +1,74 @@
base_model: mistralai/Mixtral-8x7B-v0.1
model_type: AutoModelForCausalLM
tokenizer_type: LlamaTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
output_dir: ./qlora-out

model_config:
  output_router_logits: true

adapter: qlora
lora_model_dir:

sequence_len: 1024
sample_packing: false
pad_to_sequence_len: false

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
weight_decay: 0.0
fsdp:
  - full_shard
fsdp_config:
  fsdp_transformer_layer_cls_to_wrap: MixtralSparseMoeBlock
special_tokens:
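Because this config shards the model with FSDP (`fsdp: [full_shard]`, wrapping `MixtralSparseMoeBlock`), it assumes a multi-GPU launch; a sketch, assuming `accelerate` is already configured for the machine:

```shell
# make sure you've configured accelerate for multiple GPUs first
accelerate launch -m axolotl.cli.train examples/mistral/mixtral-qlora-fsdp.yml
```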
91 examples/mistral/mixtral.yml Normal file
@@ -0,0 +1,91 @@
base_model: mistralai/Mixtral-8x7B-v0.1
model_type: AutoModelForCausalLM
tokenizer_type: LlamaTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./qlora-out

## You can optionally freeze the entire model and unfreeze a subset of parameters
unfrozen_parameters:
# - ^lm_head.weight$
# - ^model.embed_tokens.weight$[:32000]
# - model.layers.2[0-9]+.block_sparse_moe.gate
# - model.layers.2[0-9]+.block_sparse_moe.experts
# - model.layers.3[0-9]+.block_sparse_moe.gate
# - model.layers.3[0-9]+.block_sparse_moe.experts

model_config:
  output_router_logits: true

adapter: qlora
lora_model_dir:

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
#lora_target_modules:
# - gate
# - q_proj
# - k_proj
# - v_proj
# - o_proj
# - w1
# - w2
# - w3

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 2
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed: deepspeed_configs/zero2.json
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
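This config points `deepspeed` at `deepspeed_configs/zero2.json` directly, so a plain launch should pick up ZeRO-2 without an extra `--deepspeed` flag; a sketch:

```shell
accelerate launch -m axolotl.cli.train examples/mistral/mixtral.yml
```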
77 examples/mistral/qlora.yml Normal file
@@ -0,0 +1,77 @@
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./qlora-out

adapter: qlora
lora_model_dir:

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
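For the dataset-driven configs above, tokenization can be done ahead of time into `dataset_prepared_path` (here `last_run_prepared`) so the GPU run starts immediately; a sketch using the preprocess entry point shown in the StableLM-2 README later in this diff:

```shell
# tokenize and pack on CPU first, then train
CUDA_VISIBLE_DEVICES="" python -m axolotl.cli.preprocess examples/mistral/qlora.yml
python -m axolotl.cli.train examples/mistral/qlora.yml
```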
@@ -1,12 +1,11 @@
base_model: mosaicml/mpt-7b
-base_model_config: mosaicml/mpt-7b
tokenizer_type: AutoTokenizer
trust_remote_code: true  # required for mpt as their model class is not merged into transformers yet
load_in_8bit: false
datasets:
  - path: vicgalle/alpaca-gpt4
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.02
adapter:
lora_model_dir:
@@ -22,19 +21,19 @@ lora_fan_in_fan_out: false
wandb_project: mpt-alpaca-7b
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./mpt-alpaca-7b
gradient_accumulation_steps: 1
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
-bf16: true
+bf16: auto
tf32: true
early_stopping_patience:
resume_from_checkpoint:
@@ -45,8 +44,8 @@ flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
-eval_steps: 110
-save_steps: 660
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0001
@@ -1,5 +1,4 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
@@ -9,12 +8,12 @@ push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.02
adapter:
lora_model_dir:
-sequence_len: 256
-max_packed_sequence_len:
+sequence_len: 1024
+sample_packing: true
lora_r:
lora_alpha:
lora_dropout:
@@ -24,16 +23,16 @@ lora_fan_in_fan_out:
wandb_project:
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./openllama-out
gradient_accumulation_steps: 1
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
-learning_rate: 0.00001
+learning_rate: 0.000003
train_on_inputs: false
group_by_length: false
float16: true
@@ -45,13 +44,13 @@ early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
gptq_groupsize:
gptq_model_v1:
-warmup_steps: 10
-eval_steps: 50
-save_steps:
+warmup_steps: 20
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1
@@ -1,5 +1,4 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
@@ -9,12 +8,12 @@ push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.02
adapter: lora
lora_model_dir:
-sequence_len: 256
-max_packed_sequence_len:
+sequence_len: 1024
+sample_packing: true
lora_r: 8
lora_alpha: 16
lora_dropout: 0.0
@@ -30,12 +29,12 @@ lora_fan_in_fan_out:
wandb_project:
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./lora-out
-batch_size: 16
-micro_batch_size: 4
-num_epochs: 3
+gradient_accumulation_steps: 1
+micro_batch_size: 2
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
@@ -50,16 +49,17 @@ early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
gptq_groupsize:
+s2_attention:
gptq_model_v1:
-warmup_steps: 10
-eval_steps: 50
-save_steps:
+warmup_steps: 20
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
-weight_decay: 0.0
+weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
@@ -1,5 +1,4 @@
-base_model: openlm-research/open_llama_3b
-base_model_config: openlm-research/open_llama_3b
+base_model: openlm-research/open_llama_3b_v2
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
@@ -9,12 +8,12 @@ push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+dataset_prepared_path:
+val_set_size: 0.05
adapter: qlora
lora_model_dir:
-sequence_len: 2048
-max_packed_sequence_len: 2048
+sequence_len: 1024
+sample_packing: true
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
@@ -24,36 +23,36 @@ lora_fan_in_fan_out:
wandb_project:
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./qlora-out
-batch_size: 4
-micro_batch_size: 4
-num_epochs: 2
+gradient_accumulation_steps: 1
+micro_batch_size: 2
+num_epochs: 4
optimizer: paged_adamw_32bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
-bf16: true
-fp16: false
-tf32: true
+bf16: false
+fp16: true
+tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+xformers_attention:
+flash_attention: true
gptq_groupsize:
gptq_model_v1:
-warmup_steps: 10
-eval_steps: 20
-save_steps:
+warmup_steps: 20
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
-weight_decay: 0.0
+weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
11 examples/phi/README.md Normal file
@@ -0,0 +1,11 @@
# Phi

Due to some nuances with the phi code, please use deepspeed when training phi for a full finetune.

```shell
accelerate launch -m axolotl.cli.train examples/phi/phi-ft.yml --deepspeed deepspeed_configs/zero1.json

# OR

python -m axolotl.cli.train examples/phi/phi-qlora.yml
```
71 examples/phi/phi-ft.yml Normal file
@@ -0,0 +1,71 @@
base_model: microsoft/phi-1_5
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: garage-bAInd/Open-Platypus
    type: alpaca

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./phi-sft-out

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 0.000003

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: True
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
resize_token_embeddings_to_32x: true
special_tokens:
  pad_token: "<|endoftext|>"
71 examples/phi/phi-qlora.yml Normal file
@@ -0,0 +1,71 @@
base_model: microsoft/phi-1_5
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: garage-bAInd/Open-Platypus
    type: alpaca

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./phi-sft-out

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

adapter: qlora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 0.000003

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: True
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
resize_token_embeddings_to_32x: true
special_tokens:
  pad_token: "<|endoftext|>"
71 examples/phi/phi2-ft.yml Normal file
@@ -0,0 +1,71 @@
base_model: microsoft/phi-2
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: garage-bAInd/Open-Platypus
    type: alpaca

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./phi-sft-out

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 0.000003

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: True
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
resize_token_embeddings_to_32x: true
special_tokens:
  pad_token: "<|endoftext|>"
@@ -1,5 +1,4 @@
base_model: EleutherAI/pythia-12b-deduped
-base_model_config: EleutherAI/pythia-12b-deduped
base_model_ignore_patterns: pytorch*  # prefer safetensors
model_type: GPTNeoXForCausalLM
tokenizer_type: AutoTokenizer
@@ -10,7 +9,7 @@ device_map: auto
datasets:
  - path: vicgalle/alpaca-gpt4
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.05
adapter:
lora_model_dir:
@@ -25,7 +24,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
wandb_project:
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./pythia-12b
gradient_accumulation_steps: 1
@@ -1,10 +1,9 @@
base_model: EleutherAI/pythia-1.4b-deduped
-base_model_config: EleutherAI/pythia-1.4b-deduped
load_in_8bit: true
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.05
adapter: lora
lora_model_dir:
@@ -19,20 +18,20 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
wandb_project:
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./lora-alpaca-pythia
gradient_accumulation_steps: 1
micro_batch_size: 4
-num_epochs: 3
+num_epochs: 4
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
-bf16: True
-tf32: True
+bf16: auto
+tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
weight_decay: 0.1
-eval_steps: 20
+evals_per_epoch: 4
logging_steps: 1
67 examples/qwen/lora.yml Normal file
@@ -0,0 +1,67 @@
base_model: Qwen/Qwen-7B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

sequence_len: 2048  # supports up to 8192
sample_packing: false
pad_to_sequence_len:

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention:

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
67 examples/qwen/qlora.yml Normal file
@@ -0,0 +1,67 @@
base_model: Qwen/Qwen-7B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

sequence_len: 2048  # supports up to 8192
sample_packing: false
pad_to_sequence_len:

adapter: qlora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention:

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
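Both Qwen examples set `trust_remote_code: true`, so launching them will fetch and run Qwen's custom modeling code from the Hub; the launch itself follows the usual pattern, e.g.:

```shell
accelerate launch -m axolotl.cli.train examples/qwen/qlora.yml
```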
@@ -1,5 +1,4 @@
base_model: togethercomputer/RedPajama-INCITE-Chat-3B-v1
-base_model_config: togethercomputer/RedPajama-INCITE-Chat-3B-v1
model_type: GPTNeoXForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code:
@@ -7,7 +6,7 @@ load_in_8bit: false
datasets:
  - path: vicgalle/alpaca-gpt4
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.02
adapter:
lora_model_dir:
@@ -23,19 +22,19 @@ lora_fan_in_fan_out: false
wandb_project: redpajama-alpaca-3b
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./redpajama-alpaca-3b
batch_size: 4
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
-bf16: true
+bf16: auto
tf32: true
early_stopping_patience:
resume_from_checkpoint:
@@ -46,8 +45,8 @@ flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
-eval_steps: 110
-save_steps: 660
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0001
@@ -1,11 +1,10 @@
base_model: replit/replit-code-v1-3b
-base_model_config: replit/replit-code-v1-3b
trust_remote_code: true
load_in_8bit: false
datasets:
  - path: vicgalle/alpaca-gpt4
    type: alpaca
-dataset_prepared_path: last_run_prepared
+dataset_prepared_path:
val_set_size: 0.05
adapter: lora
lora_model_dir:
@@ -22,19 +21,19 @@ lora_fan_in_fan_out:
wandb_project: lora-replit
wandb_entity:
wandb_watch:
-wandb_run_id:
+wandb_name:
wandb_log_model:
output_dir: ./lora-replit
batch_size: 8
micro_batch_size: 1
-num_epochs: 3
+num_epochs: 4
optimizer:
torchdistx_path:
lr_scheduler:
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
-bf16: true
+bf16: auto
tf32: true
gradient_checkpointing:
early_stopping_patience:
@@ -46,8 +45,8 @@ flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
-eval_steps: 50
-save_steps:
+evals_per_epoch: 4
+saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0
69 examples/stablelm-2/1.6b/fft.yml Normal file
@@ -0,0 +1,69 @@
base_model: stabilityai/stablelm-2-1_6b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_qkv: false
flash_attn_fuse_mlp: true

warmup_steps: 100
evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed: #deepspeed_configs/zero2.json # multi-gpu only
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
66 examples/stablelm-2/1.6b/lora.yml Normal file
@@ -0,0 +1,66 @@
base_model: stabilityai/stablelm-2-1_6b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true

warmup_steps: 10
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
36 examples/stablelm-2/README.md Normal file
@@ -0,0 +1,36 @@
# StableLM 2

This repository contains examples for training and processing using StableLM-2. It also includes a section to help you estimate the GPU requirements for your specific use case.

## Estimating GPU Requirements

| type          | deepspeed | batch size | context length | vRAM GPU (GBs) |
|---------------|-----------|------------|----------------|----------------|
| full finetune | N/A       | 1          | 4096           | ~21.5GBs       |
| full finetune | zero2     | 1          | 4096           | ~20GBs         |
| lora          | N/A       | 1          | 4096           | ~16.6GBs       |

The above are estimates and might differ slightly depending on the setup, for example whether you pack your sequence lengths or not (the above assumes you do, to length 4096).

This blog post from Hamel Husain was a great resource for estimating these numbers: https://hamel.dev/notes/llm/03_estimating_vram.html

## Training

We have example scripts here for both full finetuning and lora using the popular alpaca dataset:

```shell
# preprocess the dataset
CUDA_VISIBLE_DEVICES="" python -m axolotl.cli.preprocess examples/stablelm-2/1.6b/lora.yml
```

Single GPU Training:

```shell
python -m axolotl.cli.train examples/stablelm-2/1.6b/fft.yml --deepspeed deepspeed_configs/zero2.json
# OR
python -m axolotl.cli.train examples/stablelm-2/1.6b/lora.yml
```

Multinode GPU Training with `accelerate`:

```shell
# make sure you've configured accelerate properly
accelerate launch -m axolotl.cli.train examples/stablelm-2/1.6b/fft.yml --deepspeed deepspeed_configs/zero2.json
```
69 examples/starcoder2/qlora.yml Normal file
@@ -0,0 +1,69 @@
base_model: bigcode/starcoder2-3b

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca


dataset_prepared_path:
val_set_size: 0.2
output_dir: ./qlora

adapter: qlora
lora_model_dir:

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 2e-5

train_on_inputs: false
group_by_length: false
bf16: auto
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 20
evals_per_epoch: 4
eval_steps:
eval_table_size:
saves_per_epoch: 4
save_steps:
save_total_limit: 2
debug:
deepspeed:
weight_decay:
fsdp:
fsdp_config:
special_tokens:
17 examples/tiny-llama/README.md Normal file
@@ -0,0 +1,17 @@
# Overview

This is a simple example of how to finetune TinyLlama 1.1B using either LoRA or QLoRA:

LoRA:

```
accelerate launch -m axolotl.cli.train examples/tiny-llama/lora.yml
```

QLoRA:

```
accelerate launch -m axolotl.cli.train examples/tiny-llama/qlora.yml
```

Both take about 10 minutes to complete on a 4090.
64 examples/tiny-llama/lora-mps.yml Normal file
@@ -0,0 +1,64 @@
base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0
output_dir: ./lora-out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16: false
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: false

warmup_steps: 10
evals_per_epoch: 0
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
64 examples/tiny-llama/lora.yml Normal file
@@ -0,0 +1,64 @@
base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

sequence_len: 4096
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
58 examples/tiny-llama/pretrain.yml Normal file
@@ -0,0 +1,58 @@
base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0

model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

max_steps: 200
pretraining_dataset:
  path: c4
  name: en
  type: pretrain
dataset_prepared_path:
val_set_size: 0.0
output_dir: ./model-out

sequence_len: 2048
sample_packing: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch:
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
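Unlike the finetune examples, this config trains from a pretraining corpus (`pretraining_dataset` pointing at c4) and bounds the run with `max_steps: 200` rather than epochs; a launch sketch:

```shell
accelerate launch -m axolotl.cli.train examples/tiny-llama/pretrain.yml
```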
65 examples/tiny-llama/qlora.yml Normal file
@@ -0,0 +1,65 @@
base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./qlora-out

adapter: qlora
lora_model_dir:

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
Some files were not shown because too many files have changed in this diff.