Compare commits

157 commits:

`9793faf6dc`, `64852ae15a`, `1fed74b1d9`, `a300a4db1d`, `fe285430bc`, `0d2e34f056`, `b56a6c0101`, `2eda9e02a9`, `78b9efb7f4`, `312a9fad07`, `58d665943e`, `cc7e80026e`, `dc71d8872a`, `248bf90f89`, `77085ea24e`, `db2a3586f3`, `6c9a87c8ee`, `894cba09f3`, `41a4d15d43`, `2c37bf6c21`, `9f69c4d8c1`, `3d4984b9a5`, `ff7f18d1ed`, `cf62cfd661`, `c5df969262`, `40a53ff181`, `dcdec44347`, `3ffb018a4c`, `a94f2eecb1`, `1066751358`, `1b63bf13bc`, `5cce2a42ff`, `2a428e8014`, `cdf85fdbd5`, `9b790d359b`, `38811434e6`, `06c61d6f13`, `262dc29df2`, `165907fddb`, `a032c9f452`, `b06d3e3645`, `c58034d48c`, `28fd429bcf`, `45ac7c4f88`, `edd6980dd9`, `dc6d25124d`, `6dd2e7d671`, `b64f411849`, `03a59c1ed4`, `ebaec3c406`, `73e70e3996`, `d75adb9835`, `02224668c3`, `f162f3c7cc`, `eca3531329`, `6f16c4569d`, `0bd09c077d`, `469c08c9ba`, `334af625d0`, `273b3a3aa7`, `3cdd8e4122`, `cf5ae6b649`, `b1f4f7a34d`, `83237b8445`, `46032a1a1f`, `8bba64258e`, `88089e8b32`, `168a7a09cc`, `231031a0e1`, `9234b75cb4`, `553a86b52c`, `5daf7d5299`, `5491278a79`, `1514739f0f`, `896c1aebcf`, `ef17e15483`, `69a235061b`, `687d889928`, `c4cf567b55`, `c49729d2bc`, `13ac4d8de2`, `19cf0bda99`, `f74edd5b56`, `d69da99c2c`, `66afb76a15`, `a692ad3f4c`, `41da98b982`, `9e64f42e0f`, `b9b7d4ce92`, `9bed281867`, `e79c8e617e`, `71456955f5`, `3a783c04e4`, `1e5014acec`, `a10da1caff`, `4066c78631`, `78a1e1fa12`, `bc8a2e5547`, `910ebe47f5`, `c146880a75`, `77bdb7d144`, `530809fd74`, `924bbfddec`, `f150c027e3`, `5c39c006c9`, `612aabd8c4`, `af05883f75`, `05ab9092e3`, `7b57ed7618`, `3a38271276`, `8d20e0a3d3`, `de8ed229c3`, `478d8c7b8e`, `645c13592c`, `47d601fa23`, `756dfba97b`, `91ab0592af`, `0aeb7c7802`, `9bdd30cdfd`, `d35278aaf1`, `9492d4ebb7`, `ad5ca4f734`, `cb9d3af5c0`, `c969f0a9dc`, `6d0ee4ba34`, `a81f52d575`, `1925eaf1e6`, `1ab3bf3e67`, `d7635b7148`, `88e17ffc50`, `baed440fa1`, `7925ddce86`, `6f849809c5`, `c16644d05e`, `945c4191a3`, `136522f9c9`, `556fe408b3`, `16bb6276a5`, `06674a11f2`, `3513885f43`, `4b43a66a0b`, `7dc580b837`, `fd2c9814c9`, `c9a149f9e8`, `958da70376`, `759e8673ce`, `0c6f928601`, `eea2731a5e`, `1db46a9c72`, `ab5cd28acf`, `1a82082e91`, `1210dc8fd5`, `488a67d75a`, `71a43f8479`, `39619028a3`, `8792199799`, `1edc30c786`
`.github/workflows/base.yml` (23 changes)

````diff
@@ -12,28 +12,19 @@ jobs:
     # this job needs to be run on self-hosted GPU runners...
     runs-on: self-hosted
     strategy:
+      fail-fast: false
       matrix:
         include:
           - cuda: "118"
             cuda_version: 11.8.0
             python_version: "3.9"
-            pytorch: 2.0.0
-            axolotl_extras:
+            pytorch: 2.0.1
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
           - cuda: "118"
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.0
-            axolotl_extras:
-          - cuda: "117"
-            cuda_version: 11.7.0
-            python_version: "3.9"
-            pytorch: 1.13.1
-            axolotl_extras:
-          - cuda: "118"
-            cuda_version: 11.8.0
-            python_version: "3.9"
-            pytorch: 2.0.0
-            axolotl_extras: gptq
+            pytorch: 2.0.1
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -57,11 +48,9 @@ jobs:
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
          labels: ${{ steps.metadata.outputs.labels }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
             CUDA=${{ matrix.cuda }}
             PYTHON_VERSION=${{ matrix.python_version }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
-            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras }}
+            TORCH_CUDA_ARCH_LIST=${{ matrix.torch_cuda_arch_list }}
````

`.github/workflows/main.yml` (40 changes)

````diff
@@ -11,28 +11,24 @@ jobs:
     if: github.repository_owner == 'OpenAccess-AI-Collective'
     # this job needs to be run on self-hosted GPU runners...
     strategy:
+      fail-fast: false
       matrix:
         include:
           - cuda: cu118
             cuda_version: 11.8.0
             python_version: "3.9"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras:
           - cuda: cu118
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras:
           - cuda: cu118
             cuda_version: 11.8.0
             python_version: "3.9"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras: gptq
-          - cuda: cu117
-            cuda_version: 11.7.0
-            python_version: "3.9"
-            pytorch: 1.13.1
-            axolotl_extras:
     runs-on: self-hosted
     steps:
       - name: Checkout
@@ -54,13 +50,11 @@ jobs:
         with:
           context: .
           build-args: |
-            BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
   build-axolotl-runpod:
     needs: build-axolotl
     if: github.repository_owner == 'OpenAccess-AI-Collective'
@@ -68,26 +62,21 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: cu118
+          - cuda: 118
             cuda_version: 11.8.0
             python_version: "3.9"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras:
-          - cuda: cu118
+          - cuda: 118
             cuda_version: 11.8.0
             python_version: "3.10"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras:
-          - cuda: cu118
+          - cuda: 118
             cuda_version: 11.8.0
             python_version: "3.9"
-            pytorch: 2.0.0
+            pytorch: 2.0.1
             axolotl_extras: gptq
-          - cuda: cu117
-            cuda_version: 11.7.0
-            python_version: "3.9"
-            pytorch: 1.13.1
-            axolotl_extras:
     runs-on: self-hosted
     steps:
       - name: Checkout
@@ -109,10 +98,9 @@ jobs:
         with:
          context: .
           build-args: |
-            BASE_TAG=${{ github.ref_name }}-py${{ matrix.python_version }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            BASE_TAG=${{ github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            CUDA=${{ matrix.cuda }}
           file: ./docker/Dockerfile-runpod
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
````

`.github/workflows/tests.yml` (1 change)

````diff
@@ -7,6 +7,7 @@ jobs:
   test:
     runs-on: ubuntu-latest
     strategy:
+      fail-fast: false
       matrix:
         python_version: ["3.9", "3.10"]
     timeout-minutes: 10
````

`.pre-commit-config.yaml`

````diff
@@ -1,5 +1,5 @@
 default_language_version:
-  python: python3.9
+  python: python3
 
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
````

`LICENSE` (new file, 202 lines)

Adds the full, unmodified text of the Apache License, Version 2.0 (January 2004, http://www.apache.org/licenses/), from "TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION" through the appendix describing how to apply the license with the `Copyright [yyyy] [name of copyright owner]` boilerplate notice.

`README.md` (105 changes)

````diff
@@ -24,11 +24,12 @@
 | mpt | ✅ | ❌ | ❓ | ❌ | ❓ | ❌ | ❌ | ❓ |
 | falcon | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❌ | ✅ |
 | gpt-j | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❓ | ✅ |
+| XGen | ✅ | ❓ | ✅ | ❓ | ❓ | ❓ | ❓ | ✅
 
 
 ## Quickstart ⚡
 
-**Requirements**: Python 3.9 and Pytorch 2.0.
+**Requirements**: Python >=3.9 and Pytorch >=2.0.
 
 ```bash
 git clone https://github.com/OpenAccess-AI-Collective/axolotl
@@ -36,8 +37,6 @@ git clone https://github.com/OpenAccess-AI-Collective/axolotl
 pip3 install -e .
 pip3 install -U git+https://github.com/huggingface/peft.git
 
-accelerate config
-
 # finetune lora
 accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml
 
@@ -52,11 +51,10 @@ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
 
 - Docker
   ```bash
-  docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.9-cu118-2.0.0
+  docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.10-cu118-2.0.1
   ```
-- `winglian/axolotl-runpod:main-py3.9-cu118-2.0.0`: for runpod
-- `winglian/axolotl-runpod:main-py3.9-cu118-2.0.0-gptq`: for gptq
-- `winglian/axolotl:dev`: dev branch (not usually up to date)
+- `winglian/axolotl-runpod:main-py3.10-cu118-2.0.1`: for runpod
+- `winglian/axolotl-runpod:main-py3.9-cu118-2.0.1-gptq`: for gptq
 
 Or run on the current files for development:
 
@@ -108,7 +106,7 @@ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
 
 3. Install torch
   ```bash
-  pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
+  pip3 install -U torch --index-url https://download.pytorch.org/whl/cu118
   ```
 
 4. Axolotl
@@ -138,7 +136,7 @@ Have dataset(s) in one of the following format (JSONL recommended):
   ```json
   {"instruction": "...", "input": "...", "output": "..."}
   ```
-- `sharegpt`: conversations
+- `sharegpt:chat`: conversations
   ```json
   {"conversations": [{"from": "...", "value": "..."}]}
   ```
@@ -195,6 +193,10 @@ Have dataset(s) in one of the following format (JSONL recommended):
   ```json
   {"message_1": "...", "message_2": "..."}
   ```
+- `alpaca_w_system.load_open_orca`: support for open orca datasets with included system prompts, instruct
+  ```json
+  {"system_prompt": "...", "question": "...", "response": "..."}
+  ```
 - `context_qa`: in context question answering from an article
   ```json
   {"article": "...", "question": "...", "answer": "..."}
@@ -233,7 +235,7 @@ Have dataset(s) in one of the following format (JSONL recommended):
 #### How to add custom prompts
 
 1. Add your method to a file in [prompt_strategies](src/axolotl/prompt_strategies). Please see other files as example.
-2. Use your custom file name as the dataset type.
+2. Use your custom file name as the dataset type `<prompt_strategies_file>.load_<load_fn>`.
 
 Optionally, download some datasets, see [data/README.md](data/README.md)
 
````

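Concretely, a custom prompt strategy is just a loader function in such a file. The sketch below is hypothetical: the module name `my_format`, the loader name `load_custom`, and the exact loader signature are illustrative assumptions, not confirmed axolotl API.

```python
# Hypothetical file: src/axolotl/prompt_strategies/my_format.py
# Referenced from a config as:  type: my_format.load_custom  (names assumed)

def load_custom(tokenizer, cfg):
    """Return a callable that turns one raw dataset row into tokenized features."""

    def tokenize_prompt(example):
        # Build a single training prompt from the raw columns, then tokenize.
        text = f"{example['question']}\n{example['answer']}"
        tokenized = tokenizer(text, truncation=True, max_length=cfg.sequence_len)
        # For causal-LM finetuning, labels mirror input_ids.
        tokenized["labels"] = list(tokenized["input_ids"])
        return tokenized

    return tokenize_prompt
```
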
`README.md` (continued)

````diff
@@ -241,7 +243,7 @@ Optionally, download some datasets, see [data/README.md](data/README.md)
 
 ### Config
 
-See sample configs in [configs](configs) folder or [examples](examples) for quick start. It is recommended to duplicate and modify to your needs. The most important options are:
+See [examples](examples) for quick start. It is recommended to duplicate and modify to your needs. The most important options are:
 
 - model
   ```yaml
@@ -251,10 +253,24 @@ See sample configs in [configs](configs) folder or [examples](examples) for quic
 
 - dataset
   ```yaml
+  sequence_len: 2048 # max token length for prompt
+
+  # huggingface repo
   datasets:
-    - path: vicgalle/alpaca-gpt4 # local or huggingface repo
+    - path: vicgalle/alpaca-gpt4
+      type: alpaca # format from earlier
+
+  # huggingface repo with specific configuration/subset
+  datasets:
+    - path: EleutherAI/pile
+      name: enron_emails
+      type: completion # format from earlier
+
+  # local
+  datasets:
+    - path: json
+      data_files: data.jsonl # or json
       type: alpaca # format from earlier
-      sequence_len: 2048 # max token length / prompt
   ```
 
 - loading
@@ -264,6 +280,8 @@ See sample configs in [configs](configs) folder or [examples](examples) for quic
   bf16: true # require >=ampere
   fp16: true
   tf32: true # require >=ampere
+  bfloat16: true # require >=ampere, use instead of bf16 when you don't want AMP (automatic mixed precision)
+  float16: true # use instead of fp16 when you don't want AMP
   ```
 Note: Repo does not do 4-bit quantization.
 
@@ -291,6 +309,8 @@ base_model_ignore_patterns:
 # if the base_model repo on hf hub doesn't include configuration .json files,
 # you can set that here, or leave this empty to default to base_model
 base_model_config: ./llama-7b-hf
+# you can specify to choose a specific model revision from huggingface hub
+model_revision:
 # Optional tokenizer configuration override in case you want to use a different tokenizer
 # than the one defined in the base model
 tokenizer_config:
@@ -300,6 +320,11 @@ model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 # Trust remote code for untrusted source
 trust_remote_code:
+# use_fast option for tokenizer loading from_pretrained, default to True
+tokenizer_use_fast:
+# resize the model embeddings when new tokens are added to multiples of 32
+# this is reported to improve training speed on some models
+resize_token_embeddings_to_32x:
 
 # whether you are training a 4-bit GPTQ quantized model
 gptq: true
````

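As context for the new `resize_token_embeddings_to_32x` flag: a minimal sketch of the idea, using the standard `transformers` `resize_token_embeddings` method. The round-up-to-32 policy shown is an assumption about what the flag does, not axolotl's exact code.

```python
import math

def resize_embeddings_to_32x(model, tokenizer):
    """Round the embedding size up to the next multiple of 32 (assumed policy)."""
    target = int(math.ceil(len(tokenizer) / 32) * 32)
    model.resize_token_embeddings(target)  # standard Hugging Face transformers API
    return target
```
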
`README.md` (continued)

````diff
@@ -320,18 +345,21 @@ tf32: true # require >=ampere
 
 # a list of one or more datasets to finetune the model with
 datasets:
-  # this can be either a hf dataset, or relative path
+  # hf dataset repo | "json" for local dataset, make sure to fill data_files
   - path: vicgalle/alpaca-gpt4
     # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
-    type: alpaca # format OR format:prompt_style (chat/instruct)
+    type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
     data_files: # path to source data files
     shards: # number of shards to split data into
+    name: # name of dataset configuration to load
 
 # axolotl attempts to save the dataset as an arrow after packing the data together so
 # subsequent training attempts load faster, relative path
 dataset_prepared_path: data/last_run_prepared
 # push prepared dataset to hub
 push_dataset_to_hub: # repo path
+# push checkpoints to hub
+hub_model_id: # repo path to push finetuned model
 # whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
 # required to be true when used in combination with `push_dataset_to_hub`
 hf_use_auth_token: # boolean
````

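The new `name:` key corresponds to the dataset configuration argument of `datasets.load_dataset` (a real Hugging Face `datasets` parameter); how axolotl threads it through internally is assumed here. Using the same repo/subset pair as the example above:

```python
from datasets import load_dataset

# `path:` and `name:` from the config map onto load_dataset's first two arguments.
ds = load_dataset("EleutherAI/pile", name="enron_emails", split="train")
print(ds[0])
```
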
`README.md` (continued)

````diff
@@ -393,6 +421,9 @@ logging_steps:
 save_steps:
 eval_steps:
 
+# save model as safetensors (require safetensors package)
+save_safetensors:
+
 # whether to mask out or include the human's prompt from the training labels
 train_on_inputs: false
 # don't use this, leads to wonky training (according to someone on the internet)
@@ -420,7 +451,15 @@ log_sweep_max_lr:
 optimizer:
 # specify weight decay
 weight_decay:
+# adamw hyperparams
+adam_beta1:
+adam_beta2:
+adam_epsilon:
+# Gradient clipping max norm
+max_grad_norm:
+
+# whether to bettertransformers
+flash_optimum:
 # whether to use xformers attention patch https://github.com/facebookresearch/xformers:
 xformers_attention:
 # whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
````

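The new optimizer keys mirror standard `transformers.TrainingArguments` fields of the same names. A sketch of the correspondence (values are placeholders, and the pass-through itself is an assumption about axolotl's trainer setup):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./out",
    weight_decay=0.0,    # cfg: weight_decay
    adam_beta1=0.9,      # cfg: adam_beta1
    adam_beta2=0.999,    # cfg: adam_beta2
    adam_epsilon=1e-8,   # cfg: adam_epsilon
    max_grad_norm=1.0,   # cfg: max_grad_norm (gradient clipping)
)
```
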
`README.md` (continued)

````diff
@@ -476,17 +515,6 @@ strict:
 
 </details>
 
-### Accelerate
-
-Configure accelerate
-
-```bash
-accelerate config
-
-# Edit manually
-# nano ~/.cache/huggingface/accelerate/default_config.yaml
-```
-
 ### Train
 
 Run
@@ -494,6 +522,21 @@ Run
 accelerate launch scripts/finetune.py configs/your_config.yml
 ```
 
+#### Multi-GPU Config
+
+- llama FSDP
+```yaml
+fsdp:
+  - full_shard
+  - auto_wrap
+fsdp_config:
+  fsdp_offload_params: true
+  fsdp_state_dict_type: FULL_STATE_DICT
+  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+```
+
+- llama Deepspeed: append `ACCELERATE_USE_DEEPSPEED=true` in front of finetune command
+
 ### Inference
 
 Pass the appropriate flag to the train command:
@@ -520,6 +563,12 @@ Add below flag to train command above
 --merge_lora --lora_model_dir="./completed-model" --load_in_8bit=False --load_in_4bit=False
 ```
 
+If you run out of CUDA memory, you can try to merge in system RAM with
+
+```bash
+CUDA_VISIBLE_DEVICES="" python3 scripts/finetune.py ...
+```
+
 ## Common Errors 🧰
 
 > Cuda out of memory
@@ -538,6 +587,10 @@ Try set `fp16: true`
 
 Try to turn off xformers.
 
+> accelerate config missing
+
+It's safe to ignore it.
+
 ## Need help? 🙋‍♂️
 
 Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you
````

`data/README.md`

````diff
@@ -10,10 +10,10 @@ curl https://github.com/teknium1/GPTeacher/blob/main/Roleplay/roleplay-similarit
 ## Convert the JSON data files to JSONL.
 
 ```shell
-python3 ./scripts/alpaca_json_to_jsonl.py --input data/alpaca_data_gpt4.json > data/alpaca_data_gpt4.jsonl
-python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/vicuna_cleaned.json > data/vicuna_cleaned.jsonl
-python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/roleplay-similarity_0.6-instruct-dataset.json > data/roleplay-similarity_0.6-instruct-dataset.jsonl
-python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/gpt4-instruct-similarity-0.6-dataset.json > data/gpt4-instruct-similarity-0.6-dataset.jsonl
+python3 ./scripts/alpaca_json_to_jsonl.py --file data/alpaca_data_gpt4.json --output data/alpaca_data_gpt4.jsonl
+python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/vicuna_cleaned.json --output data/vicuna_cleaned.jsonl
+python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/roleplay-similarity_0.6-instruct-dataset.json --output data/roleplay-similarity_0.6-instruct-dataset.jsonl
+python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/gpt4-instruct-similarity-0.6-dataset.json --output data/gpt4-instruct-similarity-0.6-dataset.jsonl
 ```
 ---
 
````

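The `--file`/`--output` flags suggest a `fire`-style CLI. For orientation, a self-contained sketch of that shape; the function name and internals here are illustrative assumptions, the real script builds on `axolotl.convert` classes:

```python
import json
import sys

import fire

def main(file: str, output: str = ""):
    """Convert a JSON array file to JSONL (one object per line)."""
    with open(file, encoding="utf-8") as f:
        rows = json.load(f)
    out = open(output, "w", encoding="utf-8") if output else sys.stdout
    for row in rows:
        out.write(json.dumps(row, ensure_ascii=False) + "\n")

if __name__ == "__main__":
    fire.Fire(main)
```
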
`docker/Dockerfile`

````diff
@@ -3,16 +3,15 @@ FROM winglian/axolotl-base:$BASE_TAG
 
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
 ARG AXOLOTL_EXTRAS=""
+ARG CUDA="118"
+ENV BNB_CUDA_VERSION=$CUDA
 
 RUN apt-get update && \
     apt-get install -y vim curl
 
 WORKDIR /workspace
 
-RUN pip3 install --force-reinstall "peft @ git+https://github.com/huggingface/peft.git@main" \
-    "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
-    "transformers @ git+https://github.com/huggingface/transformers.git@main"
+RUN pip3 install --force-reinstall "peft @ git+https://github.com/huggingface/peft.git@main"
 
 RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN cd axolotl && \
@@ -22,5 +21,10 @@ RUN cd axolotl && \
     pip install -e .; \
     fi
 
+# fix so that git fetch/pull from remote works
+RUN cd axolotl && \
+    git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
+    git config --get remote.origin.fetch
+
 # helper for huggingface-login cli
 RUN git config --global credential.helper store
````

`docker/Dockerfile-base`

````diff
@@ -8,7 +8,7 @@ FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION a
 ENV PATH="/root/miniconda3/bin:${PATH}"
 
 ARG PYTHON_VERSION="3.9"
-ARG PYTORCH="2.0.0"
+ARG PYTORCH_VERSION="2.0.1"
 ARG CUDA="118"
 
 ENV PYTHON_VERSION=$PYTHON_VERSION
@@ -29,17 +29,18 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
 WORKDIR /workspace
 
 RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
-    python3 -m pip install --no-cache-dir -U torch==${PYTORCH} torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu$CUDA
+    python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} --extra-index-url https://download.pytorch.org/whl/cu$CUDA
 
 
 FROM base-builder AS flash-attn-builder
 
 WORKDIR /workspace
 
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
 
-RUN git clone https://github.com/HazyResearch/flash-attention.git && \
+RUN git clone https://github.com/Dao-AILab/flash-attention.git && \
     cd flash-attention && \
+    git checkout v2.0.1 && \
     python3 setup.py bdist_wheel && \
     cd csrc/fused_dense_lib && \
     python3 setup.py bdist_wheel && \
@@ -52,7 +53,7 @@ RUN git clone https://github.com/HazyResearch/flash-attention.git && \
 
 FROM base-builder AS deepspeed-builder
 
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
 
 WORKDIR /workspace
 
@@ -73,11 +74,14 @@ RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
 
 FROM base-builder
 
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
+ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
+
 # recompile apex
 RUN python3 -m pip uninstall -y apex
 RUN git clone https://github.com/NVIDIA/apex
 # `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
-RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .
+RUN cd apex && MAX_JOBS=1 python3 -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./
 
 RUN mkdir -p /workspace/builds
 COPY --from=bnb-builder /workspace/bitsandbytes /workspace/builds/bitsandbytes
@@ -97,4 +101,4 @@ RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
 RUN git lfs install --skip-repo
 RUN pip3 install awscli && \
     # The base image ships with `pydantic==1.8.2` which is not working
-    pip3 install -U --no-cache-dir pydantic
+    pip3 install -U --no-cache-dir pydantic==1.10.10
````

`docker/Dockerfile-runpod`

````diff
@@ -1,6 +1,10 @@
 ARG BASE_TAG=main
 FROM winglian/axolotl:$BASE_TAG
 
+ENV HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
+ENV HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
+ENV TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
+
 COPY scripts/runpod-entrypoint.sh /root/runpod-entrypoint.sh
 
 RUN apt install --yes --no-install-recommends openssh-server tmux && \
````

`examples/llama-2/README.md` (new file, 20 lines)

````diff
@@ -0,0 +1,20 @@
+# Overview
+
+This is an example of a llama-2 configuration for 7b and 13b. The yaml file contains configuration for the 7b variant, but you can just aswell use the same settings for 13b.
+
+The 7b variant fits on any 24GB VRAM GPU and will take up about 17 GB of VRAM during training if using qlora and 20 GB if using lora. On a RTX 4090 it trains 3 epochs of the default dataset in about 15 minutes.
+
+The 13b variant will fit if you change these settings to these values:
+gradient_accumulation_steps: 2
+micro_batch_size: 1
+
+```shell
+accelerate launch scripts/finetune.py examples/llama-2/qlora.yml
+
+```
+or
+
+```shell
+accelerate launch scripts/finetune.py examples/llama-2/lora.yml
+
+```
````

`examples/llama-2/lora.yml` (new file, 66 lines)

````diff
@@ -0,0 +1,66 @@
+base_model: meta-llama/Llama-2-7b-hf
+base_model_config: meta-llama/Llama-2-7b-hf
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+
+load_in_8bit: true
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.01
+output_dir: ./lora-out
+
+sequence_len: 4096
+max_packed_sequence_len: 4096
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 3
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: true
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention: true
+flash_attention:
+
+warmup_steps: 10
+eval_steps: 20
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
+  pad_token: "<pad>"
````

`examples/llama-2/qlora.yml` (new file, 67 lines)

````diff
@@ -0,0 +1,67 @@
+base_model: meta-llama/Llama-2-7b-hf
+base_model_config: meta-llama/Llama-2-7b-hf
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.01
+output_dir: ./qlora-out
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 4096
+max_packed_sequence_len: 4096
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 3
+optimizer: paged_adamw_32bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: true
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention: true
+flash_attention:
+
+warmup_steps: 10
+eval_steps: 20
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
+  pad_token: "<pad>"
````

`examples/pythia-12b/README.md` (new file, 9 lines)

````diff
@@ -0,0 +1,9 @@
+# Pythia 12B
+
+- Single-GPU A100 only (?)
+
+```shell
+python scripts/finetune.py examples/pythia-12b/config.yml
+```
+
+⚠️ Multiple-GPU A100 - Doesn't seem to work with multi-gpu without causing OOM! ⚠️
````

`examples/pythia-12b/config.yml` (new file, 49 lines)

````diff
@@ -0,0 +1,49 @@
+base_model: EleutherAI/pythia-12b-deduped
+base_model_config: EleutherAI/pythia-12b-deduped
+base_model_ignore_patterns: pytorch* # prefer safetensors
+model_type: GPTNeoXForCausalLM
+tokenizer_type: AutoTokenizer
+load_in_8bit: false
+load_in_4bit: false
+gptq: false
+device_map: auto
+datasets:
+  - path: vicgalle/alpaca-gpt4
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.05
+adapter:
+lora_model_dir:
+sequence_len: 2048
+max_packed_sequence_len: 2048
+lora_r: 64
+lora_alpha: 32
+lora_dropout: 0.0
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
+wandb_project:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+output_dir: ./pythia-12b
+gradient_accumulation_steps: 1
+micro_batch_size: 1
+num_epochs: 5
+learning_rate: 0.00003
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+train_on_inputs: false
+group_by_length: false
+bf16: false
+fp16: false
+float16: true
+tf32: true
+flash_optimum: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+gradient_checkpointing: true
+fsdp:
+fsdp_config:
+collator_pad_to_longest: true
````

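This config enables `flash_optimum: true`, which the README change above documents as the BetterTransformer path ("whether to bettertransformers"), and `scripts/finetune.py` below gains the matching `optimum` import. A sketch of what that toggles, using Optimum's real `BetterTransformer` API; wiring it exactly this way is an assumption about axolotl's internals:

```python
from optimum.bettertransformer import BetterTransformer
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-12b-deduped")
model = BetterTransformer.transform(model)   # swap in fused attention kernels
# ... training loop runs here ...
model = BetterTransformer.reverse(model)     # restore vanilla modules before saving
```
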
RedPajama-INCITE-Chat-3B example config

````diff
@@ -1,7 +1,7 @@
 base_model: togethercomputer/RedPajama-INCITE-Chat-3B-v1
 base_model_config: togethercomputer/RedPajama-INCITE-Chat-3B-v1
 model_type: GPTNeoXForCausalLM
-tokenizer_type: GPTNeoXTokenizer
+tokenizer_type: AutoTokenizer
 trust_remote_code:
 load_in_8bit: false
 datasets:
````

90
examples/xgen-7b/xgen-7b-8k-qlora.yml
Normal file
90
examples/xgen-7b/xgen-7b-8k-qlora.yml
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# An example finetuning Saleforce's XGen-7b model with 8k context using qlora
|
||||||
|
# on Tim Dettmer's Guanaco dataset.
|
||||||
|
base_model: Salesforce/xgen-7b-8k-base
|
||||||
|
base_model_config: Salesforce/xgen-7b-8k-base
|
||||||
|
trust_remote_code: true
|
||||||
|
model_type: AutoModelForCausalLM
|
||||||
|
tokenizer_type: AutoTokenizer
|
||||||
|
load_in_8bit: false
|
||||||
|
# enable 4bit for QLoRA
|
||||||
|
load_in_4bit: true
|
||||||
|
gptq: false
|
||||||
|
strict: false
|
||||||
|
push_dataset_to_hub:
|
||||||
|
datasets:
|
||||||
|
- path: timdettmers/openassistant-guanaco
|
||||||
|
data_files:
|
||||||
|
- openassistant_best_replies_train.jsonl
|
||||||
|
type: "completion"
|
||||||
|
dataset_prepared_path: last_run_prepared
|
||||||
|
val_set_size: 0.01
|
||||||
|
# enable QLoRA
|
||||||
|
adapter: qlora
|
||||||
|
lora_model_dir:
|
||||||
|
sequence_len: 8192
|
||||||
|
max_packed_sequence_len:
|
||||||
|
|
||||||
|
# hyperparameters from QLoRA paper Appendix B.2
|
||||||
|
# "We find hyperparameters to be largely robust across datasets"
|
||||||
|
lora_r: 64
|
||||||
|
lora_alpha: 16
|
||||||
|
# 0.1 for models up to 13B
|
||||||
|
# 0.05 for 33B and 65B models
|
||||||
|
lora_dropout: 0.05
|
||||||
|
# add LoRA modules on all linear layers of the base model
|
||||||
|
lora_target_modules:
|
||||||
|
lora_target_linear: true
|
||||||
|
lora_fan_in_fan_out:
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_run_id:
|
||||||
|
wandb_log_model:
|
||||||
|
output_dir: ./qlora-out
|
||||||
|
|
||||||
|
# QLoRA paper Table 9
|
||||||
|
# - 16 for 7b & 13b
|
||||||
|
# - 32 for 33b, 64 for 64b
|
||||||
|
# Max size tested on A6000
|
||||||
|
# - 7b: 40
|
||||||
|
# - 40b: 4
|
||||||
|
# decrease if OOM, increase for max VRAM utilization
|
||||||
|
micro_batch_size: 1
|
||||||
|
gradient_accumulation_steps: 1
|
||||||
|
num_epochs: 3
|
||||||
|
# Optimizer for QLoRA
|
||||||
|
optimizer: paged_adamw_32bit
|
||||||
|
torchdistx_path:
|
||||||
|
lr_scheduler: cosine
|
||||||
|
# QLoRA paper Table 9
|
||||||
|
# - 2e-4 for 7b & 13b
|
||||||
|
# - 1e-4 for 33b & 64b
|
||||||
|
learning_rate: 0.00002
|
||||||
|
train_on_inputs: false
|
||||||
|
group_by_length: false
|
||||||
|
bf16: true
|
||||||
|
fp16: false
|
||||||
|
tf32: false
|
||||||
|
gradient_checkpointing: true
|
||||||
|
# stop training after this many evaluation losses have increased in a row
|
||||||
|
# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
|
||||||
|
early_stopping_patience: 3
|
||||||
|
resume_from_checkpoint:
|
||||||
|
auto_resume_from_checkpoints: true
|
||||||
|
local_rank:
|
||||||
|
logging_steps: 1
|
||||||
|
xformers_attention: true
|
||||||
|
flash_attention:
|
||||||
|
gptq_groupsize:
|
||||||
|
gptq_model_v1:
|
||||||
|
warmup_steps: 10
|
||||||
|
eval_steps: 50
|
||||||
|
save_steps: 50
|
||||||
|
debug:
|
||||||
|
deepspeed:
|
||||||
|
weight_decay: 0.0
|
||||||
|
special_tokens:
|
||||||
|
eos_token: "<|endoftext|>"
|
||||||
|
bos_token: "<|endoftext|>"
|
||||||
|
unk_token: "<|endoftext|>"
|
||||||
|
pad_token: "<|endoftext|>"
|
||||||
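A quick way to sanity-check a config like the one above before launching a run is to parse it with PyYAML (already pinned in requirements.txt) and assert the QLoRA switches line up. This is an illustrative sketch, not part of the diff; it assumes you run it from the repo root so the example path resolves:

import yaml

with open("examples/xgen-7b/xgen-7b-8k-qlora.yml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

# QLoRA requires 4-bit loading plus the qlora adapter
assert cfg["load_in_4bit"] and cfg["adapter"] == "qlora"
print(cfg["base_model"], cfg["sequence_len"], cfg["micro_batch_size"])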
@@ -1,7 +1,7 @@
 peft @ git+https://github.com/huggingface/peft.git
 transformers @ git+https://github.com/huggingface/transformers.git
 bitsandbytes>=0.39.0
-accelerate
+accelerate @ git+https://github.com/huggingface/accelerate@2a289f6108e77a77a4efffb3f6316bc98538413b
 addict
 fire
 PyYAML==6.0
@@ -11,6 +11,8 @@ sentencepiece
 wandb
 einops
 xformers
+optimum
+hf_transfer
 # qlora things
 bert-score==0.3.13
 evaluate==0.4.0
@@ -15,6 +15,9 @@ from axolotl.convert import (
     JsonToJsonlConverter,
     StdoutWriter,
 )
+from axolotl.logging_config import configure_logging
+
+configure_logging()
 
 # add src to the pythonpath so we don't need to pip install this
 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -12,13 +12,15 @@ from typing import Any, Dict, List, Optional, Union
 import fire
 import torch
 import yaml
-from transformers import GenerationConfig, TextStreamer
-
-from axolotl.utils.data import load_prepare_datasets
-from axolotl.utils.dict import DictDefault
-from axolotl.utils.models import load_model, load_tokenizer
 
 # add src to the pythonpath so we don't need to pip install this
+from optimum.bettertransformer import BetterTransformer
+from transformers import GenerationConfig, TextStreamer
+
+from axolotl.logging_config import configure_logging
+from axolotl.utils.data import load_prepare_datasets, load_pretraining_dataset
+from axolotl.utils.dict import DictDefault
+from axolotl.utils.models import load_model, load_tokenizer
 from axolotl.utils.tokenization import check_dataset_labels
 from axolotl.utils.trainer import setup_trainer
 from axolotl.utils.validation import validate_config
@@ -28,9 +30,12 @@ project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
 src_dir = os.path.join(project_root, "src")
 sys.path.insert(0, src_dir)
 
-logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
+configure_logging()
+LOG = logging.getLogger("axolotl.scripts")
+
 
 DEFAULT_DATASET_PREPARED_PATH = "last_run_prepared"
+os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
 
 def choose_device(cfg):
@@ -211,18 +216,29 @@ def train(
 
     # load the tokenizer first
     tokenizer_config = cfg.tokenizer_config or cfg.base_model_config
-    logging.info(f"loading tokenizer... {tokenizer_config}")
+    LOG.info(f"loading tokenizer... {tokenizer_config}")
     tokenizer = load_tokenizer(tokenizer_config, cfg.tokenizer_type, cfg)
 
     if (
        check_not_in(["shard", "merge_lora"], kwargs) and not cfg.inference
    ):  # don't need to load dataset for these
-        train_dataset, eval_dataset = load_prepare_datasets(
-            tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH
-        )
+        if not cfg.pretraining_dataset:
+            train_dataset, eval_dataset = load_prepare_datasets(
+                tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH
+            )
+        else:
+            train_dataset = load_pretraining_dataset(
+                cfg.pretraining_dataset,
+                tokenizer,
+                max_tokens=cfg.sequence_len,
+                seed=cfg.seed,
+            )
+            # https://discuss.huggingface.co/t/how-to-use-huggingface-trainer-streaming-datasets-without-wrapping-it-with-torchdatas-iterablewrapper/25230
+            train_dataset = train_dataset.with_format("torch")
+            eval_dataset = None
 
     if cfg.debug or "debug" in kwargs:
-        logging.info("check_dataset_labels...")
+        LOG.info("check_dataset_labels...")
         check_dataset_labels(
             train_dataset.select(
                 [random.randrange(0, len(train_dataset) - 1) for _ in range(5)]  # nosec
@@ -231,11 +247,11 @@ def train(
         )
 
     if prepare_ds_only:
-        logging.info("Finished preparing dataset. Exiting...")
+        LOG.info("Finished preparing dataset. Exiting...")
         return
 
     # Load the model and tokenizer
-    logging.info("loading model and peft_config...")
+    LOG.info("loading model and peft_config...")
     model, peft_config = load_model(
         cfg.base_model,
         cfg.base_model_config,
@@ -246,17 +262,17 @@ def train(
     )
 
     if "merge_lora" in kwargs and cfg.adapter is not None:
-        logging.info("running merge of LoRA with base model")
+        LOG.info("running merge of LoRA with base model")
         model = model.merge_and_unload()
         model.to(dtype=torch.float16)
 
         if cfg.local_rank == 0:
-            logging.info("saving merged model")
+            LOG.info("saving merged model")
             model.save_pretrained(str(Path(cfg.output_dir) / "merged"))
         return
 
     if cfg.inference:
-        logging.info("calling do_inference function")
+        LOG.info("calling do_inference function")
         prompter: Optional[str] = "AlpacaPrompter"
         if "prompter" in kwargs:
             if kwargs["prompter"] == "None":
@@ -275,27 +291,30 @@ def train(
     model.config.use_cache = False
 
     if torch.__version__ >= "2" and sys.platform != "win32":
-        logging.info("Compiling torch model")
+        LOG.info("Compiling torch model")
         model = torch.compile(model)
 
     # go ahead and presave, so we have the adapter config available to inspect
     if peft_config:
-        logging.info(f"Pre-saving adapter config to {cfg.output_dir}")
+        LOG.info(f"Pre-saving adapter config to {cfg.output_dir}")
         peft_config.save_pretrained(cfg.output_dir)
 
     # In case we want to stop early with ctrl+c, this is a nice to have to save the pretrained model
     if cfg.local_rank == 0:
+
+        def terminate_handler(_, __, model):
+            if cfg.flash_optimum:
+                model = BetterTransformer.reverse(model)
+            model.save_pretrained(cfg.output_dir)
+            sys.exit(0)
+
         signal.signal(
-            signal.SIGINT,
-            lambda signal, frame: (
-                model.save_pretrained(cfg.output_dir),
-                sys.exit(0),
-            ),
+            signal.SIGINT, lambda signum, frame: terminate_handler(signum, frame, model)
         )
 
-    logging.info("Starting trainer...")
+    LOG.info("Starting trainer...")
     if cfg.group_by_length:
-        logging.info("hang tight... sorting dataset for group_by_length")
+        LOG.info("hang tight... sorting dataset for group_by_length")
     resume_from_checkpoint = cfg.resume_from_checkpoint
     if cfg.resume_from_checkpoint is None and cfg.auto_resume_from_checkpoints:
         possible_checkpoints = [
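The handler swap above replaces the inline lambda with a named function so the BetterTransformer wrapper can be reversed before the interrupt-time save. A self-contained sketch of the same pattern (DummyModel and the output path are stand-ins, not part of the diff):

import signal
import sys

class DummyModel:  # stand-in for the trained model
    def save_pretrained(self, path):
        print(f"saved to {path}")

model = DummyModel()

def terminate_handler(_signum, _frame, mdl):
    # a real handler would unwrap BetterTransformer here when enabled
    mdl.save_pretrained("./out")
    sys.exit(0)

signal.signal(
    signal.SIGINT, lambda signum, frame: terminate_handler(signum, frame, model)
)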
@@ -307,19 +326,29 @@ def train(
             key=lambda path: int(path.split("-")[-1]),
         )
         resume_from_checkpoint = sorted_paths[-1]
-        logging.info(
+        LOG.info(
             f"Using Auto-resume functionality to start with checkpoint at {resume_from_checkpoint}"
         )
 
     if not Path(cfg.output_dir).is_dir():
         os.makedirs(cfg.output_dir, exist_ok=True)
-    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+    if cfg.flash_optimum:
+        with torch.backends.cuda.sdp_kernel(
+            enable_flash=True, enable_math=True, enable_mem_efficient=True
+        ):
+            trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+    else:
+        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
 
-    logging.info(f"Training Completed!!! Saving pre-trained model to {cfg.output_dir}")
+    LOG.info(f"Training Completed!!! Saving pre-trained model to {cfg.output_dir}")
 
     # TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
     # only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
-    if cfg.local_rank == 0:
+    if cfg.fsdp:
+        model.save_pretrained(cfg.output_dir)
+    elif cfg.local_rank == 0:
+        if cfg.flash_optimum:
+            model = BetterTransformer.reverse(model)
         model.save_pretrained(cfg.output_dir)
 
     # trainer.save_model(cfg.output_dir)  # TODO this may be needed for deepspeed to work? need to review another time
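The flash_optimum branch wraps trainer.train() in torch's scaled-dot-product kernel context. The same context manager on a toy attention call (a sketch; requires PyTorch 2.x, and the shapes are arbitrary):

import torch
import torch.nn.functional as F

q = k = v = torch.randn(1, 4, 16, 8)  # batch, heads, seq, head_dim
with torch.backends.cuda.sdp_kernel(
    enable_flash=True, enable_math=True, enable_mem_efficient=True
):
    out = F.scaled_dot_product_attention(q, k, v)
print(out.shape)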
19  scripts/runpod-entrypoint.sh  Normal file → Executable file
@@ -1,10 +1,21 @@
 #!/bin/bash
 
-echo $PUBLIC_KEY >> ~/.ssh/authorized_keys
-chmod 700 -R ~/.ssh
+# Export specific ENV variables to /etc/rp_environment
+echo "Exporting environment variables..."
+printenv | grep -E '^RUNPOD_|^PATH=|^_=' | sed 's/^\(.*\)=\(.*\)$/export \1="\2"/' >> /etc/rp_environment
+echo 'source /etc/rp_environment' >> ~/.bashrc
 
-# Start the SSH service in the background
-service ssh start
+if [[ $PUBLIC_KEY ]]
+then
+    mkdir -p ~/.ssh
+    chmod 700 ~/.ssh
+    echo $PUBLIC_KEY >> ~/.ssh/authorized_keys
+    chmod 700 -R ~/.ssh
+    # Start the SSH service in the background
+    service ssh start
+else
+    echo "No PUBLIC_KEY ENV variable provided, not starting openSSH daemon"
+fi
 
 # Execute the passed arguments (CMD)
 exec "$@"
@@ -1,12 +1,13 @@
 """Module containing Dataset functionality"""
 
 import logging
+import os
 from typing import List
 
 import torch
 from datasets import IterableDataset
 
-from .prompt_tokenizers import InvalidDataException, PromptTokenizingStrategy
+from .prompt_tokenizers import PromptTokenizingStrategy
 
 # We want this to be a wrapper for an existing dataset that we have loaded
 # lets use the concept of middlewares to wrap each dataset, for example
@@ -14,6 +15,8 @@ from .prompt_tokenizers import InvalidDataException, PromptTokenizingStrategy
 # let's check to ensure we don't truncate an item in the middle, we'll use
 # the collators later on to pad the datasets
 
+LOG = logging.getLogger("axolotl")
+
 
 class TokenizedPromptDataset(IterableDataset):
     """
@@ -32,17 +35,15 @@ class TokenizedPromptDataset(IterableDataset):
         self.dataset = dataset
 
     def __iter__(self):
-        iterator = iter(self.dataset)
-        count = 0
-        # Loop through the entire dataset
-        for example in iterator:
-            try:
-                yield self.prompt_tokenizer.tokenize_prompt(example)
-                count += 1
-            except InvalidDataException:
-                pass
-        if count == 0:
-            raise RuntimeError("Expected at least one datapoint in dataset.")
+        features = self.dataset.features.keys()
+        num_proc = os.cpu_count()
+        return iter(
+            self.dataset.map(
+                self.prompt_tokenizer.tokenize_prompt,
+                num_proc=num_proc,
+                remove_columns=features,
+            )
+        )
 
 
 # TODO this isn't the best since it can't interleave datasets
@@ -115,7 +116,7 @@ class ConstantLengthDataset(IterableDataset):
                     "attention_mask": attention_mask,
                 }
             else:
-                logging.warning(
+                LOG.warning(
                     f"dropping batch due to tensor size mismatch input_ids: {input_ids.size()}, labels: {labels.size()}, attention_mask: {attention_mask.size()}"
                 )
             buffer = {
@@ -126,6 +127,7 @@ class ConstantLengthDataset(IterableDataset):
             buffer_len = 0
 
         if example:
+            # FIXME
             # just going to drop data points that are too long
             if len(example["input_ids"]) <= self.seq_length:
                 input_ids = example["input_ids"]
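The __iter__ rewrite moves tokenization from a per-example Python loop into datasets' multiprocess map(). In miniature, with a toy tokenize function standing in for the real prompt tokenizer:

from datasets import Dataset

def tokenize_prompt(example):
    return {"input_ids": [len(w) for w in example["text"].split()]}

ds = Dataset.from_list([{"text": "hello world"}, {"text": "foo bar baz"}])
tokenized = ds.map(
    tokenize_prompt,
    num_proc=2,  # the real code uses os.cpu_count()
    remove_columns=list(ds.features.keys()),
)
print(tokenized[0])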
33  src/axolotl/logging_config.py  Normal file
@@ -0,0 +1,33 @@
+"""Logging configuration settings"""
+
+import os
+import sys
+from logging.config import dictConfig
+from typing import Any, Dict
+
+DEFAULT_LOGGING_CONFIG: Dict[str, Any] = {
+    "version": 1,
+    "formatters": {
+        "simple": {
+            "format": "[%(asctime)s] [%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] [PID:%(process)d] %(message)s",
+        },
+    },
+    "filters": {},
+    "handlers": {
+        "console": {
+            "class": "logging.StreamHandler",
+            "formatter": "simple",
+            "filters": [],
+            "stream": sys.stdout,
+        },
+    },
+    "root": {"handlers": ["console"], "level": os.getenv("LOG_LEVEL", "INFO")},
+    "loggers": {
+        "axolotl": {"handlers": ["console"], "level": "DEBUG", "propagate": False},
+    },
+}
+
+
+def configure_logging():
+    """Configure with default logging"""
+    dictConfig(DEFAULT_LOGGING_CONFIG)
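Callers are expected to run configure_logging() once at startup and then grab named loggers, as the scripts/finetune.py changes elsewhere in this compare do (sketch; assumes axolotl is importable):

import logging

from axolotl.logging_config import configure_logging

configure_logging()
LOG = logging.getLogger("axolotl.scripts")
LOG.info("handlers, format, and level now come from the shared dictConfig")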
@@ -8,7 +8,7 @@ import torch
 import transformers
 from einops import rearrange
 from flash_attn.bert_padding import pad_input, unpad_input
-from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
+from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
 from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
 
 
@@ -79,7 +79,7 @@ def forward(
         dtype=torch.int32,
         device=qkv.device,
     )
-    output = flash_attn_unpadded_qkvpacked_func(
+    output = flash_attn_varlen_qkvpacked_func(
         qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
     )
     output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
@@ -95,7 +95,7 @@ def forward(
         three=3,
         h=nheads,
     )
-    output_unpad = flash_attn_unpadded_qkvpacked_func(
+    output_unpad = flash_attn_varlen_qkvpacked_func(
         x_unpad,
         cu_q_lens,
         max_s,
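The rename tracks flash-attn's own API change, where the flash_attn_unpadded_* entry points became flash_attn_varlen_*. If older flash-attn releases still had to be supported, a shim along these lines would work (an assumption on my part, not part of the diff):

try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
except ImportError:  # older flash-attn releases
    from flash_attn.flash_attn_interface import (
        flash_attn_unpadded_qkvpacked_func as flash_attn_varlen_qkvpacked_func,
    )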
@@ -7,6 +7,7 @@ import math
 from typing import Optional, Tuple
 
 import torch
+import torch.nn.functional as F
 import transformers.models.llama.modeling_llama
 from torch import nn
 
@@ -38,21 +39,48 @@ def xformers_forward(
     # pylint: disable=duplicate-code
     bsz, q_len, _ = hidden_states.size()
 
-    query_states = (
-        self.q_proj(hidden_states)
-        .view(bsz, q_len, self.num_heads, self.head_dim)
-        .transpose(1, 2)
-    )
-    key_states = (
-        self.k_proj(hidden_states)
-        .view(bsz, q_len, self.num_heads, self.head_dim)
-        .transpose(1, 2)
-    )
-    value_states = (
-        self.v_proj(hidden_states)
-        .view(bsz, q_len, self.num_heads, self.head_dim)
-        .transpose(1, 2)
-    )
+    if not hasattr(self, "pretraining_tp"):
+        self.pretraining_tp = 1
+
+    if self.pretraining_tp > 1:
+        key_value_slicing = (
+            self.num_key_value_heads * self.head_dim
+        ) // self.pretraining_tp
+        query_slices = self.q_proj.weight.split(
+            (self.num_heads * self.head_dim) // self.pretraining_tp, dim=0
+        )
+        key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+        value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+        query_states = [
+            F.linear(hidden_states, query_slices[i]) for i in range(self.pretraining_tp)
+        ]
+        query_states = torch.cat(query_states, dim=-1)
+
+        key_states = [
+            F.linear(hidden_states, key_slices[i]) for i in range(self.pretraining_tp)
+        ]
+        key_states = torch.cat(key_states, dim=-1)
+
+        value_states = [
+            F.linear(hidden_states, value_slices[i]) for i in range(self.pretraining_tp)
+        ]
+        value_states = torch.cat(value_states, dim=-1)
+
+    else:
+        query_states = self.q_proj(hidden_states)
+        key_states = self.k_proj(hidden_states)
+        value_states = self.v_proj(hidden_states)
+
+    query_states = query_states.view(
+        bsz, q_len, self.num_heads, self.head_dim
+    ).transpose(1, 2)
+    key_states = key_states.view(
+        bsz, q_len, self.num_key_value_heads, self.head_dim
+    ).transpose(1, 2)
+    value_states = value_states.view(
+        bsz, q_len, self.num_key_value_heads, self.head_dim
+    ).transpose(1, 2)
 
     kv_seq_len = key_states.shape[-2]
     if past_key_value is not None:
@@ -73,6 +101,14 @@ def xformers_forward(
 
     past_key_value = (key_states, value_states) if use_cache else None
 
+    # repeat k/v heads if n_kv_heads < n_heads
+    key_states = transformers.models.llama.modeling_llama.repeat_kv(
+        key_states, self.num_key_value_groups
+    )
+    value_states = transformers.models.llama.modeling_llama.repeat_kv(
+        value_states, self.num_key_value_groups
+    )
+
     # We only apply xformers optimizations if we don't need to output the whole attention matrix
     if not output_attentions:
         query_states = query_states.transpose(1, 2)
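The repeat_kv calls exist because grouped-query models ship fewer key/value heads than query heads, and the helper tiles them up to match before the xformers kernel runs. A sketch of what transformers' repeat_kv does:

import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    bsz, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    expanded = hidden_states[:, :, None, :, :].expand(
        bsz, num_kv_heads, n_rep, slen, head_dim
    )
    return expanded.reshape(bsz, num_kv_heads * n_rep, slen, head_dim)

kv = torch.randn(1, 2, 5, 8)   # 2 kv heads
print(repeat_kv(kv, 4).shape)  # -> torch.Size([1, 8, 5, 8]), matching 8 query heads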
@@ -128,10 +164,23 @@ def xformers_forward(
             f" {attn_output.size()}"
         )
 
-    attn_output = attn_output.transpose(1, 2)
+    attn_output = attn_output.transpose(1, 2).contiguous()
+    # end x-formers vs. not x-formers if-else block
 
     attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-    attn_output = self.o_proj(attn_output)
+
+    if self.pretraining_tp > 1:
+        attn_output = attn_output.split(self.hidden_size // self.pretraining_tp, dim=2)
+        o_proj_slices = self.o_proj.weight.split(
+            self.hidden_size // self.pretraining_tp, dim=1
+        )
+        attn_output = sum(
+            F.linear(attn_output[i], o_proj_slices[i])
+            for i in range(self.pretraining_tp)
+        )
+    else:
+        attn_output = self.o_proj(attn_output)
 
     return attn_output, attn_weights, past_key_value
 
 
@@ -184,14 +233,15 @@ def sdp_attention_forward(
 
     # We only apply sdp attention if we don't need to output the whole attention matrix
     if not output_attentions:
-        attn_output = torch.nn.functional.scaled_dot_product_attention(
-            query_states,
-            key_states,
-            value_states,
-            attn_mask=attention_mask,
-            is_causal=False,
-        )
-        attn_weights = None
+        with torch.backends.cuda.sdp_kernel():
+            attn_output = torch.nn.functional.scaled_dot_product_attention(
+                query_states,
+                key_states,
+                value_states,
+                attn_mask=attention_mask,
+                is_causal=False,
+            )
+            attn_weights = None
     else:
         attn_weights = torch.matmul(
             query_states, key_states.transpose(2, 3)
@@ -53,7 +53,7 @@ from transformers.utils import (
     replace_return_docstrings,
 )
 
-logger = logging.get_logger(__name__)
+LOG = logging.getLogger("axolotl")
 
 _CONFIG_FOR_DOC = "LlamaConfig"
 
@@ -862,7 +862,7 @@ class LlamaModel(LlamaPreTrainedModel):
 
         if self.gradient_checkpointing and self.training:
             if use_cache:
-                logger.warning_once(
+                LOG.warning_once(
                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                 )
                 use_cache = False
@@ -6,7 +6,7 @@ from axolotl.prompt_tokenizers import (
     AlpacaPromptTokenizingStrategy,
     InstructionPromptTokenizingStrategy,
 )
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter
 
 
 def load(tokenizer, cfg):
@@ -20,11 +20,38 @@ def load(tokenizer, cfg):
 
 class AlpacaConcisePrompter(AlpacaPrompter):
     """
-    Alpaca Prompter extending the system prompt to ask for concise answers
+    Alpaca Prompter extending the system prompt to ask for concise chat-instruct answers
     """
 
-    system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that concisely and appropriately completes the request.\n\n"
-    system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately and concisely completes the request.\n\n"
+    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
+    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"
+
+
+class AlpacaChatPrompter(AlpacaPrompter):
+    """
+    Alpaca Chat Prompter extending the system prompt to for chat-instruct answers
+    """
+
+    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
+    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"
+
+    def __init__(self):  # pylint: disable=super-init-not-called
+        self.prompt_style = PromptStyle.CHAT.value
+        self.match_prompt_style()
+
+
+class NoSystemPrompter(AlpacaPrompter):
+    """
+    Null Prompter with no system prompts
+    """
+
+    system_prompt = ""
+    system_no_input_prompt = ""
+    turn_format = "{instruction} {input} "
+    turn_no_input_format = "{instruction} "
+
+    def __init__(self):  # pylint: disable=super-init-not-called
+        pass
 
 
 class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
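A standalone illustration of what the new NoSystemPrompter yields — no system preamble, just the bare turn templates (the real class inherits from AlpacaPrompter; this replays its two format strings directly):

turn_format = "{instruction} {input} "
turn_no_input_format = "{instruction} "

print(turn_format.format(instruction="Summarize:", input="some text"))
# -> "Summarize: some text "
print(turn_no_input_format.format(instruction="Tell me a joke."))
# -> "Tell me a joke. "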
@@ -64,7 +91,7 @@ def load_concise(tokenizer, cfg):
 
 def load_qa(tokenizer, cfg):
     return AlpacaQAPromptTokenizingStrategy(
-        AlpacaPrompter(PromptStyle.CHAT.value),
+        AlpacaChatPrompter(),
         tokenizer,
         cfg.train_on_inputs,
         cfg.sequence_len,
@@ -73,7 +100,16 @@ def load_qa(tokenizer, cfg):
 
 def load_camel_ai(tokenizer, cfg):
     return CamelAIPromptTokenizingStrategy(
-        AlpacaPrompter(PromptStyle.CHAT.value),
+        AlpacaChatPrompter(),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_no_prompt(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        UnpromptedPrompter(PromptStyle.CHAT.value),
         tokenizer,
         cfg.train_on_inputs,
         cfg.sequence_len,

@@ -1,7 +1,7 @@
 """Module loading the AlpacaInstructPromptTokenizingStrategy class"""
 
 from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter
 
 
 def load(tokenizer, cfg):
@@ -11,3 +11,12 @@ def load(tokenizer, cfg):
         cfg.train_on_inputs,
         cfg.sequence_len,
     )
+
+
+def load_no_prompt(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        UnpromptedPrompter(PromptStyle.INSTRUCT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
139  src/axolotl/prompt_strategies/alpaca_w_system.py  Normal file
@@ -0,0 +1,139 @@
+"""
+Prompt strategies loader for alpaca instruction datasets with system prompts
+"""
+from typing import Generator, Tuple, Union
+
+from axolotl.prompt_tokenizers import PromptTokenizingStrategy
+from axolotl.prompters import AlpacaPrompter, PromptStyle
+
+
+class InstructionWSystemPromptTokenizingStrategy(PromptTokenizingStrategy):
+    """
+    Tokenizing strategy for instruction-based prompts.
+    """
+
+    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str]:
+        return (
+            prompt["instruction"],
+            prompt["input"] if "input" in prompt else "",
+            prompt["output"],
+            prompt["system"],
+        )
+
+    def tokenize_prompt(self, prompt):
+        # pylint: disable=duplicate-code
+        (
+            instruction,
+            input,  # pylint: disable=redefined-builtin
+            response,
+            system,
+        ) = self.parse_instruction_fields(prompt)
+        user_prompt = next(
+            iter(
+                self.prompter.build_prompt_w_system(
+                    system,
+                    instruction,
+                    input,
+                )
+            )
+        )
+        tokenized_prompt = self._tokenize(user_prompt, add_eos_token=False)
+        if not self.train_on_inputs:
+            user_prompt_len = len(tokenized_prompt["input_ids"])
+            # TODO this could be sped up using numpy array slicing
+            tokenized_prompt["labels"] = [-100] * user_prompt_len
+        tokenized_res_prompt = self._tokenize(
+            response, strip_bos_token=True, add_eos_token=True
+        )
+        tokenized_prompt["input_ids"] += tokenized_res_prompt["input_ids"]
+        tokenized_prompt["attention_mask"] += tokenized_res_prompt["attention_mask"]
+        tokenized_prompt["labels"] += tokenized_res_prompt["input_ids"]
+
+        return tokenized_prompt
+
+
+class SystemDataPrompter(AlpacaPrompter):
+    """
+    Alpaca Style Prompter that uses system prompts from the dataset
+    """
+
+    def build_prompt_w_system(
+        self,
+        system: str,
+        instruction: str,
+        input: Union[None, str] = None,  # pylint: disable=redefined-builtin
+        output: Union[None, str] = None,
+    ) -> Generator[str, None, None]:
+        # returns the full prompt from instruction and optional input
+        # if a label (=response, =output) is provided, it's also appended.
+        formatted_sys_prompt = f"### System:\n{system}\n\n" if system else ""
+        if input:
+            res = formatted_sys_prompt + self.turn_format.format(
+                instruction=instruction, input=input
+            )
+        else:
+            res = formatted_sys_prompt + self.turn_no_input_format.format(
+                instruction=instruction
+            )
+        if output:
+            res = f"{res}{output}"
+        yield res
+
+
+class OpenOrcaSystemDataPrompter(SystemDataPrompter):
+    """
+    Alpaca Style Prompter that uses system prompts from the dataset, with OpenOrca prompts
+    """
+
+    def match_prompt_style(self):
+        if self.prompt_style == PromptStyle.INSTRUCT.value:
+            self.turn_format = "### User:\n{instruction}\n\n### Additional Context:\n{input}\n\n### Assistant:\n"
+            self.turn_no_input_format = "### User:\n{instruction}\n\n### Assistant:\n"
+        if self.prompt_style == PromptStyle.CHAT.value:
+            self.turn_format = "USER: {instruction}\n{input}\nASSISTANT:"
+            self.turn_no_input_format = "USER: {instruction}\nASSISTANT:"
+
+
+class OpenOrcaPromptTokenizingStrategy(InstructionWSystemPromptTokenizingStrategy):
+    """
+    Tokenizing strategy for OpenOrca datasets
+    """
+
+    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str]:
+        return (
+            prompt["question"],
+            "",
+            prompt["response"],
+            prompt["system_prompt"],
+        )
+
+
+def load(tokenizer, cfg):
+    return load_chat(tokenizer, cfg)
+
+
+def load_instruct(tokenizer, cfg):
+    return InstructionWSystemPromptTokenizingStrategy(
+        SystemDataPrompter(PromptStyle.INSTRUCT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_chat(tokenizer, cfg):
+    return InstructionWSystemPromptTokenizingStrategy(
+        SystemDataPrompter(PromptStyle.CHAT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_open_orca(tokenizer, cfg):
+    return OpenOrcaPromptTokenizingStrategy(
+        OpenOrcaSystemDataPrompter(PromptStyle.INSTRUCT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
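What build_prompt_w_system produces for a chat-style record, replayed with plain string formatting (illustrative only; the system and instruction values are hypothetical):

system = "You are a helpful assistant."
instruction = "Name a prime number."
turn_no_input_format = "USER: {instruction}\nASSISTANT:"

formatted_sys_prompt = f"### System:\n{system}\n\n" if system else ""
print(formatted_sys_prompt + turn_no_input_format.format(instruction=instruction))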
@@ -11,6 +11,8 @@ from axolotl.prompt_tokenizers import (
     tokenize_prompt_default,
 )
 
+LOG = logging.getLogger("axolotl")
+
 IGNORE_TOKEN_ID = -100
 
 
@@ -64,7 +66,7 @@ class PygmalionPromptTokenizingStrategy(PromptTokenizingStrategy):
                 *copy.deepcopy(res["input_ids"])
             ][len(self.bot_prefix_token_ids) :]
         else:
-            logging.warning(f"unknown role in conversation: {role}")
+            LOG.warning(f"unknown role in conversation: {role}")
             res = defaultdict(lambda: [])
 
         # pylint: disable=duplicate-code
@@ -10,6 +10,8 @@ from transformers import PreTrainedTokenizer
 
 from axolotl.prompters import IGNORE_TOKEN_ID
 
+LOG = logging.getLogger("axolotl")
+
 IGNORE_INDEX = -100
 LLAMA_DEFAULT_PAD_TOKEN = "[PAD]"  # nosec
 LLAMA_DEFAULT_EOS_TOKEN = "</s>"  # nosec
@@ -46,16 +48,22 @@ class PromptTokenizingStrategy(abc.ABC):
 
     @functools.lru_cache(maxsize=128)
     def _get_user_token(self):
-        id_or_ids = self.tokenizer.convert_tokens_to_ids("<|USER|>")
-        if isinstance(id_or_ids, (int,)):
-            return id_or_ids
+        try:
+            id_or_ids = self.tokenizer.convert_tokens_to_ids("<|USER|>")
+            if isinstance(id_or_ids, (int,)):
+                return id_or_ids
+        except KeyError:
+            pass
         return False
 
     @functools.lru_cache(maxsize=128)
     def _get_assistant_token(self):
-        id_or_ids = self.tokenizer.convert_tokens_to_ids("<|ASSISTANT|>")
-        if isinstance(id_or_ids, (int,)):
-            return id_or_ids
+        try:
+            id_or_ids = self.tokenizer.convert_tokens_to_ids("<|ASSISTANT|>")
+            if isinstance(id_or_ids, (int,)):
+                return id_or_ids
+        except KeyError:
+            pass
         return False
 
     def _tokenize(self, prompt: str, add_eos_token=True, strip_bos_token=False):
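The try/except matters because some tokenizers raise KeyError for tokens absent from their vocab instead of returning an id. The same guard in miniature, with a plain dict standing in for the tokenizer lookup:

def get_special_token_id(vocab: dict, token: str):
    try:
        token_id = vocab[token]  # stands in for tokenizer.convert_tokens_to_ids
        if isinstance(token_id, int):
            return token_id
    except KeyError:
        pass
    return False

print(get_special_token_id({"<|USER|>": 50001}, "<|USER|>"))  # 50001
print(get_special_token_id({}, "<|ASSISTANT|>"))              # False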
@@ -87,7 +95,9 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
     Tokenizing strategy for instruction-based prompts.
     """
 
-    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
+    def parse_instruction_fields(
+        self, prompt
+    ) -> Union[Tuple[str, str, str], Tuple[str, str, str, str]]:
         raise NotImplementedError
 
     def tokenize_prompt(self, prompt):
@@ -96,25 +106,27 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
             input,  # pylint: disable=redefined-builtin
             response,
         ) = self.parse_instruction_fields(prompt)
-        full_prompt = self._build_full_prompt(instruction, input, response)
-        tokenized_full_prompt = self._tokenize(full_prompt)
-        if not self.train_on_inputs:
-            user_prompt = next(
-                iter(
-                    self.prompter.build_prompt(
-                        instruction,
-                        input,
-                    )
-                )
-            )
-            tokenized_user_prompt = self._tokenize(user_prompt, add_eos_token=False)
-            user_prompt_len = len(tokenized_user_prompt["input_ids"])
+        user_prompt = next(
+            iter(
+                self.prompter.build_prompt(
+                    instruction,
+                    input,
+                )
+            )
+        )
+        tokenized_prompt = self._tokenize(user_prompt, add_eos_token=False)
+        if not self.train_on_inputs:
+            user_prompt_len = len(tokenized_prompt["input_ids"])
             # TODO this could be sped up using numpy array slicing
-            tokenized_full_prompt["labels"] = [
-                -100
-            ] * user_prompt_len + tokenized_full_prompt["labels"][user_prompt_len:]
+            tokenized_prompt["labels"] = [-100] * user_prompt_len
+        tokenized_res_prompt = self._tokenize(
+            response, strip_bos_token=True, add_eos_token=True
+        )
+        tokenized_prompt["input_ids"] += tokenized_res_prompt["input_ids"]
+        tokenized_prompt["attention_mask"] += tokenized_res_prompt["attention_mask"]
+        tokenized_prompt["labels"] += tokenized_res_prompt["input_ids"]
 
-        return tokenized_full_prompt
+        return tokenized_prompt
 
     def _build_full_prompt(
         self, instruction, input, response  # pylint: disable=redefined-builtin
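The restructured tokenize_prompt builds input_ids as prompt-then-response and masks the prompt span with -100 so loss is only computed on the response tokens. The arithmetic in miniature (fake token ids):

prompt_ids = [101, 7592, 2088]    # tokenized user prompt
response_ids = [2054, 2003, 102]  # tokenized response (+ eos)

input_ids = prompt_ids + response_ids
labels = [-100] * len(prompt_ids) + response_ids  # the train_on_inputs=False case
print(input_ids)
print(labels)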
@@ -380,7 +392,7 @@ class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
                 # everything from this is masked out from the labels
                 labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
             else:
-                logging.warning(f"unhandled role: {part[0]}")
+                LOG.warning(f"unhandled role: {part[0]}")
 
         # pylint: disable=duplicate-code
         result, current_len = parse_tokenized_to_result(
@@ -436,7 +448,7 @@ def parse_tokenized_to_result(
     result: Dict[str, List[int]],
     current_len: int,
     res: Dict[str, List[int]],
-    labels: list[int],
+    labels: List[int],
     pad_token_id: Union[int, None] = None,
 ) -> Tuple[Dict[str, List[int]], int]:
     """
@@ -5,6 +5,7 @@ import logging
 from enum import Enum, auto
 from typing import Generator, List, Optional, Tuple, Union
 
+LOG = logging.getLogger("axolotl")
 IGNORE_TOKEN_ID = -100
 
 
@@ -24,6 +25,8 @@ class AlpacaPrompter:
 
     system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
     system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
+    turn_format: str
+    turn_no_input_format: str
     prompt_style: Optional[PromptStyle] = None
 
     def __init__(self, prompt_style=PromptStyle.INSTRUCT.value):
@@ -32,23 +35,13 @@ class AlpacaPrompter:
 
     def match_prompt_style(self):
         if self.prompt_style == PromptStyle.INSTRUCT.value:
-            self.prompt_input = (
-                self.system_prompt
-                + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
-            )
-            self.prompt_no_input = (
-                self.system_no_input_prompt
-                + "### Instruction:\n{instruction}\n\n### Response:\n"
-            )
-            self.response_split = "### Response:"
+            self.turn_format = "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
+            self.turn_no_input_format = (
+                "### Instruction:\n{instruction}\n\n### Response:\n"
+            )
         if self.prompt_style == PromptStyle.CHAT.value:
-            self.prompt_input = (
-                self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
-            )
-            self.prompt_no_input = (
-                self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
-            )
-            self.response_split = "ASSISTANT:"
+            self.turn_format = "USER: {instruction}\n{input}\nASSISTANT:"
+            self.turn_no_input_format = "USER: {instruction}\nASSISTANT:"
 
     def build_prompt(
         self,
@@ -59,16 +52,17 @@ class AlpacaPrompter:
         # returns the full prompt from instruction and optional input
         # if a label (=response, =output) is provided, it's also appended.
         if input:
-            res = self.prompt_input.format(instruction=instruction, input=input)
+            res = self.system_prompt + self.turn_format.format(
+                instruction=instruction, input=input
+            )
         else:
-            res = self.prompt_no_input.format(instruction=instruction)
+            res = self.system_no_input_prompt + self.turn_no_input_format.format(
+                instruction=instruction
+            )
         if output:
             res = f"{res}{output}"
         yield res
 
-    def get_response(self, output: str) -> str:
-        return output.split(self.response_split)[1].strip()
-
 
 class UnpromptedPrompter(AlpacaPrompter):
     """
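After the refactor, the system preamble and the per-turn template are stored separately and concatenated at build time. Replayed directly with the strings from the diff:

system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
turn_format = "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"

res = system_prompt + turn_format.format(instruction="Add the numbers.", input="2 and 2")
print(res)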
@@ -93,7 +87,10 @@ class MultipleChoiceExplainPrompter(AlpacaPrompter):
     """
 
     system_prompt = (
-        "Choose the answer that best answers the question. Explain your reasoning."
+        "Choose the answer that best answers the question. Explain your reasoning.\n"
+    )
+    system_no_input_prompt = (
+        "Choose the answer that best answers the question. Explain your reasoning.\n"
     )
 
 
@@ -102,7 +99,12 @@ class MultipleChoiceConcisePrompter(AlpacaPrompter):
     Prompter for multiple choice concise
     """
 
-    prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
+    system_prompt = "Choose the answer that best answers the question. Be concise in your response.\n\n"
+    system_no_input_prompt = "Choose the answer that best answers the question. Be concise in your response.\n\n"
+
+    def match_prompt_style(self):
+        self.turn_format = "USER: {instruction}\n{input}\nASSISTANT:"
+        self.turn_no_input_format = "USER: {instruction}\nASSISTANT:"
 
 
 class SummarizeTLDRPrompter(AlpacaPrompter):
@@ -110,9 +112,12 @@ class SummarizeTLDRPrompter(AlpacaPrompter):
     Prompter for summarize TLDR
     """
 
-    prompt_no_input = (
-        "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
-    )
+    system_prompt = ""
+    system_no_input_prompt = ""
+
+    def match_prompt_style(self):
+        self.turn_format = "USER: Summarize the following article as a TL;DR.\n{instruction}\n{input}\nASSISTANT:"
+        self.turn_no_input_format = "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
 
 
 class CompletionPrompter:
@@ -128,9 +133,6 @@ class CompletionPrompter:
     ) -> Generator[str, None, None]:
         yield instruction
 
-    def get_response(self, output: str) -> str:
-        return output.strip()
-
 
 class GPTeacherPrompter(AlpacaPrompter):
     """
@@ -210,9 +212,6 @@ class ReflectAlpacaPrompter:
             res = f"{res}{label}"
         yield res
 
-    def get_response(self, output: str) -> str:
-        return output.split(self.response_split)[1].strip()
-
 
 class SeparatorStyle(Enum):
     """Different separator style."""
@@ -243,7 +242,7 @@ class Conversation:
             if message:
                 yield (role + ":", " " + message)
             else:
-                logging.warning(f"role with empty message: {role}")
+                LOG.warning(f"role with empty message: {role}")
                 yield (role + ":", "")
 
     def copy(self):
@@ -289,12 +288,6 @@ class ShareGPTPrompter:  # pylint: disable=too-few-public-methods
             sep2=" ",
         )
 
-    # def match_prompt_style(self):
-    #     if self.prompt_style == PromptStyle.chat.value:
-    #         self.prompt_input = self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
-    #         self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
-    #         self.response_split = "ASSISTANT:"
-
     def build_prompt(self, source) -> Generator[str, None, None]:
         # ignore the system prompt if provided
         if source[0]["from"] == "system":
@@ -2,13 +2,14 @@
 
 import os
 
+from optimum.bettertransformer import BetterTransformer
 from transformers import (
     TrainerCallback,
     TrainerControl,
     TrainerState,
     TrainingArguments,
 )
-from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, IntervalStrategy
 
 
 class SavePeftModelCallback(TrainerCallback):  # pylint: disable=too-few-public-methods
@@ -30,3 +31,39 @@ class SavePeftModelCallback(TrainerCallback):  # pylint: disable=too-few-public-
         kwargs["model"].save_pretrained(peft_model_path)
 
         return control
+
+
+class SaveBetterTransformerModelCallback(
+    TrainerCallback
+):  # pylint: disable=too-few-public-methods
+    """Callback to save the BetterTransformer wrapped model"""
+
+    def on_step_end(
+        self,
+        args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **kwargs,
+    ):
+        # Save
+        if (
+            args.save_strategy == IntervalStrategy.STEPS
+            and args.save_steps > 0
+            and state.global_step % args.save_steps == 0
+        ):
+            control.should_save = True
+
+        if control.should_save:
+            checkpoint_folder = os.path.join(
+                args.output_dir,
+                f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}",
+            )
+
+            model = BetterTransformer.reverse(kwargs["model"])
+            model.save_pretrained(checkpoint_folder)
+            # FIXME - need to cleanup old checkpoints
+
+            # since we're saving here, we don't need the trainer loop to attempt to save too b/c
+            # the trainer will raise an exception since it can't save a BetterTransformer wrapped model
+            control.should_save = False
+        return control
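The callback follows the standard transformers contract: on_step_end receives TrainerControl, flips should_save, and returns it to the Trainer loop. A minimal sketch of that contract with a hypothetical class name (not in the diff):

from transformers import TrainerCallback

class EveryNStepsCallback(TrainerCallback):  # hypothetical example
    def __init__(self, n: int = 50):
        self.n = n

    def on_step_end(self, args, state, control, **kwargs):
        # ask the Trainer to checkpoint every n optimizer steps
        if state.global_step > 0 and state.global_step % self.n == 0:
            control.should_save = True
        return control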
@@ -1,10 +1,12 @@
 """Module containing data utilities"""
+import functools
+import itertools
 import logging
 from hashlib import md5
 from pathlib import Path
 from typing import List, Tuple, Union
 
+import torch
 from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
 from huggingface_hub import hf_hub_download
 from transformers import PreTrainedTokenizerBase
@@ -34,6 +36,8 @@ from axolotl.prompters import (
     SummarizeTLDRPrompter,
 )
 
+LOG = logging.getLogger("axolotl")
+
 
 def load_tokenized_prepared_datasets(
     tokenizer, cfg, default_dataset_prepared_path
@@ -72,17 +76,17 @@ def load_tokenized_prepared_datasets(
     if dataset:
         ...
     elif any(prepared_ds_path.glob("*")):
-        logging.info(f"Loading prepared dataset from disk at {prepared_ds_path}...")
+        LOG.info(f"Loading prepared dataset from disk at {prepared_ds_path}...")
         dataset = load_from_disk(str(prepared_ds_path))
-        logging.info("Prepared dataset loaded from disk...")
+        LOG.info("Prepared dataset loaded from disk...")
     else:
-        logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
-        logging.info("Loading raw datasets...")
+        LOG.info(f"Unable to find prepared dataset in {prepared_ds_path}")
+        LOG.info("Loading raw datasets...")
 
         if cfg.seed:
             seed = cfg.seed
         else:
-            logging.info("No seed provided, using default seed of 42")
+            LOG.info("No seed provided, using default seed of 42")
             seed = 42
 
         datasets = []
@@ -93,6 +97,7 @@ def load_tokenized_prepared_datasets(
             try:
                 load_dataset(
                     d.path,
+                    name=d.name,
                     streaming=True,
                     use_auth_token=use_auth_token,
                 )
@@ -101,34 +106,45 @@ def load_tokenized_prepared_datasets(
                 pass
 
             # prefer local dataset, even if hub exists
-            if Path(d.path).exists():
-                ds = load_dataset(
-                    "json",
-                    data_files=d.path,
-                    streaming=False,
-                    split=None,
-                )
-            elif ds_from_hub:
-                if d.data_files:
-                    ds = load_dataset(
-                        d.path,
-                        streaming=False,
-                        data_files=d.data_files,
-                        use_auth_token=use_auth_token,
-                    )
-                else:
-                    ds = load_dataset(
-                        d.path,
-                        streaming=False,
-                        use_auth_token=use_auth_token,
-                    )
+            local_path = Path(d.path)
+            if local_path.exists():
+                if local_path.is_dir():
+                    ds = load_dataset(
+                        d.path,
+                        name=d.name,
+                        data_files=d.data_files,
+                        streaming=False,
+                        split=None,
+                    )
+                elif local_path.is_file():
+                    ds = load_dataset(
+                        "json",
+                        name=d.name,
+                        data_files=d.path,
+                        streaming=False,
+                        split=None,
+                    )
+                else:
+                    raise ValueError(
+                        "unhandled dataset load: local path exists, but is neither a directory or a file"
+                    )
+            elif ds_from_hub:
+                ds = load_dataset(
+                    d.path,
+                    name=d.name,
+                    streaming=False,
+                    data_files=d.data_files,
+                    use_auth_token=use_auth_token,
+                )
             else:
                 fp = hf_hub_download(
                     repo_id=d.path,
                     repo_type="dataset",
                     filename=d.data_files,
                 )
-                ds = load_dataset("json", data_files=fp, streaming=False, split=None)
+                ds = load_dataset(
+                    "json", name=d.name, data_files=fp, streaming=False, split=None
+                )
             if not ds:
                 raise ValueError("unhandled dataset load")
             # support for using a subset of the data
@@ -242,25 +258,29 @@ def load_tokenized_prepared_datasets(
             suffix = ""
             if ":load_" in d.type:
                 suffix = f" Did you mean {d.type.replace(':load_', '.load_')}?"
-            logging.error(
-                f"unhandled prompt tokenization strategy: {d.type}. {suffix}"
-            )
+            LOG.error(f"unhandled prompt tokenization strategy: {d.type}. {suffix}")
            raise ValueError(
                 f"unhandled prompt tokenization strategy: {d.type} {suffix}"
             )
-        logging.info("tokenizing, merging, and shuffling master dataset")
+        LOG.info("tokenizing, merging, and shuffling master dataset")
 
         samples: List[int] = []
+        chunk_size = 1000
         for d in datasets:
-            samples = samples + list(d)
+            d_iter = iter(d)
+            while True:
+                chunk = list(itertools.islice(d_iter, chunk_size))
+                if not chunk:
+                    break
+                samples.extend(chunk)
+
+        LOG.info("shuffle")
         dataset = Dataset.from_list(samples).shuffle(seed=seed)
         if cfg.local_rank == 0:
-            logging.info(
-                f"Saving merged prepared dataset to disk... {prepared_ds_path}"
-            )
+            LOG.info(f"Saving merged prepared dataset to disk... {prepared_ds_path}")
             dataset.save_to_disk(prepared_ds_path)
             if cfg.push_dataset_to_hub:
-                logging.info(
+                LOG.info(
                     f"Saving merged prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
                 )
                 dataset.push_to_hub(
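Note the change above from `samples = samples + list(d)` to chunked reads: repeated list concatenation re-copies the accumulated list for every dataset, while `itertools.islice` plus `extend` appends in place. A standalone sketch of the pattern (the iterable is a stand-in):

    import itertools

    def drain_in_chunks(iterable, chunk_size=1000):
        out = []
        it = iter(iterable)
        while True:
            chunk = list(itertools.islice(it, chunk_size))
            if not chunk:  # islice yields an empty list once the iterator is exhausted
                break
            out.extend(chunk)  # in-place append, no re-copying of `out`
        return out

    print(drain_in_chunks(range(10), chunk_size=4))  # [0, 1, 2, ..., 9]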
@@ -311,7 +331,7 @@ def load_prepare_datasets(
     use_auth_token = cfg.hf_use_auth_token
     try:
         if cfg.push_dataset_to_hub:
-            logging.info(
+            LOG.info(
                 f"Checking for packed prepared dataset from hub... {cfg.push_dataset_to_hub}/{ds_hash}"
             )
             dataset = load_dataset(
@@ -325,13 +345,13 @@ def load_prepare_datasets(
     if dataset:
         ...
     elif any(prepared_ds_path.glob("*")):
-        logging.info(
+        LOG.info(
             f"Loading prepared packed dataset from disk at {prepared_ds_path}..."
         )
         dataset = load_from_disk(str(prepared_ds_path))
-        logging.info("Prepared packed dataset loaded from disk...")
+        LOG.info("Prepared packed dataset loaded from disk...")
         if cfg.push_dataset_to_hub:
-            logging.info(
+            LOG.info(
                 f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
             )
             dataset.push_to_hub(
@@ -350,9 +370,7 @@ def load_prepare_datasets(
             [dataset],
             seq_length=max_packed_sequence_len,
         )
-        logging.info(
-            f"packing master dataset to len: {cfg.max_packed_sequence_len}"
-        )
+        LOG.info(f"packing master dataset to len: {cfg.max_packed_sequence_len}")
         dataset = Dataset.from_list(list(constant_len_dataset))
 
         # filter out bad data
@@ -368,12 +386,12 @@ def load_prepare_datasets(
         )
 
         if cfg.local_rank == 0:
-            logging.info(
+            LOG.info(
                 f"Saving packed prepared dataset to disk... {prepared_ds_path}"
             )
             dataset.save_to_disk(prepared_ds_path)
             if cfg.push_dataset_to_hub:
-                logging.info(
+                LOG.info(
                     f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
                 )
                 dataset.push_to_hub(
@@ -386,7 +404,7 @@ def load_prepare_datasets(
         )
 
     if cfg.dataset_shard_num and cfg.dataset_shard_idx is not None:
-        logging.info(
+        LOG.info(
             f"Using index #{cfg.dataset_shard_idx} of {cfg.dataset_shard_num} shards"
         )
         dataset = dataset.shard(
@@ -394,8 +412,127 @@ def load_prepare_datasets(
         index=cfg.dataset_shard_idx,
     )
 
-    dataset = dataset.train_test_split(test_size=cfg.val_set_size, shuffle=False)
-    train_dataset = dataset["train"]
-    eval_dataset = dataset["test"]
+    if cfg.val_set_size:
+        dataset = dataset.train_test_split(test_size=cfg.val_set_size, shuffle=False)
+        train_dataset = dataset["train"]
+        eval_dataset = dataset["test"]
+    else:
+        train_dataset = dataset
+        eval_dataset = None
 
     return train_dataset, eval_dataset
+
+
+def encode_pretraining(tokenizer, max_tokens, examples):
+    res = tokenizer(
+        examples["text"],
+        truncation=True,
+        max_length=max_tokens - 2,
+        add_special_tokens=True,
+    )
+    # Convert to PyTorch tensors
+    input_ids = [torch.tensor(seq) for seq in res["input_ids"]]
+    attention_mask = [torch.tensor(seq) for seq in res["attention_mask"]]
+    new_input_ids = []
+    new_attention_mask = []
+    # Append EOS and PAD tokens to input_ids, and correct attention_mask
+    for i, _ in enumerate(input_ids):
+        input_ids[i] = torch.cat(
+            (
+                input_ids[i],
+                torch.tensor([tokenizer.eos_token_id, tokenizer.pad_token_id]),
+            ),
+            dim=0,
+        )
+        attention_mask[i] = torch.cat((attention_mask[i], torch.tensor([1, 0])), dim=0)
+
+    # Concatenate tokens so that their lengths are less than max_tokens
+    buffer_input_ids = torch.tensor([], dtype=torch.long)
+    buffer_attention_mask = torch.tensor([], dtype=torch.long)
+
+    for ids, mask in zip(input_ids, attention_mask):
+        if buffer_input_ids.numel() == max_tokens:
+            new_input_ids.append(buffer_input_ids)
+            new_attention_mask.append(buffer_attention_mask)
+            buffer_input_ids = torch.tensor([], dtype=torch.long)
+            buffer_attention_mask = torch.tensor([], dtype=torch.long)
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+        elif buffer_input_ids.numel() + ids.numel() <= max_tokens:
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+        else:
+            buffer_input_ids = torch.cat(
+                (
+                    buffer_input_ids,
+                    torch.full(
+                        (max_tokens - buffer_input_ids.numel(),),
+                        tokenizer.pad_token_id,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            buffer_attention_mask = torch.cat(
+                (
+                    buffer_attention_mask,
+                    torch.full(
+                        (max_tokens - buffer_attention_mask.numel(),),
+                        0,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            new_input_ids.append(buffer_input_ids)
+            new_attention_mask.append(buffer_attention_mask)
+            buffer_input_ids = torch.tensor([], dtype=torch.long)
+            buffer_attention_mask = torch.tensor([], dtype=torch.long)
+
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+
+    if buffer_input_ids.numel() > 0:  # for any leftover tokens
+        while buffer_input_ids.numel() < max_tokens:  # make all sequences equal in size
+            buffer_input_ids = torch.cat(
+                (
+                    buffer_input_ids,
+                    torch.full(
+                        (max_tokens - buffer_input_ids.numel(),),
+                        tokenizer.pad_token_id,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            buffer_attention_mask = torch.cat(
+                (
+                    buffer_attention_mask,
+                    torch.full(
+                        (max_tokens - buffer_attention_mask.numel(),),
+                        0,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+        new_input_ids.append(buffer_input_ids)
+        new_attention_mask.append(buffer_attention_mask)
+
+    ret = {
+        "input_ids": [seq.tolist() for seq in new_input_ids],
+        "labels": [seq.tolist() for seq in new_input_ids],
+        "attention_mask": [seq.tolist() for seq in new_attention_mask],
+    }
+
+    LOG.debug(len(ret["input_ids"]))
+    return ret
+
+
+def load_pretraining_dataset(path, tokenizer, max_tokens=2048, seed=42):
+    encode = functools.partial(encode_pretraining, tokenizer, max_tokens)
+    dataset = load_dataset(path, streaming=True, split="train")
+    dataset = dataset.shuffle(seed=seed, buffer_size=10_000)
+    # TODO dynamically figure out which columns/features to remove
+    dataset = dataset.map(encode, batched=True, remove_columns=["text", "meta"])
+    return dataset
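`load_pretraining_dataset` above wires streaming, shuffling, and the `encode_pretraining` packer into one pipeline; every emitted row is packed or padded to exactly `max_tokens` ids. A hedged usage sketch (the dataset path is a placeholder and must expose `text` and `meta` columns, as the `remove_columns` call assumes):

    # Sketch only; substitute any streaming-compatible corpus with "text"/"meta" columns.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
    dataset = load_pretraining_dataset("some-org/some-corpus", tokenizer, max_tokens=2048)
    row = next(iter(dataset))
    assert len(row["input_ids"]) == 2048  # every packed row has exactly max_tokens ids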
@@ -10,17 +10,21 @@ from typing import TYPE_CHECKING, Optional, Tuple  # noqa: F401
 import bitsandbytes as bnb
 import torch
 import transformers
-from transformers import PreTrainedModel  # noqa: F401
+from optimum.bettertransformer import BetterTransformer
 from transformers import (  # noqa: F401
     AutoConfig,
     AutoModelForCausalLM,
     AutoTokenizer,
     BitsAndBytesConfig,
     LlamaConfig,
+    PreTrainedModel,
+    PreTrainedTokenizerBase,
 )
 
 from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
 
+LOG = logging.getLogger("axolotl")
+
 if TYPE_CHECKING:
     from peft import PeftConfig  # noqa: F401
 
@@ -32,21 +36,26 @@ def load_tokenizer(
     tokenizer_type,
     cfg,
 ):
+    use_fast = True  # this is the default
+    if cfg.tokenizer_use_fast is not None:
+        use_fast = cfg.tokenizer_use_fast
     if tokenizer_type:
         tokenizer = getattr(transformers, tokenizer_type).from_pretrained(
             tokenizer_config,
             trust_remote_code=cfg.trust_remote_code or False,
+            use_fast=use_fast,
         )
     else:
         tokenizer = AutoTokenizer.from_pretrained(
             tokenizer_config,
             trust_remote_code=cfg.trust_remote_code or False,
+            use_fast=use_fast,
         )
 
-    logging.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
-    logging.debug(f"BOS: {tokenizer.bos_token_id} / {tokenizer.bos_token}")
-    logging.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
-    logging.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
+    LOG.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
+    LOG.debug(f"BOS: {tokenizer.bos_token_id} / {tokenizer.bos_token}")
+    LOG.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
+    LOG.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
 
     if tokenizer.__class__.__name__ in [
         "LlamaTokenizer",
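`load_tokenizer` now honors a `tokenizer_use_fast` config key: `None` keeps the transformers default (the fast Rust tokenizer), while an explicit value is forwarded as `use_fast`. The new tests/test_tokenizers.py further down exercises both paths; a minimal standalone sketch of the same selection (a plain dict stands in for axolotl's config object):

    from transformers import AutoTokenizer

    cfg = {"tokenizer_use_fast": None}  # None -> leave the library default alone
    use_fast = True
    if cfg.get("tokenizer_use_fast") is not None:
        use_fast = cfg["tokenizer_use_fast"]

    tok = AutoTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=use_fast)
    print(type(tok).__name__)  # LlamaTokenizerFast while use_fast stays True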
@@ -70,7 +79,7 @@ def load_tokenizer(
 def load_model(
     base_model, base_model_config, model_type, tokenizer, cfg, adapter="lora"
 ):
-    # type: (str, str, str, AutoTokenizer, DictDefault, Optional[str]) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
+    # type: (str, str, str, PreTrainedTokenizerBase, DictDefault, Optional[str]) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
     """
     Load a model from a base model and a model type.
     """
@@ -83,23 +92,25 @@ def load_model(
 
     if cfg.is_llama_derived_model and cfg.flash_attention:
         if cfg.device not in ["mps", "cpu"] and not cfg.inference:
-            from axolotl.flash_attn import replace_llama_attn_with_flash_attn
+            from axolotl.monkeypatch.llama_attn_hijack_flash import (
+                replace_llama_attn_with_flash_attn,
+            )
 
-            logging.info("patching with flash attention")
+            LOG.info("patching with flash attention")
             replace_llama_attn_with_flash_attn()
     elif cfg.is_llama_derived_model and cfg.xformers_attention:
         from axolotl.monkeypatch.llama_attn_hijack_xformers import (
             hijack_llama_attention,
         )
 
-        logging.info("patching with xformers attention")
+        LOG.info("patching with xformers attention")
         hijack_llama_attention()
     elif cfg.is_llama_derived_model and cfg.sdp_attention:
         from axolotl.monkeypatch.llama_attn_hijack_xformers import (
             hijack_llama_sdp_attention,
         )
 
-        logging.info("patching with sdp attention")
+        LOG.info("patching with sdp attention")
         hijack_llama_sdp_attention()
     elif cfg.is_llama_derived_model and cfg.landmark_attention:
         from axolotl.monkeypatch.llama_landmark_attn import (
@@ -107,7 +118,7 @@ def load_model(
             patch_llama_with_landmark_attn,
         )
 
-        logging.info("patching with landmark attention")
+        LOG.info("patching with landmark attention")
         patch_llama_with_landmark_attn()
 
         # Note: This might overwrite previous additional_special_tokens
@@ -118,12 +129,12 @@ def load_model(
             replace_llama_rope_with_xpos_rope,
         )
 
-        logging.info("patching with xpos rope")
+        LOG.info("patching with xpos rope")
         replace_llama_rope_with_xpos_rope()
 
-    if cfg.bf16:
+    if cfg.bf16 or cfg.bfloat16:
         torch_dtype = torch.bfloat16
-    elif cfg.load_in_8bit or cfg.fp16:
+    elif cfg.load_in_8bit or cfg.fp16 or cfg.float16:
         torch_dtype = torch.float16
     else:
         torch_dtype = torch.float32
@@ -135,18 +146,24 @@ def load_model(
 
             replace_peft_model_with_int4_lora_model()
         except Exception as err:
-            logging.exception(err)
+            LOG.exception(err)
             raise err
 
-    try:
-        from peft import prepare_model_for_kbit_training
-    except ImportError:
-        # For backward compatibility
-        from peft import (
-            prepare_model_for_int8_training as prepare_model_for_kbit_training,
-        )
+    if not cfg.gptq and (
+        (cfg.adapter == "lora" and load_in_8bit)
+        or (cfg.adapter == "qlora" and cfg.load_in_4bit)
+    ):
+        try:
+            from peft import prepare_model_for_kbit_training
+        except ImportError:
+            # For backward compatibility
+            from peft import (
+                prepare_model_for_int8_training as prepare_model_for_kbit_training,
+            )
 
     model_kwargs = {}
+    if cfg.model_revision:
+        model_kwargs["revision"] = cfg.model_revision
     if cfg.adapter == "qlora" and cfg.load_in_4bit:
         model_kwargs["quantization_config"] = BitsAndBytesConfig(
             load_in_4bit=True,
@@ -178,7 +195,7 @@ def load_model(
             if len(files) > 0:
                 model_path = str(files[0])
             else:
-                logging.warning(
+                LOG.warning(
                     "unable to find a cached model file, this will likely fail..."
                 )
                 model_path = str(cache_model_path)
@@ -195,7 +212,7 @@ def load_model(
                 else True,
             )
             load_in_8bit = False
-        elif cfg.is_llama_derived_model:
+        elif cfg.is_llama_derived_model and not cfg.trust_remote_code:
             from transformers import LlamaForCausalLM
 
             config = LlamaConfig.from_pretrained(base_model_config)
@@ -234,7 +251,7 @@ def load_model(
         #     device=cfg.device,
         # )
         # model.train() # sets to train instead of eval mode
-        elif model_type:
+        elif model_type and not cfg.trust_remote_code:
             model = getattr(transformers, model_type).from_pretrained(
                 base_model,
                 load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
@@ -251,15 +268,20 @@ def load_model(
             )
             # Shouldn't be a problem most of the time. will obviously error if the model doesn't support this
             # when training starts
-            if hasattr(config, "max_seq_len") and cfg.sequence_len > config.max_seq_len:
+            if (
+                hasattr(config, "max_seq_len")
+                and config.max_seq_len
+                and cfg.sequence_len > config.max_seq_len
+            ):
                 config.max_seq_len = cfg.sequence_len
-                logging.warning(f"increasing context length to {cfg.sequence_len}")
+                LOG.warning(f"increasing context length to {cfg.sequence_len}")
             elif (
                 hasattr(config, "max_sequence_length")
+                and config.max_sequence_length
                 and cfg.sequence_len > config.max_sequence_length
             ):
                 config.max_sequence_length = cfg.sequence_len
-                logging.warning(f"increasing context length to {cfg.sequence_len}")
+                LOG.warning(f"increasing context length to {cfg.sequence_len}")
             model = AutoModelForCausalLM.from_pretrained(
                 base_model,
                 config=config,
@@ -271,31 +293,56 @@ def load_model(
                 **model_kwargs,
             )
     except Exception as err:  # pylint: disable=broad-exception-caught
-        logging.error(
+        LOG.error(
             "Exception raised attempting to load model, retrying with AutoModelForCausalLM"
         )
-        logging.exception(err)
+        LOG.exception(err)
         model = AutoModelForCausalLM.from_pretrained(
             base_model,
             load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
+            load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
            torch_dtype=torch_dtype,
             device_map=cfg.device_map,
             trust_remote_code=cfg.trust_remote_code or False,
             **model_kwargs,
         )
 
-    embeddings_len = math.ceil(len(tokenizer) / 32) * 32
+    embeddings_len = (
+        math.ceil(len(tokenizer) / 32) * 32
+        if cfg.resize_token_embeddings_to_32x
+        else len(tokenizer)
+    )
     model.resize_token_embeddings(embeddings_len)
 
+    if (
+        hasattr(model.config, "max_position_embeddings")
+        and model.config.max_position_embeddings
+        and cfg.sequence_len >= model.config.max_position_embeddings
+    ):
+        LOG.warning(
+            f"increasing model.config.max_position_embeddings to {cfg.sequence_len}"
+        )
+        model.config.max_position_embeddings = cfg.sequence_len
+
     if not cfg.gptq and (
         (cfg.adapter == "lora" and load_in_8bit)
         or (cfg.adapter == "qlora" and cfg.load_in_4bit)
     ):
-        logging.info("converting PEFT model w/ prepare_model_for_kbit_training")
+        LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
         model = prepare_model_for_kbit_training(
            model, use_gradient_checkpointing=cfg.gradient_checkpointing
         )
 
+        # LlamaRMSNorm layers are in fp32 after kbit_training, so we need to
+        # convert them back to fp16/bf16 for flash-attn compatibility.
+        if cfg.flash_attention and cfg.is_llama_derived_model:
+            for name, module in model.named_modules():
+                if "norm" in name:
+                    module.to(torch_dtype)
+                if "lm_head" in name or "embed_tokens" in name:
+                    if hasattr(module, "weight"):
+                        module.to(torch_dtype)
+
     model, lora_config = load_adapter(model, cfg, adapter)
 
     if cfg.ddp and not load_in_8bit:
@@ -303,7 +350,7 @@ def load_model(
 
     if cfg.gptq:
         # Scales to half
-        logging.info("Fitting 4bit scales and zeros to half")
+        LOG.info("Fitting 4bit scales and zeros to half")
         for _, module in model.named_modules():
             if "Autograd4bitQuantLinear" in str(type(module)) or "Linear4bitLt" in str(
                 type(module)
@@ -329,9 +376,12 @@ def load_model(
         if param.requires_grad:
             requires_grad.append(f"{name}: {param.requires_grad}")
     if len(requires_grad) == 0:
-        logging.warning("there are no parameters that require gradient updates")
+        LOG.warning("there are no parameters that require gradient updates")
     model.config.use_cache = False
 
+    if cfg.flash_optimum:
+        model = BetterTransformer.transform(model)
+
     # TODO resume_from_checkpoint handling
     return model, lora_config
 
@@ -360,7 +410,7 @@ def load_llama_adapter(model, cfg):
     )
 
     if cfg.lora_model_dir:
-        logging.info("Loading pretained LORA")
+        LOG.info("Loading pretained LORA")
         model = PeftModel.from_pretrained(
             model,
             cfg.lora_model_dir,
@@ -407,7 +457,7 @@ def load_lora(model, cfg):
         bits = 8
 
     linear_names = find_all_linear_names(bits, model)
-    logging.info(f"found linear modules: {repr(linear_names)}")
+    LOG.info(f"found linear modules: {repr(linear_names)}")
     lora_target_modules = list(set(lora_target_modules + linear_names))
 
     lora_config = LoraConfig(

@@ -1,6 +1,9 @@
 """Module for custom LRScheduler class"""
-from torch.optim.lr_scheduler import LRScheduler
+import math
+from functools import partial
+
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LambdaLR, LRScheduler
 
 
 class InterpolatingLogScheduler(LRScheduler):
@@ -42,3 +45,58 @@ class InterpolatingLogScheduler(LRScheduler):
             lrs = [self.max_lr for base_lr in self.base_lrs]
 
         return lrs
+
+
+def _get_cosine_schedule_with_quadratic_warmup_lr_lambda(
+    current_step: int,
+    *,
+    num_warmup_steps: int,
+    num_training_steps: int,
+    num_cycles: float
+):
+    if current_step < num_warmup_steps:
+        return (float(current_step) / float(max(1, num_warmup_steps))) ** 2
+    progress = float(current_step - num_warmup_steps) / float(
+        max(1, num_training_steps - num_warmup_steps)
+    )
+    return max(
+        0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
+    )
+
+
+def get_cosine_schedule_with_quadratic_warmup(
+    optimizer: Optimizer,
+    num_warmup_steps: int,
+    num_training_steps: int,
+    num_cycles: float = 0.5,
+    last_epoch: int = -1,
+):
+    """
+    Create a schedule with a learning rate that decreases following the values of the cosine function between the
+    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+    initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+            The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
+            following a half-cosine).
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    lr_lambda = partial(
+        _get_cosine_schedule_with_quadratic_warmup_lr_lambda,
+        num_warmup_steps=num_warmup_steps,
+        num_training_steps=num_training_steps,
+        num_cycles=num_cycles,
+    )
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
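With quadratic warmup the multiplier climbs as (step / warmup)^2, so the first steps are much gentler than linear warmup before the cosine decay takes over. A small self-contained check that mirrors the lambda above:

    import math

    def quad_warmup_cosine(step, warmup=100, total=1000, cycles=0.5):
        # Mirrors _get_cosine_schedule_with_quadratic_warmup_lr_lambda
        if step < warmup:
            return (step / max(1, warmup)) ** 2
        progress = (step - warmup) / max(1, total - warmup)
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * cycles * 2.0 * progress)))

    print([round(quad_warmup_cosine(s), 3) for s in (10, 50, 99, 100, 550, 1000)])
    # [0.01, 0.25, 0.98, 1.0, 0.5, 0.0] -- vs 0.1 / 0.5 / 0.99 for linear warmup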
@@ -5,6 +5,8 @@ import logging
 
 from termcolor import colored
 
+LOG = logging.getLogger("axolotl")
+
 
 def check_dataset_labels(dataset, tokenizer):
     # the dataset is already shuffled, so let's just check the first 5 elements
@@ -32,5 +34,7 @@ def check_example_labels(example, tokenizer):
         )
         colored_tokens.append(colored_token)
 
-    logging.info(" ".join(colored_tokens))
-    logging.info("\n\n\n")
+    LOG.info(" ".join(colored_tokens))
+    LOG.info("\n\n\n")
+
+    return " ".join(colored_tokens)

@@ -5,6 +5,7 @@ import logging
 import math
 import os
 import sys
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
 
@@ -13,14 +14,70 @@ import torch.cuda
 import transformers
 from torch import nn
 from torch.optim.lr_scheduler import OneCycleLR
-from transformers import EarlyStoppingCallback, Trainer
+from transformers import EarlyStoppingCallback, Trainer, TrainingArguments
 from transformers.trainer_pt_utils import get_parameter_names
 
-from axolotl.utils.callbacks import SavePeftModelCallback
-from axolotl.utils.schedulers import InterpolatingLogScheduler
+from axolotl.utils.callbacks import (
+    SaveBetterTransformerModelCallback,
+    SavePeftModelCallback,
+)
+from axolotl.utils.schedulers import (
+    InterpolatingLogScheduler,
+    get_cosine_schedule_with_quadratic_warmup,
+)
+
+LOG = logging.getLogger("axolotl")
 
 
-class OneCycleLRSchedulerTrainer(Trainer):
+@dataclass
+class AxolotlTrainingArguments(TrainingArguments):
+    """
+    Extend the base TrainingArguments for axolotl helpers
+    """
+
+    lr_quadratic_warmup: bool = field(
+        default=False,
+        metadata={"help": "Use quadratic warmup for cosine scheduling."},
+    )
+
+
+class AxolotlTrainer(Trainer):
+    """
+    Extend the base Trainer for axolotl helpers
+    """
+
+    args = None  # type: AxolotlTrainingArguments
+
+    def create_scheduler(
+        self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
+    ):
+        """
+        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
+        passed as an argument.
+
+        Args:
+            num_training_steps (int): The number of training steps to do.
+            optimizer (torch.optim.Optimizer): The training optimizer
+        """
+
+        # fmt: off
+        if self.lr_scheduler is None:  # type: ignore # pylint: disable=access-member-before-definition
+            # fmt: on
+            if (
+                self.args.lr_scheduler_type == "cosine"
+                and self.args.lr_quadratic_warmup is True
+            ):
+                self.lr_scheduler = get_cosine_schedule_with_quadratic_warmup(  # pylint: disable=attribute-defined-outside-init
+                    optimizer,
+                    num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
+                    num_training_steps=num_training_steps,
+                )
+            else:
+                return super().create_scheduler(num_training_steps, optimizer)
+        return self.lr_scheduler
+
+
+class OneCycleLRSchedulerTrainer(AxolotlTrainer):
     """
     Trainer subclass that uses the OneCycleLR scheduler
     """
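`create_scheduler` is the Trainer hook intended for custom schedules; the override above falls through to the stock implementation unless the scheduler type is cosine and the new flag is set. A minimal wiring sketch (the output dir, model, and dataset are placeholders, not values from this diff):

    # Sketch only; assumes AxolotlTrainer / AxolotlTrainingArguments as defined above.
    args = AxolotlTrainingArguments(
        output_dir="./out",  # placeholder
        lr_scheduler_type="cosine",
        warmup_steps=100,
        lr_quadratic_warmup=True,  # routes through get_cosine_schedule_with_quadratic_warmup
    )
    trainer = AxolotlTrainer(model=model, args=args, train_dataset=train_ds)  # placeholders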
@@ -100,6 +157,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.fsdp_config:
         training_arguments_kwargs["fsdp_config"] = dict(cfg.fsdp_config)
 
+    if cfg.lr_quadratic_warmup is not None:
+        training_arguments_kwargs["lr_quadratic_warmup"] = cfg.lr_quadratic_warmup
+
     # deepspeed
     if (
         os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true"
@@ -112,7 +172,24 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         # TODO search Path("./") for one
         training_arguments_kwargs["deepspeed"] = "./ds_config.json"
 
-    training_args = transformers.TrainingArguments(
+    if cfg.adam_beta1:
+        training_arguments_kwargs["adam_beta1"] = cfg.adam_beta1
+    if cfg.adam_beta2:
+        training_arguments_kwargs["adam_beta2"] = cfg.adam_beta2
+    if cfg.adam_epsilon:
+        training_arguments_kwargs["adam_epsilon"] = cfg.adam_epsilon
+    if cfg.max_grad_norm:
+        training_arguments_kwargs["max_grad_norm"] = cfg.max_grad_norm
+
+    if cfg.hub_model_id:
+        training_arguments_kwargs["hub_model_id"] = cfg.hub_model_id
+        training_arguments_kwargs["push_to_hub"] = True
+        training_arguments_kwargs["hub_private_repo"] = True
+
+    if cfg.save_safetensors:
+        training_arguments_kwargs["save_safetensors"] = cfg.save_safetensors
+
+    training_args = AxolotlTrainingArguments(  # pylint: disable=unexpected-keyword-arg
         per_device_train_batch_size=cfg.micro_batch_size,
         per_device_eval_batch_size=cfg.eval_batch_size
         if cfg.eval_batch_size is not None
@@ -228,6 +305,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     ]:  # only save in rank 0
         callbacks.append(SavePeftModelCallback)
 
+    if hasattr(model, "use_bettertransformer") and model.use_bettertransformer is True:
+        callbacks.append(SaveBetterTransformerModelCallback)
+
     data_collator_kwargs = {
         "padding": True,
     }
@@ -247,7 +327,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
 
         set_model_mem_id(model, tokenizer)
 
-        logging.info("Adding landmark attention tokens to dataset")
+        LOG.info("Adding landmark attention tokens to dataset")
 
         for dataset in [train_dataset, eval_dataset]:
             dataset = dataset.map(
@@ -259,7 +339,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     trainer_cls = (
         OneCycleLRSchedulerTrainer
         if cfg.lr_scheduler == "one_cycle" and (cfg.fsdp or cfg.adapter == "qlora")
-        else transformers.Trainer
+        else AxolotlTrainer
     )
     trainer = trainer_cls(
         model=model,

@@ -2,6 +2,10 @@
 
 import logging
 
+import torch
+
+LOG = logging.getLogger("axolotl")
+
 
 def validate_config(cfg):
     if cfg.gradient_accumulation_steps and cfg.batch_size:
@@ -9,7 +13,7 @@ def validate_config(cfg):
             "please set only one of gradient_accumulation_steps or batch_size"
         )
     if cfg.batch_size:
-        logging.warning(
+        LOG.warning(
             "%s\n%s",
             "batch_size is not recommended. Please use gradient_accumulation_steps instead.",
             "To calculate the equivalent gradient_accumulation_steps, divide batch_size / micro_batch_size / number of gpus.",
@@ -42,10 +46,10 @@ def validate_config(cfg):
         raise ValueError("Require cfg.load_in_4bit to be True for qlora")
 
     if not cfg.load_in_8bit and cfg.adapter == "lora":
-        logging.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")
+        LOG.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")
 
     if cfg.trust_remote_code:
-        logging.warning(
+        LOG.warning(
             "`trust_remote_code` is set to true. Please make sure that you reviewed the remote code/model."
         )
 
@@ -62,7 +66,45 @@ def validate_config(cfg):
     ) and cfg.gradient_checkpointing:
         raise ValueError("gradient_checkpointing is not supported for MPT models")
 
+    if cfg.flash_optimum is True:
+        if cfg.adapter:
+            LOG.warning("BetterTransformers probably doesn't work with PEFT adapters")
+        if cfg.fp16 or cfg.bf16:
+            raise ValueError("AMP is not supported with BetterTransformer")
+        if cfg.float16 is not True and cfg.bloat16 is not True:
+            LOG.warning(
+                "You should probably set bfloat16 or float16 to true to "
+                "load the model in float16 for BetterTransformers"
+            )
+        if int(torch.__version__.split(".")[0]) < 2:
+            LOG.warning("torch>=2.0.0 required")
+            raise ValueError(
+                f"flash_optimum for BetterTransformers may not be used with {torch.__version__}"
+            )
+
+    if cfg.pretraining_dataset and cfg.group_by_length:
+        LOG.warning(
+            "You probably want to disable group_by_length as it will force a streamed dataset to download completely."
+        )
+
+    if any([cfg.adam_beta1, cfg.adam_beta2, cfg.adam_epsilon]) and (
+        not cfg.optimizer or "adamw" not in cfg.optimizer
+    ):
+        LOG.warning("adamw hyperparameters found, but no adamw optimizer set")
+
+    if cfg.push_to_hub_model_id:
+        raise ValueError(
+            "push_to_hub_model_id is deprecated. Please use hub_model_id instead."
+        )
+
     # TODO
     # MPT 7b
     # https://github.com/facebookresearch/bitsandbytes/issues/25
-    # no 8bit adamw w bf16
+    # no 8bit adaAmw w bf16
+
+    # GPT-NeoX
+    # evals broken when extending context len
+    # File "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py", line 162, in forward attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+    # File "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/optimum/bettertransformer/models/attention.py", line 74, in gpt2_wrapped_scaled_dot_product
+    #     attention_mask = causal_mask + attention_mask
+    # RuntimeError: The size of tensor a (2048) must match the size of tensor b (8132) at non-singleton dimension 3

@@ -6,10 +6,18 @@ from pathlib import Path
 
 from transformers import AutoTokenizer
 
-from axolotl.prompt_tokenizers import ShareGPTPromptTokenizingStrategy
-from axolotl.prompters import ShareGPTPrompter
+from axolotl.prompt_strategies.alpaca_chat import NoSystemPrompter
+from axolotl.prompt_strategies.alpaca_w_system import (
+    InstructionWSystemPromptTokenizingStrategy,
+    SystemDataPrompter,
+)
+from axolotl.prompt_tokenizers import (
+    AlpacaPromptTokenizingStrategy,
+    ShareGPTPromptTokenizingStrategy,
+)
+from axolotl.prompters import AlpacaPrompter, PromptStyle, ShareGPTPrompter
 
-logging.basicConfig(level="INFO")
+LOG = logging.getLogger("axolotl")
 
 
 class TestPromptTokenizationStrategies(unittest.TestCase):
@@ -29,7 +37,6 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
         )
 
     def test_sharegpt_integration(self):
-        print(Path(__file__).parent)
         with open(
             Path(__file__).parent / "fixtures/conversation.json", encoding="utf-8"
         ) as fin:
@@ -53,6 +60,80 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
         self.assertEqual(len(example[fields]), len(tokenized_conversation[fields]))
         self.assertEqual(example[fields], tokenized_conversation[fields])
 
+    def test_no_sys_prompt(self):
+        """
+        tests the interface between the user and assistant parts
+        """
+        prompter = NoSystemPrompter()
+        # pylint: disable=duplicate-code
+        strat = AlpacaPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {
+            "instruction": "hello cruel. lorem ipsum dolor sit amet.",
+            "output": "world!",
+        }
+        example = strat.tokenize_prompt(sample)
+        world_idx = example["input_ids"].index(3186)
+        assert example["labels"][world_idx] == 3186
+        assert example["labels"][world_idx - 1] == -100
+
+    def test_alpaca(self):
+        """
+        tests the interface between the user and assistant parts
+        """
+        # pylint: disable=duplicate-code
+        prompter = AlpacaPrompter()
+        strat = AlpacaPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {"instruction": "hello!", "output": "Hi! How can I help?"}
+        example = strat.tokenize_prompt(sample)
+        world_idx = example["input_ids"].index(6324)
+        assert example["labels"][world_idx] == 6324
+        assert example["labels"][world_idx - 1] == -100
+
+
+class InstructionWSystemPromptTokenizingStrategyTest(unittest.TestCase):
+    """
+    Test class for prompt tokenization strategies with sys prompt from the dataset
+    """
+
+    def setUp(self) -> None:
+        # pylint: disable=duplicate-code
+        self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
+        self.tokenizer.add_special_tokens(
+            {
+                "bos_token": "<s>",
+                "eos_token": "</s>",
+                "unk_token": "<unk>",
+            }
+        )
+
+    def test_system_alpaca(self):
+        prompter = SystemDataPrompter(PromptStyle.CHAT.value)
+        strat = InstructionWSystemPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {
+            "system": "use cot",
+            "instruction": "hello!",
+            "output": "Hi! How can I help?",
+        }
+        example = strat.tokenize_prompt(sample)
+        assert example["input_ids"][0:4] == [1, 835, 2184, 29901]  # "<s>### System:"
+        assert example["input_ids"][5:7] == [1509, 20118]  # "use cot"
+        assert example["input_ids"][9] == 11889  # USER
+
 
 if __name__ == "__main__":
     unittest.main()

@@ -2,7 +2,13 @@
 
 import unittest
 
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompt_strategies.alpaca_w_system import SystemDataPrompter
+from axolotl.prompters import (
+    AlpacaPrompter,
+    MultipleChoiceExplainPrompter,
+    PromptStyle,
+    UnpromptedPrompter,
+)
 
 
 class AlpacaPrompterTest(unittest.TestCase):
@@ -55,3 +61,64 @@ class AlpacaPrompterTest(unittest.TestCase):
         assert "### Response:" not in res
         assert "USER:" in res
         assert "ASSISTANT:" in res
+
+    def test_system_prompt(self):
+        prompter = SystemDataPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(
+            prompter.build_prompt_w_system(
+                "use cot", "tell me a joke about the following", "alpacas"
+            )
+        )
+        assert "use cot" in res
+        assert res.startswith("### System:")
+        assert "### Instruction:" not in res
+        assert "### Input:" not in res
+        assert "alpacas" in res
+        assert "### Response:" not in res
+        assert "USER:" in res
+        assert "ASSISTANT:" in res
+
+
+class UnpromptedPrompterTest(unittest.TestCase):
+    """
+    Test class for UnpromptedPrompter with no system prompts
+    """
+
+    def test_prompt_style_w_none(self):
+        prompter = UnpromptedPrompter(prompt_style=None)
+        res = next(prompter.build_prompt("tell me a joke"))
+        assert "### Instruction:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("###")
+
+    def test_prompt_style_w_instruct(self):
+        prompter = UnpromptedPrompter(prompt_style=PromptStyle.INSTRUCT.value)
+        res = next(
+            prompter.build_prompt("tell me a joke about the following", "alpacas")
+        )
+        assert "### Instruction:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("###")
+
+    def test_prompt_style_w_chat(self):
+        prompter = UnpromptedPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(
+            prompter.build_prompt("tell me a joke about the following", "alpacas")
+        )
+        assert "USER:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("USER:")
+
+
+class MultipleChoiceExplainPrompterTest(unittest.TestCase):
+    """
+    Test class for MultipleChoiceExplainPrompter
+    """
+
+    def test_prompt_style_w_chat(self):
+        prompter = MultipleChoiceExplainPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(prompter.build_prompt("choose one", "- A\n- B\n- C", "C"))
+        assert "USER:" in res
+        assert "choose one" in res
+        assert "Choose the answer that best answers the question." in res
+        assert "- A\n- B\n- C" in res

31 tests/test_tokenizers.py Normal file
@@ -0,0 +1,31 @@
+"""
+Test cases for the tokenizer loading
+"""
+import unittest
+
+from axolotl.utils.dict import DictDefault
+from axolotl.utils.models import load_tokenizer
+
+
+class TestTokenizers(unittest.TestCase):
+    """
+    test class for the load_tokenizer fn
+    """
+
+    def test_default_use_fast(self):
+        cfg = DictDefault({})
+        tokenizer = load_tokenizer("huggyllama/llama-7b", None, cfg)
+        assert "Fast" in tokenizer.__class__.__name__
+
+    def test_dont_use_fast(self):
+        cfg = DictDefault(
+            {
+                "tokenizer_use_fast": False,
+            }
+        )
+        tokenizer = load_tokenizer("huggyllama/llama-7b", None, cfg)
+        assert "Fast" not in tokenizer.__class__.__name__
+
+
+if __name__ == "__main__":
+    unittest.main()

@@ -212,3 +212,104 @@ class ValidationTest(unittest.TestCase):
 
         with pytest.raises(ValueError, match=regex_exp):
             validate_config(cfg)
+
+    def test_flash_optimum(self):
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "adapter": "lora",
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "BetterTransformers probably doesn't work with PEFT adapters"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "probably set bfloat16 or float16" in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "fp16": True,
+            }
+        )
+        regex_exp = r".*AMP is not supported.*"
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "bf16": True,
+            }
+        )
+        regex_exp = r".*AMP is not supported.*"
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+    def test_adamw_hyperparams(self):
+        cfg = DictDefault(
+            {
+                "optimizer": None,
+                "adam_epsilon": 0.0001,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "adamw hyperparameters found, but no adamw optimizer set"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adafactor",
+                "adam_beta1": 0.0001,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "adamw hyperparameters found, but no adamw optimizer set"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adamw_bnb_8bit",
+                "adam_beta1": 0.9,
+                "adam_beta2": 0.99,
+                "adam_epsilon": 0.0001,
+            }
+        )
+
+        validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adafactor",
+            }
+        )
+
+        validate_config(cfg)