Compare commits

..

289 Commits

Author SHA1 Message Date
Wing Lian
a924a33b45 Merge pull request #111 from OpenAccess-AI-Collective/sharegpt-token-tests
add unit test for sharegpt tokenization
2023-05-30 11:18:31 -04:00
Wing Lian
e65aeedce7 fix relative path for fixtures 2023-05-30 10:38:20 -04:00
Wing Lian
e6fdeb087f add unit test for sharegpt tokenization 2023-05-30 10:28:17 -04:00
Wing Lian
e43bcc6c4f move CUDA_VERSION_BNB arg inside of stage build scope 2023-05-29 13:30:15 -04:00
Wing Lian
00323f0a6f fix CUDA_VERSION_BNB env var 2023-05-29 08:06:22 -04:00
Wing Lian
7f7fd68e8e Merge pull request #104 from OpenAccess-AI-Collective/training-fixes-20230529
bnb fix, trainer debug fix
2023-05-29 02:19:03 -04:00
Wing Lian
21f17cca69 bnb fixes 2023-05-29 00:06:35 -04:00
Wing Lian
319e34bfb5 Merge pull request #101 from OpenAccess-AI-Collective/sharegpt-conv
refactor conversation plucking in sharegpt
2023-05-28 19:43:54 -04:00
Wing Lian
809ccebb38 use python setup install, bdist wheel is unreliable in installing extension 2023-05-28 15:49:13 -04:00
Wing Lian
21c8e2deab refactor conversation plucking in sharegpt 2023-05-28 14:36:33 -04:00
Wing Lian
8fe12e3bc1 Merge pull request #100 from OpenAccess-AI-Collective/py310-tests
add py310 to the test matrix
2023-05-28 14:31:07 -04:00
Wing Lian
37fc85ac52 Merge pull request #99 from OpenAccess-AI-Collective/hf_use_auth_token
new hf_use_auth_token setting so login to hf isn't required
2023-05-28 14:30:04 -04:00
Wing Lian
658ed86cb5 add py310 to the test matrix 2023-05-28 14:25:57 -04:00
Wing Lian
fd5f9656a2 update for pr feedback 2023-05-28 14:23:27 -04:00
Wing Lian
1c33eb88a7 new hf_use_auth_token setting so login to hf isn't required 2023-05-28 13:08:49 -04:00
Wing Lian
a798ba1659 ensure libbitsandbytes*.so gets included with wheel 2023-05-28 12:28:37 -04:00
NanoCode012
666febcfb5 Merge pull request #97 from NanoCode012/feat/test-validation
Feat: Update validate_config and add tests
2023-05-29 00:38:22 +09:00
NanoCode012
52dd92a0cd Feat: Update validate_config and add tests 2023-05-29 00:25:54 +09:00
Wing Lian
88889590ec Merge pull request #90 from NanoCode012/feat/addict
Feat: Convert attrdict to addict
2023-05-28 10:43:07 -04:00
NanoCode012
f87bd20555 Fix incorrect syntax in test 2023-05-28 23:35:29 +09:00
NanoCode012
dd83a20c27 Update test to run on PR 2023-05-28 23:30:17 +09:00
NanoCode012
7bf2069afd Apply black formatter 2023-05-28 23:14:04 +09:00
NanoCode012
923151ffab Add test for DictDefault 2023-05-28 23:06:10 +09:00
NanoCode012
56f9ca5709 refactor: fix previous refactors 2023-05-28 23:06:10 +09:00
NanoCode012
8bd7a49cd7 Refactor to use DictDefault instead 2023-05-28 23:06:10 +09:00
NanoCode012
18d41cee4a Add DictDefault 2023-05-28 23:06:10 +09:00
NanoCode012
93acb648bd Fix load error 2023-05-28 23:06:10 +09:00
NanoCode012
bdfe7c9201 Convert attrdict to addict 2023-05-28 23:06:10 +09:00
Wing Lian
0d4a7f4c04 Merge pull request #67 from OpenAccess-AI-Collective/refactor-tokenizer-load
load the tokenizer seperately from the model
2023-05-28 08:49:34 -04:00
Wing Lian
af3aacbe16 Merge pull request #93 from OpenAccess-AI-Collective/dev-base
cuda properly compiled bitsandbytes for qlora support
2023-05-27 19:40:29 -04:00
Wing Lian
cc67862dd3 move list not in list logic to fn 2023-05-27 16:42:05 -04:00
Wing Lian
cf37980395 fix missing run coninuation 2023-05-27 15:28:54 -04:00
NanoCode012
ed2dd77e35 Merge pull request #89 from OpenAccess-AI-Collective/NanoCode012-update-action-version
Feat: Update actions version
2023-05-28 02:12:26 +09:00
NanoCode012
2b8c28bab8 Update actions version 2023-05-28 01:51:10 +09:00
Wing Lian
312b8d51d6 update docker to compile latest bnb to properly support qlora 2023-05-27 12:36:53 -04:00
NanoCode012
782996d94a Merge pull request #86 from OpenAccess-AI-Collective/NanoCode012-warning-remote-code
Feat:  Add warning for `trust_remote_code`
2023-05-28 01:29:35 +09:00
NanoCode012
b50d7d311c Merge pull request #88 from OpenAccess-AI-Collective/NanoCode012-completion-prompter-no-inherit
Fix: Remove base class inherit for CompletionPrompter
2023-05-28 01:29:03 +09:00
Wing Lian
35af017001 Merge pull request #87 from OpenAccess-AI-Collective/add_prompter_tests
automated testing in github actions
2023-05-27 12:21:23 -04:00
Wing Lian
a653392287 use requirements file for tests 2023-05-27 12:17:46 -04:00
Wing Lian
72b6ca0d9f cache pip 2023-05-27 12:16:54 -04:00
Wing Lian
7f53fd2ab6 alright, just local install it 2023-05-27 12:16:06 -04:00
Wing Lian
c29d33352c move python path to same step as tests 2023-05-27 12:06:23 -04:00
Wing Lian
403af0b1d7 fix path and streamline pip installs 2023-05-27 11:58:37 -04:00
NanoCode012
9ac1884323 Fix: Remove base class inherit for CompletionPrompter 2023-05-28 00:51:35 +09:00
Wing Lian
d199d6c261 automated testing in github actions 2023-05-27 11:51:01 -04:00
NanoCode012
2824423d10 Add warning for trust_remote_code 2023-05-28 00:46:56 +09:00
NanoCode012
cb18856fc2 Merge pull request #85 from NanoCode012/fix/add-dataset-shard-readme
Feat: Add `dataset_shard_num` and `dataset_shard_idx` to Readme
2023-05-27 23:52:50 +09:00
NanoCode012
8626b54aab Add dataset_shard_num and dataset_shard_idx 2023-05-27 23:51:17 +09:00
Wing Lian
87dffbc451 Merge pull request #75 from Thytu/refactor/rename-4b-to-gptq
refactor: change 4bit nomenclature to gptq
2023-05-27 09:37:57 -04:00
Wing Lian
147241ca66 Merge branch 'main' into refactor/rename-4b-to-gptq 2023-05-27 09:37:52 -04:00
Wing Lian
7e974decb7 Merge pull request #76 from OpenAccess-AI-Collective/truthy-validation
Truthy validation
2023-05-27 09:36:10 -04:00
Wing Lian
11fd39b1f5 Merge pull request #78 from OpenAccess-AI-Collective/falcoln-support
falcon: sane starter defaults and add lora support
2023-05-27 09:35:56 -04:00
Wing Lian
157420df13 sane starter defaults and add lora 2023-05-27 09:33:14 -04:00
Wing Lian
679ffd7395 Merge pull request #77 from OpenAccess-AI-Collective/falcoln-support
add example for falcon support
2023-05-27 09:18:48 -04:00
Wing Lian
d5f944ce2a add example for falcoln support 2023-05-27 09:16:43 -04:00
Wing Lian
4c906339f7 fix auto linear modules for lora w/o any set already 2023-05-27 08:49:43 -04:00
Wing Lian
4c500f5830 checking for False is not sufficent for NoneType/unset configs 2023-05-27 08:43:48 -04:00
Thytu
7cf07fc8b3 refactor(example): rename 4bit-lora-7b by gptq-lora-7b
Signed-off-by: Thytu <vdmatos@gladia.io>
2023-05-27 12:37:53 +00:00
Thytu
dd0065773a refactor(param): rename load_4bit config param by gptq
Signed-off-by: Thytu <vdmatos@gladia.io>
2023-05-27 12:36:03 +00:00
Wing Lian
ca1bb92337 Update src/axolotl/utils/models.py
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-26 17:51:24 -04:00
Wing Lian
933e970cb5 Update src/axolotl/utils/models.py
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-26 17:51:17 -04:00
Wing Lian
c3d256271e fix wheel install glob 2023-05-26 10:37:02 -04:00
NanoCode012
46c5a44003 Merge pull request #69 from OpenAccess-AI-Collective/NanoCode012-quickstart-disable-xformers
Fix: Disable xformers for QuickStart config
2023-05-26 22:40:16 +09:00
NanoCode012
ec3c0314bf Merge pull request #65 from NanoCode012/feat/target-linear
Feat: Add `cfg.lora_target_linear`
2023-05-26 22:39:38 +09:00
NanoCode012
79560934f9 Disable formers for QuickStart config 2023-05-26 22:23:38 +09:00
NanoCode012
353cebd838 Merge pull request #68 from OpenAccess-AI-Collective/NanoCode012-patch-1
Fix: Incorrect recommendation condition
2023-05-26 22:20:31 +09:00
NanoCode012
fe0e69f4f9 Fix recommendation condition 2023-05-26 22:19:50 +09:00
Wing Lian
1fc9b44e3d fix wheel blobs in dockerfile 2023-05-26 07:40:11 -04:00
Wing Lian
32e6fe9286 load the tokenizer seperately from the model 2023-05-26 07:29:35 -04:00
NanoCode012
919623793a Add cfg.lora_target_linear 2023-05-26 14:32:30 +09:00
Wing Lian
bbfc333a01 Merge pull request #62 from OpenAccess-AI-Collective/qlora-fixes
Qlora fixes
2023-05-26 00:28:16 -04:00
Wing Lian
a5bf838685 add logging and make sure model unloads to float16 2023-05-26 00:09:55 -04:00
Wing Lian
a4f12415a0 update readme and add typehints 2023-05-25 23:10:11 -04:00
Wing Lian
48f4c0571e fix validation for qlora merge 2023-05-25 23:02:03 -04:00
Wing Lian
1987e5cf56 qlora and 4bit check so we are able to merge and unload 2023-05-25 22:55:13 -04:00
Wing Lian
e7e1a777bd fix bool args according to python fire docs 2023-05-25 22:45:41 -04:00
Wing Lian
7b5e762be2 fix merge conflict failure, black format 2023-05-25 22:40:27 -04:00
Wing Lian
3f6017db9e qlora merge and load requires that base model isn't loaded in 4 or 8 bit 2023-05-25 22:39:13 -04:00
Wing Lian
34c99f9812 fixes to make qlora actually work 2023-05-25 22:37:23 -04:00
NanoCode012
3815c054b6 Merge pull request #61 from NanoCode012/feat/update-readme
Feat: Update readme
2023-05-26 11:27:31 +09:00
NanoCode012
85326bfbf3 Update quickstart config 2023-05-26 11:15:57 +09:00
NanoCode012
e689069afd Add xformers error 2023-05-26 11:12:03 +09:00
NanoCode012
d7d8bc739e Add strict yml 2023-05-26 11:10:59 +09:00
NanoCode012
60e32ff457 Fix shard config 2023-05-26 11:09:28 +09:00
Wing Lian
259262bf42 fix xentropy wheel name typo 2023-05-25 17:25:38 -04:00
Wing Lian
2e56203b50 another fix for shard and train split 2023-05-25 17:23:57 -04:00
Wing Lian
be3d3963cd Merge pull request #58 from OpenAccess-AI-Collective/shards-fix
shard fix
2023-05-25 16:32:31 -04:00
Wing Lian
ac79360161 shard fix 2023-05-25 16:31:59 -04:00
Wing Lian
b2fb61845e Merge pull request #54 from OpenAccess-AI-Collective/winglian-patch-1
add discord link to #axolotl-help channel
2023-05-25 12:45:19 -04:00
Wing Lian
71d600fc43 Merge branch 'main' into winglian-patch-1 2023-05-25 12:45:13 -04:00
Wing Lian
4fd0c2d1b9 Merge pull request #57 from OpenAccess-AI-Collective/fixes-for-basic-samples
fixes w/ example for super basic lora starter
2023-05-25 12:43:22 -04:00
Wing Lian
943961fd10 missed ... 2023-05-25 12:42:56 -04:00
Wing Lian
d2a6f79fd1 change auth token setting back 2023-05-25 12:41:17 -04:00
Wing Lian
98b1bce57e pr comments addressed 2023-05-25 12:25:07 -04:00
Wing Lian
004820209d Update src/axolotl/prompters.py
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-25 12:21:02 -04:00
Wing Lian
8d6a28953f fix relative path in flash-attn build: 2023-05-25 12:18:28 -04:00
Wing Lian
e396654319 fix tokenizer loading, got openllama 3b working 2023-05-25 12:15:12 -04:00
Wing Lian
a5d739b66b fixes w/ example for super basic lora starter 2023-05-25 11:59:08 -04:00
Wing Lian
951facbb1f Merge pull request #56 from OpenAccess-AI-Collective/fix-build-flash-attn
fix cd within flash-attn
2023-05-25 11:29:47 -04:00
Wing Lian
f5fa3d131b fix cd within flash-attn 2023-05-25 11:29:15 -04:00
NanoCode012
7ec105041d Merge pull request #48 from NanoCode012/feat/update-readme
Feat: Minor update readme from dev changes
2023-05-25 23:49:58 +09:00
NanoCode012
a9e502ef45 Update 4bit notes 2023-05-25 23:48:18 +09:00
NanoCode012
68f0c71424 Merge pull request #49 from NanoCode012/feat/gitignore
Feat: Update gitignore using standard Python template
2023-05-25 23:42:49 +09:00
NanoCode012
52fb6d8a34 Update gitignore using standard Python template 2023-05-25 23:07:27 +09:00
NanoCode012
f92245dbd6 Fix missing closing code block 2023-05-25 23:06:33 +09:00
NanoCode012
e65c203e9e Add more detail on minimum GPU 2023-05-25 23:06:33 +09:00
NanoCode012
1377400c33 Add info on Runtime Error 2023-05-25 23:06:33 +09:00
NanoCode012
2c34f8d0c7 Update dataset type 2023-05-25 23:06:33 +09:00
NanoCode012
7bc28eb8a8 Add more data formats 2023-05-25 23:06:33 +09:00
NanoCode012
29273b5a5b Add other minor configs 2023-05-25 23:06:33 +09:00
NanoCode012
05c18340d6 Update scheduler configs 2023-05-25 23:06:33 +09:00
NanoCode012
5b712afbe4 Update bf16 options 2023-05-25 23:06:33 +09:00
NanoCode012
9083910036 Update lora config 2023-05-25 23:06:33 +09:00
NanoCode012
8552218491 Improve Inference instruction 2023-05-25 23:06:33 +09:00
Wing Lian
de2a7335e6 Merge pull request #55 from OpenAccess-AI-Collective/missing-validation-file
add missing file
2023-05-25 09:58:51 -04:00
Wing Lian
1d7da3b389 add missing file 2023-05-25 09:58:29 -04:00
Wing Lian
e07bd8a441 add discord link to #axolotl-help channel 2023-05-25 09:45:45 -04:00
Wing Lian
d092cdb19b Merge pull request #52 from OpenAccess-AI-Collective/bugfix-cfg-cfg
cfg.cfg fix, also de-dupe lora module list
2023-05-25 09:35:24 -04:00
Wing Lian
f523a0894c stray s 2023-05-25 09:23:56 -04:00
Wing Lian
676d7da661 cfg.cfg fix, also de-dupe lora module list 2023-05-25 09:18:57 -04:00
Wing Lian
a617f1b65e Merge pull request #44 from OpenAccess-AI-Collective/qlora-add-modules-tuple
fix tuple add to list
2023-05-24 23:46:40 -04:00
Wing Lian
a8771b0aad fix tuple add to list 2023-05-24 23:46:04 -04:00
Wing Lian
cf48ff7cac Merge pull request #41 from OpenAccess-AI-Collective/qlora-modules
attempt to find linear modules for qlora
2023-05-24 23:31:19 -04:00
Wing Lian
1cf21daf51 Update src/axolotl/utils/models.py
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-24 23:31:12 -04:00
Wing Lian
ffd1043607 attempt to find linear modules for qlora 2023-05-24 23:18:08 -04:00
Wing Lian
3369c4dcf8 Merge pull request #39 from OpenAccess-AI-Collective/dev
Dev to main
2023-05-24 23:03:22 -04:00
Wing Lian
bc97f9c584 remove dev specific remark 2023-05-24 23:00:53 -04:00
Wing Lian
ce34d64e8a apply black formatting 2023-05-24 22:59:33 -04:00
Wing Lian
ce694e20a3 Merge branch 'main' of github.com:OpenAccess-AI-Collective/axolotl into dev 2023-05-24 22:59:09 -04:00
Wing Lian
cebea372da Merge pull request #36 from OpenAccess-AI-Collective/qlora
Qlora
2023-05-24 22:57:37 -04:00
Wing Lian
1f5d83ea72 remove un-needed code, add validation 2023-05-24 22:47:43 -04:00
Wing Lian
6e7d4d5344 Merge pull request #35 from NanoCode012/update-readme
Feat: Rewrite Readme
2023-05-24 21:31:32 -04:00
NanoCode012
362821ce84 Add trust_remote_code config 2023-05-25 09:53:49 +09:00
NanoCode012
224d186ec9 Simplify docker instruction 2023-05-25 09:51:22 +09:00
NanoCode012
5417824b31 Add seq length 2023-05-25 09:50:43 +09:00
NanoCode012
e1a91b0918 Remove redundant formats 2023-05-25 09:48:18 +09:00
NanoCode012
2a1b5728e6 Add line break 2023-05-25 09:37:18 +09:00
NanoCode012
702f2eee4b Fix inference command 2023-05-25 09:36:33 +09:00
NanoCode012
88bba24d9e Clean up data readme 2023-05-25 09:34:35 +09:00
NanoCode012
ba9ac723f1 Update quickstart. Add common error and contribution section. 2023-05-25 09:32:04 +09:00
NanoCode012
db73b94a58 Add image. Add quickstart. Simplify dataset. 2023-05-25 09:32:04 +09:00
NanoCode012
00dfe43b1d Add image 2023-05-25 09:32:04 +09:00
NanoCode012
9aab0b8cfe Update Docker instructions 2023-05-25 09:32:04 +09:00
NanoCode012
857a80b70e Format dataset types 2023-05-25 09:32:04 +09:00
NanoCode012
cba0048067 Update typo 2023-05-25 09:32:04 +09:00
NanoCode012
c22df8db9b Add all dataset types 2023-05-25 09:32:04 +09:00
NanoCode012
68237ea90a Add extra note to Readme 2023-05-25 09:32:04 +09:00
NanoCode012
4ee79f2641 Fix typo 2023-05-25 09:32:04 +09:00
NanoCode012
2b436680a0 Add new config options to Readme 2023-05-25 09:32:04 +09:00
NanoCode012
04d281312c Feat: Rewrite Readme 2023-05-25 09:32:04 +09:00
Wing Lian
7e81ca720b Update requirements.txt
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-24 15:44:48 -04:00
Wing Lian
3960936bf7 Merge pull request #37 from Thytu/main
fix: handles AutoTokenizer from untrusted source
2023-05-24 15:42:41 -04:00
Valentin De Matos
88ad05df54 fix: handles AutoTokenizer from untrusted source
Set trust_remote_code param depending of cfg.trust_remote_code when calling AutoTokenizer.from_pretrained
2023-05-24 20:57:10 +02:00
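The change described above amounts to forwarding the config flag into the tokenizer load. A minimal sketch (placeholder names, not the literal patch from commit 88ad05df54):

```python
# Hedged sketch of the behavior described in commit 88ad05df54; identifiers are placeholders.
from transformers import AutoTokenizer

trust_remote_code = False  # stands in for cfg.trust_remote_code from the axolotl config

# Only allow execution of code shipped in the model repo when explicitly enabled.
tokenizer = AutoTokenizer.from_pretrained(
    "gpt2",  # placeholder model id
    trust_remote_code=trust_remote_code,
)
```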
Wing Lian
e8aacfbd7c more qlora support 2023-05-24 14:33:18 -04:00
Wing Lian
b9d07aa95a prepare does all this already for qlora? 2023-05-24 14:32:39 -04:00
Wing Lian
3b4d055edd integrate qlora? maybe? 2023-05-24 14:32:39 -04:00
Wing Lian
2ae936fbc4 fix missing fp16 kwarg 2023-05-23 20:44:24 -04:00
Wing Lian
fb100a9ee1 fix enum pass as value 2023-05-23 11:34:03 -04:00
Wing Lian
3a503770e4 Add qa style data for alpaca instructions, fix one_cycle scheduler 2023-05-22 22:58:10 -04:00
Wing Lian
b029a11e65 Merge pull request #34 from OpenAccess-AI-Collective/dev-unstable
lots of various improvements
2023-05-22 12:14:56 -04:00
Wing Lian
e3df3a9f5d cuda/pytorch matrix builds 2023-05-22 12:14:21 -04:00
Wing Lian
f950a881e1 cuda, pytorch matrix for base builds 2023-05-22 12:12:08 -04:00
Wing Lian
de6da13e19 don't need to set here 2023-05-22 12:12:01 -04:00
Wing Lian
9493b1b137 be able to use adam bnb 8bit and one cycle scheduler w fsdp 2023-05-22 09:00:49 -04:00
Wing Lian
1b3e401241 Update src/axolotl/utils/models.py for info msg
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-21 23:01:35 -04:00
Wing Lian
3457810988 Update scripts/finetune.py
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-21 23:00:28 -04:00
Wing Lian
ae1719d30c Update scripts/finetune.py for logging
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-21 23:00:23 -04:00
Wing Lian
98a6781f18 Update src/axolotl/utils/data.py for spelling
Co-authored-by: NanoCode012 <kevinvong@rocketmail.com>
2023-05-21 23:00:13 -04:00
Wing Lian
607a4d33f2 make sure to use train split if loading from hf 2023-05-21 22:04:39 -04:00
Wing Lian
99383f14a3 make one cycle lr div factor configurable 2023-05-21 20:25:06 -04:00
Wing Lian
0f74464652 fix new dataset prompt tokenizers 2023-05-21 18:57:09 -04:00
Wing Lian
e0602a9e54 add missing __init__ 2023-05-21 16:36:41 -04:00
Wing Lian
2809f3f21b pygmalion dataset prompts format, cached tokenized datasets should be hashed on the tokenizer too 2023-05-21 16:16:09 -04:00
Wing Lian
4ea9a66dbd tokenization fixes 2023-05-21 08:33:06 -04:00
Wing Lian
ed37b2268d Merge pull request #32 from NanoCode012/patch-2
Feat: Set `half` using `cfg.fp16` for 4bit
2023-05-20 18:21:02 -04:00
Wing Lian
1d5ab84486 optionally be able to specify alpaca or chat style prompts 2023-05-20 18:16:22 -04:00
NanoCode012
641f8012f9 Set half using cfg.fp16 for 4bit 2023-05-20 02:29:31 +09:00
Wing Lian
fa8bd14be4 update entrypoint and force min accelerate 2023-05-18 06:25:34 -04:00
Wing Lian
13650732f8 concise multiple choice and tldr summarize 2023-05-17 11:29:17 -04:00
Wing Lian
8c2f3cb0f8 support for replit lm 2023-05-17 08:49:03 -04:00
Wing Lian
b46bc02f0a add alpaca multiple choice instruct dataset support 2023-05-16 21:45:34 -04:00
Wing Lian
e553c9080b Merge pull request #29 from NanoCode012/patch-1 2023-05-16 07:12:06 -04:00
NanoCode012
2c73c81348 Add lora_modules_to_save 2023-05-16 19:22:00 +09:00
Wing Lian
f98e173b59 reorder options so debug can happen in the same prepare step 2023-05-15 22:26:30 -04:00
Wing Lian
5e37144754 fix prompters, especially the sharegpt prompter 2023-05-15 22:15:36 -04:00
Wing Lian
bdbca8fa6c more fixes 2023-05-15 14:07:17 -04:00
Wing Lian
42410c783c more fixes 2023-05-14 09:16:41 -04:00
Wing Lian
aef00b6c13 fix torch_dtype for model load 2023-05-14 08:44:22 -04:00
Wing Lian
0d28df0fd2 move filter to before saving so it doesn't happen everytime, update runpod manual script 2023-05-13 21:51:41 -04:00
Wing Lian
84c7bc4b68 whoops, gt vs lt 2023-05-12 14:03:25 -04:00
Wing Lian
aa3c3f97ae optimize dataloading to use cache, fix model token embedding sizes 2023-05-12 13:53:27 -04:00
Wing Lian
f6d1fa4a85 Merge pull request #25 from NanoCode012/patch-2
Fix Trainer() got multiple values for keyword argument 'callbacks'
2023-05-11 09:20:15 -04:00
NanoCode012
89b7f26b9d Merge branch 'main' into patch-2 2023-05-11 21:18:38 +09:00
Wing Lian
165da584b3 fix config for parity with previous change
5159d00a86#diff-65b4693504c4e8ffac76c7f2c90913faee381f802cf64e7f49c995a2134ed3b3R164
2023-05-11 08:13:09 -04:00
Wing Lian
4cc7ed8898 Merge pull request #27 from NanoCode012/patch-1
Fix save typo
2023-05-11 07:27:31 -04:00
NanoCode012
52aada7174 Fix typo 2023-05-11 20:22:30 +09:00
Wing Lian
688c73a81e Merge pull request #26 from OpenAccess-AI-Collective/mpt-triton
Mpt triton
2023-05-10 16:02:05 -04:00
Wing Lian
2bc1a5bde1 black formatting 2023-05-10 16:01:08 -04:00
Wing Lian
7a490a4646 various fixes 2023-05-10 16:00:09 -04:00
NanoCode012
813aab378f Fix Trainer() got multiple values for keyword argument 'callbacks' 2023-05-10 18:28:28 +09:00
Wing Lian
e2e68c3965 testing mpt triton 2023-05-09 20:57:40 -04:00
Wing Lian
a27d594788 fix conditional so alpaca doesn't choke 2023-05-09 20:57:07 -04:00
Wing Lian
1fb0376150 Merge pull request #23 from NanoCode012/patch-1
Fix: Save adapter for lora
2023-05-09 15:05:58 -04:00
Wing Lian
915c56cd97 Update finetune.py 2023-05-09 15:05:39 -04:00
Wing Lian
df9c5085b5 not everyone has bf16 available 2023-05-09 14:47:48 -04:00
Wing Lian
7967cd1039 add 4bit lora 7b 2023-05-09 14:38:32 -04:00
NanoCode012
cd2395987e Don't save full model for lora 2023-05-10 03:18:38 +09:00
NanoCode012
71a1f7f38c Save adapter for lora 2023-05-10 01:08:22 +09:00
Wing Lian
02c59832a3 push up redpajama 3b example 2023-05-08 19:19:18 -04:00
Wing Lian
3f9c9530ea Merge pull request #15 from NanoCode012/feat/completion
Feat: Add Completion dataset type
2023-05-08 19:04:54 -04:00
NanoCode012
174b74ddc9 Rename variable to use same convention 2023-05-09 02:49:44 +09:00
NanoCode012
cf681537ec Add CompletionPrompt type 2023-05-09 02:49:44 +09:00
Wing Lian
bd3c5a5cb3 Merge pull request #21 from NanoCode012/patch-1
Fix: Scheduler and optimizer condition
2023-05-08 13:34:44 -04:00
Wing Lian
bcbc99e655 Merge pull request #19 from NanoCode012/feat/callback-save-lora
Feat: Add callback save peft_model on_save
2023-05-08 13:34:07 -04:00
Wing Lian
b0d2594de9 Merge pull request #22 from NanoCode012/patch-2
Fix BNB OOM by pinning version
2023-05-08 13:33:52 -04:00
NanoCode012
fe582df7d3 Fix BNB OOM by pinning version 2023-05-09 02:10:31 +09:00
NanoCode012
36aaea02b9 Update trainer.py 2023-05-09 02:01:08 +09:00
NanoCode012
5b6690ac25 Fix condition scheduler 2023-05-09 01:44:12 +09:00
Wing Lian
a125693122 add support for trust_remote_code for mpt models 2023-05-08 12:07:27 -04:00
Wing Lian
709be5af81 use printf instead of echo in dockerfile for portability 2023-05-08 11:45:38 -04:00
NanoCode012
cc77bab526 Add callbacks to Trainer 2023-05-09 00:41:19 +09:00
NanoCode012
0d6708bfe4 Add callback save peft_model on_save 2023-05-09 00:38:27 +09:00
Wing Lian
807cca81c0 fix path name to sorkspace 2023-05-08 11:20:03 -04:00
Wing Lian
79deb35c68 setup runpod images
use github.ref_name
2023-05-08 10:48:32 -04:00
Wing Lian
7576d85c73 fix to cd to path in docker 2023-05-08 03:43:46 -04:00
Wing Lian
3b4b476828 use existing state of repo to build, not the checkout 2023-05-08 03:29:48 -04:00
Wing Lian
b5fe063687 fix base for dockerfile 2023-05-08 03:27:10 -04:00
Wing Lian
a12fb0a8da Jeopardy bot! (#17)
* support for jeopardy dataset

* commit the final config for jeopardy bot
2023-05-08 03:21:40 -04:00
Wing Lian
a4329b1068 fix #16 load best model setting when using 8bit 2023-05-07 18:30:48 -04:00
Wing Lian
550502b321 use micro batch size for eval size if not specified 2023-05-07 18:26:05 -04:00
Wing Lian
fae36c7111 blah, wrong base tag 2023-05-07 17:54:26 -04:00
Wing Lian
a31746baa2 whoops, build from base image 2023-05-07 17:47:54 -04:00
Wing Lian
17345c8a4b hanging slash typo 2023-05-07 17:38:56 -04:00
Wing Lian
9cd5d3fcfc build on self hosted GPU runners 2023-05-07 17:25:31 -04:00
Wing Lian
990bec63e6 docker layer caching, build w axolotl from base build 2023-05-07 17:16:05 -04:00
Wing Lian
0c46806ae2 typo in git repo for pip 2023-05-07 16:00:21 -04:00
Wing Lian
66fa751c18 add huggingface packages and awscli 2023-05-07 11:51:57 -04:00
Wing Lian
21b74397de fix typo and add apex 2023-05-07 11:48:47 -04:00
Wing Lian
3f11b47488 needs libaio-dev from apt 2023-05-07 11:23:43 -04:00
Wing Lian
ece46b2504 pip install packaging dep 2023-05-07 11:09:03 -04:00
Wing Lian
92d800a394 build dependencies and aws-cli 2023-05-07 11:02:26 -04:00
Wing Lian
2734e3f1a2 build base separately
fix arg order for image
fix dockerfile var excaping
move args around
2023-05-07 10:56:12 -04:00
Wing Lian
14ebd2e007 build base too 2023-05-07 09:48:41 -04:00
Wing Lian
4a79dabff0 fix push to docker hub 2023-05-07 08:52:49 -04:00
Wing Lian
47ad3890bc fix whitespace and instruction on inference 2023-05-07 08:28:15 -04:00
Wing Lian
76b24bca2e push to docker hub
set docker image name
2023-05-07 08:06:50 -04:00
Wing Lian
73450d9de7 TORCH_CUDA_ARCH_LIST should be an ARG 2023-05-07 07:28:57 -04:00
Wing Lian
97cf77891e run this on self hosted runner for now
fix typo
fixes to docker build
need pip wheel
don't duplicate pip install
2023-05-07 07:21:25 -04:00
Wing Lian
e2599edab9 runs on larger git runner? 2023-05-07 04:12:47 -04:00
Wing Lian
75bc8561c0 don't push the image 2023-05-07 03:39:05 -04:00
Wing Lian
15bdbae805 run on git commit 2023-05-07 03:37:59 -04:00
Wing Lian
6603b3744e try docker build on gitlab
require docker in gitlab
use kaniko to build docker in gitlab
2023-05-07 03:21:08 -04:00
Wing Lian
2634689774 build dockerfile in gha 2023-05-07 02:58:21 -04:00
Wing Lian
4818380fa6 update stablelm config 2023-05-07 01:58:23 -04:00
Wing Lian
247825bd57 refactor inference, warn if model is frozen 2023-05-07 01:54:15 -04:00
Wing Lian
cb9a887047 Merge pull request #13 from winglian/dev
merge dev branch for various fixes
2023-05-07 01:48:02 -04:00
Wing Lian
a15d823b29 Merge pull request #12 from NanoCode012/feat/eval_config
Add eval_batch_size for evaluation
2023-05-07 01:46:53 -04:00
NanoCode012
0e74b6402e Add eval_batch_size for evaluation 2023-05-06 22:21:24 +09:00
Wing Lian
a10a8265ef fix log sweep lr 2023-05-03 15:06:03 -04:00
Wing Lian
9105935b00 support for multi line inference input, log sweep over learning rates 2023-05-03 13:48:54 -04:00
Wing Lian
7748f3d6da fix adam bnb optimizer grouped parameters, fix peft model 8bit conversion logic, black formatting 2023-05-01 16:31:46 -04:00
Wing Lian
fe9c29d73e install peft from main branch 2023-05-01 12:24:04 -04:00
Wing Lian
2255bb7f4f support llama-adapter zero init attention 2023-05-01 10:42:21 -04:00
Wing Lian
55baef0e03 use prebuilt wheels for flash-attn and deepspeed 2023-05-01 09:52:03 -04:00
Wing Lian
ad2b48c0fa fdsp config dict fix, todo list, add torchdistx support 2023-04-30 13:32:07 -04:00
Wing Lian
9190ada23a 8bit and deepspeed changes 2023-04-30 06:50:35 -04:00
Wing Lian
4dbef0941f update ds_config 2023-04-30 04:24:58 -04:00
Wing Lian
6dfdd2dec0 don't load models in 8bit unless they are using an adapter, also fix tokenizer load in exceptional case 2023-04-30 03:19:56 -04:00
Wing Lian
29936bba7f fix fsdp training args 2023-04-30 00:56:28 -04:00
Wing Lian
78821815de fix for zero value warmup steps 2023-04-30 00:34:12 -04:00
Wing Lian
5159d00a86 fix sharegpt tokenization, refactor tokenization debugging 2023-04-30 00:23:53 -04:00
Wing Lian
c0f50d9c61 wire up gradient checkpointing for 4bit 2023-04-28 22:28:41 -04:00
Wing Lian
4e705eda6d Merge pull request #9 from winglian/dev
feature dump into main
2023-04-24 21:56:17 -04:00
Wing Lian
4a17a4c9a1 fix dataset handling, support galactica 2023-04-24 10:54:45 -04:00
Wing Lian
097d367af6 tweaks to data loading, 8 bit adam, accelerate and deepspeed 2023-04-24 09:41:35 -04:00
Wing Lian
4f2584f2dc shuffle and split dataset after save/load 2023-04-24 09:41:35 -04:00
Wing Lian
8d437853c8 fix sharegpt handling from hf, don't worry about loading llama if using earlier transformers release 2023-04-24 09:41:35 -04:00
Wing Lian
8e2a5609b3 stablelm support 2023-04-24 09:41:34 -04:00
Wing Lian
94f5e415a3 various bugfixes 2023-04-24 09:41:34 -04:00
Eric Hartford
2624bc2f11 ignore config, add python 3.9 (#8) 2023-04-24 07:23:19 -04:00
Wing Lian
bb991fd870 fix bug when model_type not explicitly passed 2023-04-19 13:15:33 -04:00
Wing Lian
d65385912e improve inference 2023-04-19 12:57:27 -04:00
Wing Lian
5749eb0a1c fix runpod script 2023-04-19 08:39:54 -04:00
Wing Lian
7753cdee57 cleanup empty lines, tweak env for runpod setup 2023-04-19 08:24:58 -04:00
Wing Lian
f50de1b1cb handle empty lines 2023-04-19 08:03:34 -04:00
Wing Lian
0a472e1e08 quickstart instructions for starting from runpod (#5) 2023-04-18 19:22:25 -04:00
Wing Lian
5cb7ea49a6 update readme w compat matrix 2023-04-18 14:42:37 -04:00
Wing Lian
8746b701fe attempt xformers hijack attention 2023-04-18 14:03:50 -04:00
Wing Lian
6045345d6b WIP large refactor to make finetune script a little more manageable (#3) 2023-04-18 14:01:38 -04:00
63 changed files with 4155 additions and 624 deletions

.github/workflows/base.yml (new file, 54 lines)

@@ -0,0 +1,54 @@
name: ci-cd-base
on:
push:
branches:
- "main-base"
- "dev-base"
jobs:
build-base:
if: github.repository_owner == 'OpenAccess-AI-Collective'
# this job needs to be run on self-hosted GPU runners...
runs-on: self-hosted
strategy:
matrix:
include:
- cuda: cu118
cuda_version: 11.8.0
cuda_version_bnb: "118"
pytorch: 2.0.0
- cuda: cu117
cuda_version: 11.7.0
cuda_version_bnb: "117"
pytorch: 1.13.1
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Docker metadata
id: metadata
uses: docker/metadata-action@v3
with:
images: winglian/axolotl-base
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/Dockerfile-base
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
labels: ${{ steps.metadata.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: |
CUDA_VERSION=${{ matrix.cuda_version }}
CUDA_VERSION_BNB=${{ matrix.cuda_version_bnb }}
CUDA=${{ matrix.cuda }}
PYTORCH_VERSION=${{ matrix.pytorch }}

.github/workflows/main.yml (new file, 90 lines)

@@ -0,0 +1,90 @@
name: ci-cd
on:
push:
branches:
- "main"
- "dev"
jobs:
build-axolotl:
if: github.repository_owner == 'OpenAccess-AI-Collective'
# this job needs to be run on self-hosted GPU runners...
strategy:
matrix:
include:
- cuda: cu118
cuda_version: 11.8.0
pytorch: 2.0.0
- cuda: cu117
cuda_version: 11.7.0
pytorch: 1.13.1
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Docker metadata
id: metadata
uses: docker/metadata-action@v3
with:
images: winglian/axolotl
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build
uses: docker/build-push-action@v4
with:
context: .
build-args: |
BASE_TAG=${{ github.ref_name }}-base-${{ matrix.cuda }}-${{ matrix.pytorch }}
file: ./docker/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
labels: ${{ steps.metadata.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-axolotl-runpod:
needs: build-axolotl
if: github.repository_owner == 'OpenAccess-AI-Collective'
# this job needs to be run on self-hosted GPU runners...
strategy:
matrix:
include:
- cuda: cu118
cuda_version: 11.8.0
pytorch: 2.0.0
- cuda: cu117
cuda_version: 11.7.0
pytorch: 1.13.1
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Docker metadata
id: metadata
uses: docker/metadata-action@v3
with:
images: winglian/axolotl-runpod
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build
uses: docker/build-push-action@v4
with:
context: .
build-args: |
BASE_TAG=${{ github.ref_name }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
file: ./docker/Dockerfile-runpod
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
labels: ${{ steps.metadata.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max

.github/workflows/tests.yml (new file, 31 lines)

@@ -0,0 +1,31 @@
name: PyTest
on:
push:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python_version: ["3.9", "3.10"]
timeout-minutes: 10
steps:
- name: Check out repository code
uses: actions/checkout@v3
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python_version }}
cache: 'pip' # caching pip dependencies
- name: Install dependencies
run: |
pip install -e .
pip install -r requirements-tests.txt
- name: Run tests
run: |
pytest tests/

.gitignore (164 lines changed)

@@ -1,3 +1,163 @@
**/axolotl.egg-info
**/__pycache__
.idea
configs
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

FAQS.md (new file, 4 lines)

@@ -0,0 +1,4 @@
# FAQs
- Can you train StableLM with this? Yes, but only with a single GPU atm. Multi GPU support is coming soon! Just waiting on this [PR](https://github.com/huggingface/transformers/pull/22874)
- Will this work with Deepspeed? That's still a WIP, but setting `export ACCELERATE_USE_DEEPSPEED=true` should work in some cases

README.md (425 lines changed)

@@ -1,47 +1,408 @@
~~# Axolotl
# Axolotl
#### You know you're going to axolotl questions
<div align="center">
<img src="image/axolotl.png" alt="axolotl" width="160">
<div>
<p>
<b>One repo to finetune them all! </b>
</p>
<p>
Go ahead and axolotl questions!!
</p>
</div>
</div>
## Getting Started
## Axolotl supports
- Point the config you are using to a huggingface hub dataset (see [configs/llama_7B_4bit.yml](https://github.com/winglian/axolotl/blob/main/configs/llama_7B_4bit.yml#L6-L8))
| | fp16/fp32 | fp16/fp32 w/ lora | qlora | 4bit-quant | 4bit-quant w/flash attention | flash attention | xformers attention |
|---------|:----------|:------------------|------|------------|------------------------------|-----------------|--------------------|
| llama | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Pythia | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
| cerebras | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
| mpt | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
| falcon | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❓ |
## Quickstart ⚡
**Requirements**: Python 3.9.
```bash
git clone https://github.com/OpenAccess-AI-Collective/axolotl
pip3 install -e .[int4]
accelerate config
# finetune lora
accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml
# inference
accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \
--inference --lora_model_dir="./lora-out"
```
## Installation
### Environment
- Docker
```bash
docker run --gpus '"all"' --rm -it winglian/axolotl:main
```
- `winglian/axolotl:dev`: dev branch
- `winglian/axolotl-runpod:main`: for runpod
- Conda/Pip venv
1. Install python **3.9**
2. Install python dependencies with ONE of the following:
- `pip3 install -e .[int4]` (recommended)
- `pip3 install -e .[int4_triton]`
- `pip3 install -e .`
### Dataset
Have dataset(s) in one of the following formats (JSONL recommended); a short Python sketch for writing one of these files follows this section:
- `alpaca`: instruction; input(optional)
```json
{"instruction": "...", "input": "...", "output": "..."}
```
- `sharegpt`: conversations
```json
{"conversations": [{"from": "...", "value": "..."}]}
```
- `completion`: raw corpus
```json
{"text": "..."}
```
<details>
<summary>See other formats</summary>
- `jeopardy`: question and answer
```json
{"question": "...", "category": "...", "answer": "..."}
```
- `oasst`: instruction
```json
{"INSTRUCTION": "...", "RESPONSE": "..."}
```
- `gpteacher`: instruction; input(optional)
```json
{"instruction": "...", "input": "...", "response": "..."}
```
- `reflection`: instruction with reflect; input(optional)
```json
{"instruction": "...", "input": "...", "output": "...", "reflection": "...", "corrected": "..."}
```
- `explainchoice`: question, choices, (solution OR explanation)
```json
{"question": "...", "choices": ["..."], "solution": "...", "explanation": "..."}
```
- `concisechoice`: question, choices, (solution OR explanation)
```json
{"question": "...", "choices": ["..."], "solution": "...", "explanation": "..."}
```
- `summarizetldr`: article and summary
```json
{"article": "...", "summary": "..."}
```
> Have some new format to propose? Check if it's already defined in [data.py](src/axolotl/utils/data.py) in `dev` branch!
</details>
Optionally, download some datasets, see [data/README.md](data/README.md)
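As an illustration (hypothetical file path and records, not part of the repo), a minimal Python sketch for writing an `alpaca`-format dataset as JSONL:

```python
# Minimal sketch: write a tiny alpaca-format dataset as JSONL.
# The file name and records below are made-up examples for illustration only.
import json

records = [
    {"instruction": "Name the capital of France.", "input": "", "output": "Paris"},
    {
        "instruction": "Summarize the text.",
        "input": "Axolotl is a tool for finetuning language models.",
        "output": "Axolotl finetunes language models.",
    },
]

with open("my_alpaca.jsonl", "w", encoding="utf-8") as fout:
    for rec in records:
        fout.write(json.dumps(rec) + "\n")  # one JSON object per line
```

A file like this can then be referenced from the `datasets:` section of a config with `type: alpaca`.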
### Config
See sample configs in the [configs](configs) folder or [examples](examples) for a quick start. It is recommended to duplicate one and modify it to your needs. The most important options are:
- model
```yaml
base_model: ./llama-7b-hf # local or huggingface repo
```
Note: The code will load the right architecture.
- dataset
```yaml
datasets:
- path: vicgalle/alpaca-gpt4 # local or huggingface repo
type: alpaca # format from earlier
sequence_len: 2048 # max token length / prompt
```
- loading
```yaml
load_in_4bit: true
load_in_8bit: true
bf16: true # require >=ampere
fp16: true
tf32: true # require >=ampere
```
Note: Repo does not do 4-bit quantization.
- lora
```yaml
adapter: lora # qlora or leave blank for full finetune
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
```
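For orientation, the LoRA options above map roughly onto a `peft` `LoraConfig`; the sketch below mirrors the example values and is an illustration, not axolotl's exact internal call:

```python
# Rough mapping of the LoRA options above onto peft's LoraConfig.
# Values mirror the yaml example; axolotl constructs its own config internally.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # lora_r
    lora_alpha=16,                        # lora_alpha
    lora_dropout=0.05,                    # lora_dropout
    target_modules=["q_proj", "v_proj"],  # lora_target_modules
    bias="none",
    task_type="CAUSAL_LM",
)
```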
<details>
<summary>All yaml options</summary>
```yaml
# this is the huggingface model that contains *.pt, *.safetensors, or *.bin files
# this can also be a relative path to a model on disk
base_model: ./llama-7b-hf
# you can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
base_model_ignore_patterns:
# if the base_model repo on hf hub doesn't include configuration .json files,
# you can set that here, or leave this empty to default to base_model
base_model_config: ./llama-7b-hf
# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
model_type: AutoModelForCausalLM
# Corresponding tokenizer for the model AutoTokenizer is a good choice
tokenizer_type: AutoTokenizer
# Trust remote code for untrusted source
trust_remote_code:
# whether you are training a 4-bit GPTQ quantized model
gptq: true
gptq_groupsize: 128 # group size
gptq_model_v1: false # v1 or v2
# this will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
load_in_8bit: true
# use bitsandbytes 4 bit
load_in_4bit:
# Use CUDA bf16
bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
# Use CUDA fp16
fp16: true
# Use CUDA tf32
tf32: true # require >=ampere
# a list of one or more datasets to finetune the model with
datasets:
# this can be either a hf dataset, or relative path
- path: vicgalle/alpaca-gpt4
type: alpaca
# The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
type: alpaca # format OR format:prompt_style (chat/instruct)
data_files: # path to source data files
shards: # number of shards to split data into
# axolotl attempts to save the dataset as an arrow after packing the data together so
# subsequent training attempts load faster, relative path
dataset_prepared_path: data/last_run_prepared
# push prepared dataset to hub
push_dataset_to_hub: # repo path
# whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
# required to be true when used in combination with `push_dataset_to_hub`
hf_use_auth_token: # boolean
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc
val_set_size: 0.04
# Num shards for whole dataset
dataset_shard_num:
# Index of shard to use for whole dataset
dataset_shard_idx:
# the maximum length of an input to train with, this should typically be less than 2048
# as most models have a token/context limit of 2048
sequence_len: 2048
# max sequence length to concatenate training samples together up to
# inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
max_packed_sequence_len: 1024
# if you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
adapter: lora
# if you already have a lora model trained that you want to load, put that here
# lora hyperparameters
lora_model_dir:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
# - k_proj
# - o_proj
# - gate_proj
# - down_proj
# - up_proj
lora_target_linear: # if true, will target all linear layers
lora_modules_to_save:
# - embed_tokens
# - lm_head
lora_out_dir:
lora_fan_in_fan_out: false
# wandb configuration if you're using it
wandb_mode:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: # 'checkpoint'
# where to save the finished model to
output_dir: ./completed-model
# training hyperparameters
batch_size: 8
micro_batch_size: 2
eval_batch_size: 2
num_epochs: 3
warmup_steps: 100
learning_rate: 0.00003
logging_steps:
# whether to mask out or include the human's prompt from the training labels
train_on_inputs: false
# don't use this, leads to wonky training (according to someone on the internet)
group_by_length: false
# does not work with current implementation of 4-bit LoRA
gradient_checkpointing: false
# stop training after this many evaluation losses have increased in a row
# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
early_stopping_patience: 3
# specify a scheduler and kwargs to use with the optimizer
lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine
lr_scheduler_kwargs:
# for one_cycle optim
lr_div_factor: # learning rate div factor
# for log_sweep optim
log_sweep_min_lr:
log_sweep_max_lr:
# specify optimizer
optimizer:
# specify weight decay
weight_decay:
# whether to use xformers attention patch https://github.com/facebookresearch/xformers:
xformers_attention:
# whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
flash_attention: # require a100 for llama
# resume from a specific checkpoint dir
resume_from_checkpoint:
# if resume_from_checkpoint isn't set and you simply want it to start where it left off
# be careful with this being turned on between different models
auto_resume_from_checkpoints: false
# don't mess with this, it's here for accelerate and torchrun
local_rank:
# add or change special tokens
special_tokens:
# bos_token: "<s>"
# eos_token: "</s>"
# unk_token: "<unk>"
# add extra tokens
tokens:
# FSDP
fsdp:
fsdp_config:
# Deepspeed
deepspeed:
# Path to torch distx for optim 'adamw_anyprecision'
torchdistx_path:
# Set padding for data collator to 'longest'
collator_pad_to_longest:
# Debug mode
debug:
# Seed
seed:
# Allow overwrite yml config using from cli
strict:
```
- Optionally Download some datasets, see [data/README.md](data/README.md)
</details>
### Accelerate
- Create a new or update the existing YAML config [config/pythia_1_2B_alpaca.yml](config/pythia_1_2B_alpaca.yml)
- Install python dependencies with ONE of the following:
Configure accelerate
- `pip3 install -e .[int4]` (recommended)
- `pip3 install -e .[int4_triton]`
- `pip3 install -e .`
-
- If not using `int4` or `int4_triton`, run `pip install "peft @ git+https://github.com/huggingface/peft.git"`
- Configure accelerate `accelerate config` or update `~/.cache/huggingface/accelerate/default_config.yaml`
```bash
accelerate config
```yaml
compute_environment: LOCAL_MACHINE
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
# Edit manually
# nano ~/.cache/huggingface/accelerate/default_config.yaml
```
- Train! `accelerate launch scripts/finetune.py`, make sure to choose the correct YAML config file
- Alternatively you can pass in the config file like: `accelerate launch scripts/finetune.py configs/llama_7B_alpaca.yml`~~
### Train
Run
```bash
accelerate launch scripts/finetune.py configs/your_config.yml
```
### Inference
Pass the appropriate flag to the train command:
- Pretrained LORA:
```bash
--inference --lora_model_dir ./completed-model
```
- Full weights finetune:
```bash
--inference --base_model ./completed-model
```
### Merge LORA to base
Add below flag to train command above
```bash
--merge_lora --lora_model_dir="./completed-model" --load_in_8bit=False --load_in_4bit=False
```
## Common Errors 🧰
> Cuda out of memory
Please reduce any of the following:
- `micro_batch_size`
- `eval_batch_size`
- `sequence_len`
> RuntimeError: expected scalar type Float but found Half
Try setting `fp16: true`
> NotImplementedError: No operator found for `memory_efficient_attention_forward` ...
Try to turn off xformers.
## Need help? 🙋‍♂️
Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you
## Contributing 🤝
Bugs? Please check for an existing open issue, else create a new [Issue](https://github.com/OpenAccess-AI-Collective/axolotl/issues/new).
PRs are **greatly welcome**!

TODO.md (new file, 10 lines)

@@ -0,0 +1,10 @@
# todo list
- [ ] Validation of parameters for combinations that won't work
## things that are known not to work
- FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203
- adamw_bnb_8bit doesn't play well with FSDP offload


@@ -0,0 +1,15 @@
compute_environment: LOCAL_MACHINE
distributed_type: 'NO'
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false


@@ -0,0 +1,41 @@
base_model: facebook/galactica-1.3b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: false
datasets:
- path: tatsu-lab/alpaca
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.1
adapter:
lora_model_dir:
sequence_len: 1024
max_packed_sequence_len: 1024
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./lora-llama-alpaca
batch_size: 32
micro_batch_size: 16
num_epochs: 3
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: false
tf32: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
tokens:
pad_token: "[PAD]"
bos_token: "<s>"
eos_token: "</s>"
unk_token: "<unk>"

configs/gpt_neox_20b.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
base_model: EleutherAI/gpt-neox-20b
base_model_ignore_patterns: pytorch* # prefer safetensors
model_type: GPTNeoXForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
datasets:
- path: nomic-ai/gpt4all-j-prompt-generations
type: alpaca
shards: 4
shards_index: 0
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
adapter: lora
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len: 2048
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
- query_key_value
lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
wandb_project: gpt4all-neox-20b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./gpt4all-neox-20b
batch_size: 48
micro_batch_size: 4
num_epochs: 5
learning_rate: 0.00003
lr_scheduler: one_cycle
train_on_inputs: false
group_by_length: false
bf16: True
tf32: True
early_stopping_patience:
resume_from_checkpoint:
local_rank:


@@ -0,0 +1,39 @@
base_model: huggyllama/llama-13b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
datasets:
- path: anon8231489123/ShareGPT_Vicuna_unfiltered
data_files: ShareGPT_V3_unfiltered_cleaned_split_no_imsorry.json
type: sharegpt
dataset_prepared_path: last_run_prepared
val_set_size: 0.002
adapter:
lora_model_dir:
sequence_len: 2048
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./llama-13b-sharegpt
batch_size: 64
micro_batch_size: 2
warmup_steps: 1000
save_steps:
eval_steps:
num_epochs: 5
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
early_stopping_patience: 5
resume_from_checkpoint:
local_rank:


@@ -5,7 +5,8 @@ load_in_8bit: true
datasets:
- path: data/alpaca_data_gpt4.jsonl
type: alpaca
- path: data/vicuna_cleaned.jsonl
- path: anon8231489123/ShareGPT_Vicuna_unfiltered
data_files: ShareGPT_V3_unfiltered_cleaned_split_no_imsorry.json
type: sharegpt
- path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
type: gpteacher
@@ -30,6 +31,8 @@ wandb_log_model: checkpoint
output_dir: ./lora-llama-alpaca
batch_size: 128
micro_batch_size: 16
warmup_steps: 1000
save_steps:
num_epochs: 5
learning_rate: 0.00003
train_on_inputs: false


@@ -4,7 +4,7 @@ model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
datasets:
- path: vicgalle/alpaca-gpt4
- path: tatsu-lab/alpaca # original alpaca dataset
type: alpaca
dataset_prepared_path: data/last_run_prepared
val_set_size: 0.04
@@ -29,6 +29,7 @@ output_dir: ./lora-test
batch_size: 8
micro_batch_size: 2
num_epochs: 3
warmup_steps: 100
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
@@ -37,5 +38,8 @@ tf32: true
gradient_checkpointing: false
early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
local_rank:
load_4bit: true
xformers_attention: true
flash_attention:


@@ -0,0 +1,58 @@
base_model: huggyllama/llama-7b
base_model_config: huggyllama/llama-7b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
datasets:
- path: openaccess-ai-collective/jeopardy
type: jeopardy
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len: 2048
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project: jeopardy-bot-7b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./jeopardy-bot-7b
batch_size: 4
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 5
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
eval_steps: 110
save_steps: 660
debug:
deepspeed:
weight_decay: 0.0001
fsdp:
fsdp_config:
tokens:
pad_token: "[PAD]"
bos_token: "<s>"
eos_token: "</s>"
unk_token: "<unk>"

configs/quickstart.yml (new file, 45 lines)

@@ -0,0 +1,45 @@
base_model: decapoda-research/llama-7b-hf-int4
base_model_config: decapoda-research/llama-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
datasets:
- path: tatsu-lab/alpaca # original alpaca dataset
type: alpaca
dataset_prepared_path: data/last_run_prepared
val_set_size: 0.04
adapter: lora
lora_model_dir:
sequence_len: 1024
max_packed_sequence_len: 1024
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
# - k_proj
# - o_proj
lora_fan_in_fan_out: false
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./lora-test
batch_size: 4
micro_batch_size: 1
num_epochs: 3
warmup_steps: 100
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
gradient_checkpointing: false
early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
local_rank:
gptq: true
xformers_attention: true
flash_attention:

configs/sample.yml (new file, 86 lines)

@@ -0,0 +1,86 @@
# this is the huggingface model that contains *.pt, *.safetensors, or *.bin files
# this can also be a relative path to a model on disk
base_model: decapoda-research/llama-7b-hf-int4
# you can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
base_model_ignore_patterns:
# if the base_model repo on hf hub doesn't include configuration .json files,
# you can set that here, or leave this empty to default to base_model
base_model_config: decapoda-research/llama-7b-hf
# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
model_type: AutoModelForCausalLM
# Corresponding tokenizer for the model AutoTokenizer is a good choice
tokenizer_type: AutoTokenizer
# whether you are training a 4-bit quantized model
load_4bit: true
# this will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
load_in_8bit: true
# a list of one or more datasets to finetune the model with
datasets:
# this can be either a hf dataset, or relative path
- path: vicgalle/alpaca-gpt4
# The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
type: alpaca
# axolotl attempts to save the dataset as an arrow after packing the data together so
# subsequent training attempts load faster, relative path
dataset_prepared_path: data/last_run_prepared
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc
val_set_size: 0.04
# if you want to use lora, leave blank to train all parameters in original model
adapter: lora
# if you already have a lora model trained that you want to load, put that here
lora_model_dir:
# the maximum length of an input to train with, this should typically be less than 2048
# as most models have a token/context limit of 2048
sequence_len: 2048
# max sequence length to concatenate training samples together up to
# inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
max_packed_sequence_len: 1024
# lora hyperparameters
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
# - k_proj
# - o_proj
lora_fan_in_fan_out: false
# wandb configuration if you're using it
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
# where to save the finished model to
output_dir: ./completed-model
# training hyperparameters
batch_size: 8
micro_batch_size: 2
num_epochs: 3
warmup_steps: 100
learning_rate: 0.00003
# whether to mask out or include the human's prompt from the training labels
train_on_inputs: false
# don't use this, leads to wonky training (according to someone on the internet)
group_by_length: false
# Use CUDA bf16
bf16: true
# Use CUDA tf32
tf32: true
# does not work with current implementation of 4-bit LoRA
gradient_checkpointing: false
# stop training after this many evaluation losses have increased in a row
# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
early_stopping_patience: 3
# specify a scheduler to use with the optimizer. only one_cycle is supported currently
lr_scheduler:
# whether to use xformers attention patch https://github.com/facebookresearch/xformers:
xformers_attention:
# whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
flash_attention:
# resume from a specific checkpoint dir
resume_from_checkpoint:
# if resume_from_checkpoint isn't set and you simply want it to start where it left off
# be careful with this being turned on between different models
auto_resume_from_checkpoints: false
# don't mess with this, it's here for accelerate and torchrun
local_rank:
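The annotated options above are consumed by scripts/finetune.py further down in this compare. A minimal sketch of loading such a config, assuming the DictDefault wrapper from axolotl.utils.dict introduced in this changeset (the path and printed keys are illustrative):
```python
# minimal sketch: read a config like configs/sample.yml the way
# scripts/finetune.py does in this changeset
import yaml
from axolotl.utils.dict import DictDefault  # addict-based default dict added in this compare

with open("configs/sample.yml", "r") as f:
    cfg = DictDefault(yaml.load(f, Loader=yaml.Loader))

# attribute access works, and keys left blank in the YAML (e.g. flash_attention:)
# come back as None rather than raising
print(cfg.base_model, cfg.lora_r, cfg.flash_attention)
```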

56
configs/stability_3b.yml Normal file
View File

@@ -0,0 +1,56 @@
base_model: stabilityai/stablelm-base-alpha-3b
base_model_config: stabilityai/stablelm-base-alpha-3b
load_in_8bit: false
datasets:
- path: vicgalle/alpaca-gpt4
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.04
adapter:
lora_model_dir:
sequence_len: 4096
max_packed_sequence_len: 4096
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project: stable-alpaca-3b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./stable-alpaca-3b
batch_size: 2
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 100
eval_steps: 50
save_steps: 200
debug:
deepspeed:
weight_decay: 0.01
fsdp:
fsdp_config:
#tokens:
# pad_token: "[PAD]"
# bos_token: "<s>"
# eos_token: "</s>"
# unk_token: "<unk>"

View File

@@ -1,6 +1,5 @@
- Download some datasets
-
## Download some datasets
```shell
curl https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_gpt4.json -o data/raw/alpaca_data_gpt4.json
curl https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json -L -o data/raw/vicuna_cleaned.json
@@ -8,7 +7,7 @@ curl https://github.com/teknium1/GPTeacher/blob/main/Instruct/gpt4-instruct-simi
curl https://github.com/teknium1/GPTeacher/blob/main/Roleplay/roleplay-similarity_0.6-instruct-dataset.json?raw=true -L -o data/raw/roleplay-similarity_0.6-instruct-dataset.json
```
- Convert the JSON data files to JSONL.
## Convert the JSON data files to JSONL.
```shell
python3 ./scripts/alpaca_json_to_jsonl.py --input data/alpaca_data_gpt4.json > data/alpaca_data_gpt4.jsonl
@@ -16,8 +15,9 @@ python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/vicuna_cleaned.json >
python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/roleplay-similarity_0.6-instruct-dataset.json > data/roleplay-similarity_0.6-instruct-dataset.jsonl
python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/gpt4-instruct-similarity-0.6-dataset.json > data/gpt4-instruct-similarity-0.6-dataset.jsonl
```
---
- Using JSONL makes it easier to subset the data if you want a smaller training set, i.e get 2000 random examples.
Using JSONL makes it easier to subset the data if you want a smaller training set, e.g. get 2000 random examples.
```shell
shuf -n2000 data/vicuna_cleaned.jsonl > data/vicuna_cleaned.subset0.jsonl

20
docker/Dockerfile Normal file
View File

@@ -0,0 +1,20 @@
ARG BASE_TAG=main-base
FROM winglian/axolotl-base:$BASE_TAG
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
RUN apt-get update && \
apt-get install -y vim curl
WORKDIR /workspace
# The base image ships with `pydantic==1.8.2` which is not working
RUN python3 -m pip install -U --no-cache-dir pydantic
RUN mkdir axolotl
COPY . axolotl/
RUN cd axolotl && \
pip install -e .[int4]
# helper for huggingface-login cli
RUN git config --global credential.helper store

102
docker/Dockerfile-base Normal file
View File

@@ -0,0 +1,102 @@
ARG CUDA_VERSION="11.8.0"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG MAX_JOBS=4
FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION as base-builder
ENV PATH="/root/miniconda3/bin:${PATH}"
ARG PYTHON_VERSION="3.9"
ARG PYTORCH="2.0.0"
ARG CUDA="cu118"
ENV PYTHON_VERSION=$PYTHON_VERSION
RUN apt-get update
RUN apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/*
RUN wget \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& mkdir /root/.conda \
&& bash Miniconda3-latest-Linux-x86_64.sh -b \
&& rm -f Miniconda3-latest-Linux-x86_64.sh
RUN conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"
ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
WORKDIR /workspace
RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
python3 -m pip install --no-cache-dir -U torch==${PYTORCH} torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA
FROM base-builder AS flash-attn-builder
WORKDIR /workspace
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
RUN git clone https://github.com/HazyResearch/flash-attention.git && \
cd flash-attention && \
python3 setup.py bdist_wheel && \
cd csrc/fused_dense_lib && \
python3 setup.py bdist_wheel && \
cd ../xentropy && \
python3 setup.py bdist_wheel && \
cd ../rotary && \
python3 setup.py bdist_wheel && \
cd ../layer_norm && \
python3 setup.py bdist_wheel
FROM base-builder AS deepspeed-builder
WORKDIR /workspace
RUN git clone https://github.com/microsoft/DeepSpeed.git && \
cd DeepSpeed && \
MAX_CONCURRENCY=8 DS_BUILD_SPARSE_ATTN=0 DS_BUILD_OPS=1 python3 setup.py bdist_wheel
FROM base-builder AS bnb-builder
WORKDIR /workspace
ARG CUDA_VERSION_BNB="118"
ENV CUDA_VERSION_BNB=$CUDA_VERSION_BNB
RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
cd bitsandbytes && \
CUDA_VERSION=$CUDA_VERSION_BNB make cuda11x && \
python setup.py bdist_wheel
FROM base-builder
# recompile apex
RUN python3 -m pip uninstall -y apex
RUN git clone https://github.com/NVIDIA/apex
# `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .
RUN mkdir -p /workspace/builds
COPY --from=bnb-builder /workspace/bitsandbytes /workspace/builds/bitsandbytes
RUN mkdir -p /workspace/wheels/bitsandbytes
COPY --from=deepspeed-builder /workspace/DeepSpeed/dist/deepspeed-*.whl wheels
COPY --from=bnb-builder /workspace/bitsandbytes/dist/bitsandbytes-*.whl wheels
COPY --from=bnb-builder /workspace/bitsandbytes/bitsandbytes/libbitsandbytes*.so wheels/bitsandbytes
COPY --from=flash-attn-builder /workspace/flash-attention/dist/flash_attn-*.whl wheels
COPY --from=flash-attn-builder /workspace/flash-attention/csrc/fused_dense_lib/dist/fused_dense_lib-*.whl wheels
COPY --from=flash-attn-builder /workspace/flash-attention/csrc/xentropy/dist/xentropy_cuda_lib-*.whl wheels
COPY --from=flash-attn-builder /workspace/flash-attention/csrc/rotary/dist/rotary_emb-*.whl wheels
COPY --from=flash-attn-builder /workspace/flash-attention/csrc/layer_norm/dist/dropout_layer_norm-*.whl wheels
RUN pip3 install wheels/deepspeed-*.whl wheels/flash_attn-*.whl wheels/fused_dense_lib-*.whl wheels/xentropy_cuda_lib-*.whl wheels/rotary_emb-*.whl wheels/dropout_layer_norm-*.whl
RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
RUN git lfs install --skip-repo
RUN pip3 install "peft @ git+https://github.com/huggingface/peft.git@main" \
"accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
"transformers @ git+https://github.com/huggingface/transformers.git@main" && \
pip3 install awscli && \
# The base image ships with `pydantic==1.8.2` which is not working
pip3 install -U --no-cache-dir pydantic

14
docker/Dockerfile-runpod Normal file
View File

@@ -0,0 +1,14 @@
ARG BASE_TAG=main
FROM winglian/axolotl:$BASE_TAG
COPY scripts/runpod-entrypoint.sh /root/runpod-entrypoint.sh
RUN apt install --yes --no-install-recommends openssh-server tmux && \
mkdir -p ~/.ssh && \
chmod 700 ~/.ssh && \
printf "\n[[ -z \"\$TMUX\" ]] && { tmux attach-session -t ssh_tmux || tmux new-session -s ssh_tmux; exit; }\n" >> ~/.bashrc && \
chmod +x /workspace/axolotl/scripts/runpod-entrypoint.sh && \
chmod +x /root/runpod-entrypoint.sh
ENTRYPOINT ["/root/runpod-entrypoint.sh"]
CMD ["sleep", "infinity"]

View File

@@ -1,37 +1,57 @@
{
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"offload_param": {
"device": "cpu",
"pin_memory": true
},
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 0,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 0,
"stage3_max_reuse_distance": 0,
"stage3_gather_16bit_weights_on_model_save": true
},
"bf16": {
"enabled": "auto"
},
"fp16": {
"enabled": "auto",
"auto_cast": false,
"loss_scale": 0,
"initial_scale_power": 32,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"scheduler": {
"type": "WarmupLR",
"optimizer": {
"type": "AdamW",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
"lr": "auto",
"betas": [
0.9,
0.999
],
"eps": 1e-8,
"weight_decay": "auto"
}
},
"zero_optimization": {
"stage": 2,
"overlap_comm": true,
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"contiguous_gradients": true,
"reduce_bucket_size": "auto",
"reduce_scatter": true
"scheduler": {
"type": "OneCycle",
"params": {
"cycle_min_lr": 0.00001,
"cycle_max_lr": 0.00003,
"cycle_first_step_size": 120
}
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 5,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false,
"round_robin_gradients": true
"wall_clock_breakdown": false
}

View File

@@ -0,0 +1,64 @@
base_model: tiiuae/falcon-7b
base_model_config: tiiuae/falcon-7b
trust_remote_code: true
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
load_in_4bit: false
gptq: false
strict: false
push_dataset_to_hub:
datasets:
- path: teknium/GPT4-LLM-Cleaned
type: alpaca:chat
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
adapter: lora
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 16
lora_alpha: 32
lora_dropout: 0.0
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: falcon-7b
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./falcon-7b
batch_size: 2
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 40
eval_steps: 5
save_steps: 43
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
pad_token: "<|endoftext|>"
bos_token: ">>ABSTRACT<<"
eos_token: "<|endoftext|>"

View File

@@ -0,0 +1,64 @@
base_model: tiiuae/falcon-7b
base_model_config: tiiuae/falcon-7b
trust_remote_code: true
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: false
load_in_4bit: false
gptq: false
strict: false
push_dataset_to_hub:
datasets:
- path: teknium/GPT4-LLM-Cleaned
type: alpaca:chat
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.0
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: falcon-7b
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./falcon-7b
batch_size: 2
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 40
eval_steps: 5
save_steps: 43
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
pad_token: "<|endoftext|>"
bos_token: ">>ABSTRACT<<"
eos_token: "<|endoftext|>"

View File

@@ -0,0 +1,8 @@
# LLaMa 7B using LoRA
This is a good place to start for beginners. This will run on an NVIDIA RTX4090 with no other changes needed.
```shell
accelerate launch scripts/finetune.py examples/4bit-lora-7b/config.yml
```

View File

@@ -0,0 +1,62 @@
base_model: Neko-Institute-of-Science/LLaMA-7B-4bit-128g
base_model_config: Neko-Institute-of-Science/LLaMA-7B-4bit-128g
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
trust_remote_code:
load_in_8bit: true
gptq: true
datasets:
- path: vicgalle/alpaca-gpt4
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project: llama-7b-lora-int4
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./llama-7b-lora-int4
batch_size: 1
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
fp16: true
bf16: false
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 5
xformers_attention:
flash_attention:
gradient_checkpointing: true
gptq_groupsize: 128
gptq_model_v1: false
warmup_steps: 20
eval_steps: 110
save_steps: 660
debug:
deepspeed:
weight_decay: 0.0001
fsdp:
fsdp_config:
tokens:
pad_token: "[PAD]"
bos_token: "<s>"
eos_token: "</s>"
unk_token: "<unk>"

View File

@@ -0,0 +1,67 @@
base_model: openlm-research/open_llama_3b_600bt_preview
base_model_config: openlm-research/open_llama_3b_600bt_preview
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
push_dataset_to_hub:
datasets:
- path: teknium/GPT4-LLM-Cleaned
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter: lora
lora_model_dir:
sequence_len: 256
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.0
lora_target_modules:
- gate_proj
- down_proj
- up_proj
- q_proj
- v_proj
- k_proj
- o_proj
lora_fan_in_fan_out:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./lora-out
batch_size: 16
micro_batch_size: 4
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: false
fp16: true
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
eval_steps: 50
save_steps:
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
bos_token: "<s>"
eos_token: "</s>"
unk_token: "<unk>"

View File

@@ -0,0 +1,6 @@
# MPT-7B
```shell
accelerate launch scripts/finetune.py examples/mpt-7b/config.yml
```

View File

@@ -0,0 +1,58 @@
base_model: mosaicml/mpt-7b
base_model_config: mosaicml/mpt-7b
tokenizer_type: AutoTokenizer
trust_remote_code: true # required for mpt as their model class is not merged into transformers yet
load_in_8bit: false
datasets:
- path: vicgalle/alpaca-gpt4
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project: mpt-alpaca-7b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./mpt-alpaca-7b
batch_size: 1
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 5
xformers_attention:
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
eval_steps: 110
save_steps: 660
debug:
deepspeed:
weight_decay: 0.0001
fsdp:
fsdp_config:
tokens:
pad_token: "<|padding|>"
bos_token: "<|endoftext|>"
eos_token: "<|endoftext|>"
unk_token: "<|endoftext|>"

View File

@@ -0,0 +1,6 @@
# RedPajama 3B preview release
```shell
accelerate launch scripts/finetune.py examples/redpajama/config-3b.yml
```

View File

@@ -0,0 +1,59 @@
base_model: togethercomputer/RedPajama-INCITE-Chat-3B-v1
base_model_config: togethercomputer/RedPajama-INCITE-Chat-3B-v1
model_type: GPTNeoXForCausalLM
tokenizer_type: GPTNeoXTokenizer
trust_remote_code:
load_in_8bit: false
datasets:
- path: vicgalle/alpaca-gpt4
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- q_proj
- v_proj
lora_fan_in_fan_out: false
wandb_project: redpajama-alpaca-3b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./redpajama-alpaca-3b
batch_size: 4
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 5
xformers_attention:
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
eval_steps: 110
save_steps: 660
debug:
deepspeed:
weight_decay: 0.0001
fsdp:
fsdp_config:
tokens:
pad_token: "<|padding|>"
bos_token: "<|endoftext|>"
eos_token: "<|endoftext|>"
unk_token: "<|endoftext|>"

View File

@@ -0,0 +1,55 @@
base_model: replit/replit-code-v1-3b
base_model_config: replit/replit-code-v1-3b
trust_remote_code: true
load_in_8bit: false
datasets:
- path: vicgalle/alpaca-gpt4
type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
adapter: lora
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
- Wqkv
- mlp_up
- mlp_down
lora_fan_in_fan_out:
wandb_project: lora-replit
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./lora-replit
batch_size: 8
micro_batch_size: 1
num_epochs: 3
optimizer:
torchdistx_path:
lr_scheduler:
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
gradient_checkpointing:
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20
eval_steps: 50
save_steps:
debug:
deepspeed:
weight_decay: 0
fsdp:
fsdp_config:
#special_tokens:

BIN
image/axolotl.png Normal file (binary image added, 935 KiB; not shown)

1
requirements-tests.txt Normal file
View File

@@ -0,0 +1 @@
pytest

View File

@@ -1,14 +1,19 @@
peft @ git+https://github.com/huggingface/peft.git
transformers @ git+https://github.com/huggingface/transformers.git
attrdict
bitsandbytes>=0.39.0
addict
fire
PyYAML==6.0
black
bitsandbytes
datasets
accelerate
accelerate>=0.19.0
sentencepiece
wandb
flash-attn
deepspeed
einops
xformers
# qlora things
bert-score==0.3.13
evaluate==0.4.0
rouge-score==0.1.2
scipy
scikit-learn==1.2.2

View File

@@ -1,227 +1,38 @@
import importlib
import logging
import math
import os
import random
import signal
import sys
from hashlib import md5
from pathlib import Path
from typing import Optional, List, Dict, Any, Union
import bitsandbytes as bnb
import fire
import torch
import transformers
import yaml
from attrdict import AttrDefault
from datasets import load_dataset, IterableDataset, Dataset, load_from_disk
from torch import nn
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
LlamaForCausalLM,
LlamaTokenizer,
EarlyStoppingCallback,
GenerationConfig,
)
# add src to the pythonpath so we don't need to pip install this
from transformers.trainer_pt_utils import get_parameter_names
from axolotl.utils.tokenization import check_dataset_labels
from axolotl.utils.validation import validate_config
from axolotl.utils.dict import DictDefault
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
src_dir = os.path.join(project_root, "src")
sys.path.insert(0, src_dir)
from axolotl.datasets import TokenizedPromptDataset, ConstantLengthDataset
from axolotl.prompt_tokenizers import (
AlpacaPromptTokenizingStrategy,
ShareGPTPromptTokenizingStrategy,
LLAMA_DEFAULT_PAD_TOKEN,
GPTeacherPromptTokenizingStrategy,
OpenAssistantPromptTokenizingStrategy, AlpacaReflectionPTStrategy,
)
from axolotl.prompters import AlpacaPrompter, GPTeacherPrompter, ShareGPTPrompter, ReflectAlpacaPrompter
from axolotl.utils.data import load_prepare_datasets
from axolotl.utils.models import load_model, load_tokenizer
from axolotl.utils.trainer import setup_trainer
from axolotl.utils.wandb import setup_wandb_env_vars
logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
DEFAULT_DATASET_PREPARED_PATH = "last_run_prepared"
def setup_wandb_env_vars(cfg):
if cfg.wandb_project and len(cfg.wandb_project) > 0:
os.environ["WANDB_PROJECT"] = cfg.wandb_project
cfg.use_wandb = True
if cfg.wandb_watch and len(cfg.wandb_watch) > 0:
os.environ["WANDB_WATCH"] = cfg.wandb_watch
if cfg.wandb_log_model and len(cfg.wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
if cfg.wandb_run_id and len(cfg.wandb_run_id) > 0:
os.environ["WANDB_RUN_ID"] = cfg.wandb_run_id
def load_model(
base_model,
base_model_config,
model_type,
tokenizer_type,
cfg,
adapter="lora",
inference: bool = False,
):
# TODO refactor as a kwarg
load_in_8bit = cfg.load_in_8bit
tokenizer = None
is_llama_derived_model = "llama" in base_model or "llama" in cfg.model_type.lower()
if adapter != "lora":
raise NotImplementedError(f"{adapter} peft adapter not available")
if is_llama_derived_model and cfg.flash_attention:
if cfg.device not in ["mps", "cpu"] and inference is False:
from axolotl.flash_attn import replace_llama_attn_with_flash_attn
logging.info("patching with flash attention")
replace_llama_attn_with_flash_attn()
torch_dtype = (torch.float16 if cfg.load_in_8bit or cfg.fp16 else torch.float32,)
try:
if cfg.load_4bit:
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
replace_peft_model_with_int4_lora_model,
)
replace_peft_model_with_int4_lora_model()
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
PeftModel,
)
except Exception as e:
logging.exception(e)
raise e
try:
if cfg.load_4bit and is_llama_derived_model:
from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
from huggingface_hub import snapshot_download
cache_model_path = Path(snapshot_download(base_model))
files = (
list(cache_model_path.glob("*.pt"))
+ list(cache_model_path.glob("*.safetensors"))
+ list(cache_model_path.glob("*.bin"))
)
if len(files) > 0:
model_path = str(files[0])
else:
logging.warning(
"unable to find a cached model file, this will likely fail..."
)
model_path = str(cache_model_path)
model, tokenizer = load_llama_model_4bit_low_ram(
base_model_config if base_model_config else base_model,
model_path,
device_map=cfg.device_map,
groupsize=cfg.gptq_groupsize if cfg.gptq_groupsize else -1,
is_v1_model=cfg.gptq_model_v1
if cfg.gptq_model_v1 is not None
else True,
)
load_in_8bit = False
elif is_llama_derived_model:
model = LlamaForCausalLM.from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
)
else:
model = getattr(transformers, model_type).from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
)
except Exception as e:
logging.error(
"Exception raised attempting to load model, retrying with AutoModelForCausalLM"
)
logging.exception(e)
model = AutoModelForCausalLM.from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
)
if not tokenizer:
try:
if is_llama_derived_model:
tokenizer = LlamaTokenizer.from_pretrained(model)
else:
tokenizer = getattr(transformers, tokenizer_type).from_pretrained(model)
except:
tokenizer = AutoTokenizer.from_pretrained(base_model)
logging.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
logging.debug(f"BOS: {tokenizer.bos_token_id} / {tokenizer.bos_token}")
logging.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
logging.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
if tokenizer.__class__.__name__ in ["LlamaTokenizer", "LlamaTokenizerFast"]:
tokenizer.pad_token = LLAMA_DEFAULT_PAD_TOKEN
if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if load_in_8bit and not cfg.load_4bit:
logging.info("converting model w/ prepare_model_for_int8_training")
model = prepare_model_for_int8_training(model)
lora_config = LoraConfig(
r=cfg.lora_r,
lora_alpha=cfg.lora_alpha,
target_modules=cfg.lora_target_modules,
lora_dropout=cfg.lora_dropout,
fan_in_fan_out=cfg.lora_fan_in_fan_out,
bias="none",
task_type="CAUSAL_LM",
)
if cfg.lora_model_dir:
model = PeftModel.from_pretrained(
model,
cfg.lora_model_dir,
device_map=cfg.device_map,
torch_dtype=torch.float16,
)
else:
model = get_peft_model(model, lora_config)
if cfg.ddp:
model.to(f"cuda:{cfg.local_rank}")
if cfg.load_4bit:
# Scales to half
logging.info("Fitting 4bit scales and zeros to half")
for n, m in model.named_modules():
if "Autograd4bitQuantLinear" in str(type(m)) or "Linear4bitLt" in str(
type(m)
):
if hasattr(m, "is_v1_model") and m.is_v1_model:
m.zeros = m.zeros.half()
m.scales = m.scales.half()
m.bias = m.bias.half()
# TODO resume_from_checkpoint handling
model.print_trainable_parameters()
return model, tokenizer, lora_config
def choose_device(cfg):
def get_device():
if torch.cuda.is_available():
return "cuda"
return f"cuda:{cfg.local_rank}"
else:
try:
if torch.backends.mps.is_available():
@@ -236,66 +47,48 @@ def choose_device(cfg):
cfg.device_map = {"": cfg.device}
def check_dataset_labels(dataset, tokenizer):
from termcolor import colored
# the dataset is already shuffled, so let's just check the first 5 elements
for idx in range(5):
# Get the input_ids, labels, and attention_mask from the dataset
input_ids = dataset[idx]["input_ids"]
labels = dataset[idx]["labels"]
attention_mask = dataset[idx]["attention_mask"]
# You can compare the input_ids and labels element-wise
# Remember to ignore positions with IGNORE_TOKEN_ID (if you use it) or attention_mask equal to 0
colored_tokens = []
for i, (input_id, label_id, mask) in enumerate(
zip(input_ids, labels, attention_mask)
):
decoded_input_token = tokenizer.decode(input_id)
# Choose the color based on whether the label has the ignore value or not
color = (
"red" if label_id == -100 else ("yellow" if label_id == 0 else "green")
)
colored_token = colored(decoded_input_token, color) + colored(
f"({label_id}, {mask})", "white"
)
colored_tokens.append(colored_token)
logging.info(" ".join(colored_tokens))
logging.info("\n\n\n")
def get_multi_line_input() -> Optional[str]:
print("Give me an instruction (Ctrl + D to finish): ")
instruction = ""
for line in sys.stdin:
instruction += line
# instruction = pathlib.Path("/proc/self/fd/0").read_text()
return instruction
def do_inference(cfg, model, tokenizer):
def do_inference(cfg, model, tokenizer, prompter="AlpacaPrompter"):
tokenizer.add_special_tokens({"unk_token": "<unk>"})
tokenizer.add_special_tokens({"bos_token": "<s>"})
tokenizer.add_special_tokens({"eos_token": "</s>"})
instruction = "Tell me a joke about dromedaries."
input = ""
prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n".format(
instruction=instruction, input=input
)
batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
prompter_module = getattr(importlib.import_module("axolotl.prompters"), prompter)
model.eval()
with torch.no_grad():
# gc = GenerationConfig() # TODO swap out and use this
generated = model.generate(
inputs=batch["input_ids"].to("cuda"),
do_sample=True,
use_cache=True,
repetition_penalty=1.1,
max_new_tokens=100,
temperature=0.9,
top_p=0.95,
top_k=40,
return_dict_in_generate=True,
output_attentions=False,
output_hidden_states=False,
output_scores=False,
)
print(tokenizer.decode(generated["sequences"].cpu().tolist()[0]))
while True:
# support for multiline inputs
instruction = get_multi_line_input()
if not instruction:
return
prompt: str = next(prompter_module().build_prompt(instruction=instruction))
batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
model.eval()
with torch.no_grad():
# gc = GenerationConfig() # TODO swap out and use this
generated = model.generate(
inputs=batch["input_ids"].to(cfg.device),
do_sample=True,
use_cache=True,
repetition_penalty=1.1,
max_new_tokens=100,
temperature=0.9,
top_p=0.95,
top_k=40,
return_dict_in_generate=True,
output_attentions=False,
output_hidden_states=False,
output_scores=False,
)
print(tokenizer.decode(generated["sequences"].cpu().tolist()[0]))
def choose_config(path: Path):
@@ -324,96 +117,8 @@ def choose_config(path: Path):
return chosen_file
def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
total_num_steps = int(
math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
)
warmup_steps = min(int(0.03 * total_num_steps), 100)
logging_steps = max(min(int(0.005 * total_num_steps), 10), 1)
save_steps = eval_steps = min(int(0.05 * total_num_steps), 200)
training_arguments_kwargs = {}
if cfg.bf16 == "full":
training_arguments_kwargs["bf16_full_eval"] = True
else:
training_arguments_kwargs["bf16"] = cfg.bf16
training_arguments_kwargs["tf32"] = cfg.tf32
training_arguments_kwargs["warmup_steps"] = warmup_steps
training_arguments_kwargs["logging_steps"] = logging_steps
if cfg.gradient_checkpointing is not None:
training_arguments_kwargs["gradient_checkpointing"] = cfg.gradient_checkpointing
training_args = transformers.TrainingArguments(
per_device_train_batch_size=cfg.micro_batch_size,
gradient_accumulation_steps=cfg.gradient_accumulation_steps,
num_train_epochs=cfg.num_epochs,
learning_rate=cfg.learning_rate,
evaluation_strategy="steps" if cfg.val_set_size > 0 else "no",
save_strategy="steps",
eval_steps=eval_steps if cfg.val_set_size > 0 else None,
save_steps=save_steps,
output_dir=cfg.output_dir,
save_total_limit=3,
load_best_model_at_end=True if cfg.val_set_size > 0 else False,
ddp_find_unused_parameters=False if cfg.ddp else None,
group_by_length=cfg.group_by_length,
report_to="wandb" if cfg.use_wandb else None,
run_name=cfg.wandb_run_id if cfg.use_wandb else None,
**training_arguments_kwargs,
)
decay_parameters = get_parameter_names(model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if n in decay_parameters],
"weight_decay": training_args.weight_decay,
},
{
"params": [
p for n, p in model.named_parameters() if n not in decay_parameters
],
"weight_decay": 0.0,
},
]
trainer_kwargs = {}
if cfg.load_in_8bit and not cfg.load_4bit:
adam_bnb_optim = bnb.optim.Adam8bit(
optimizer_grouped_parameters,
betas=(training_args.adam_beta1, training_args.adam_beta2),
eps=training_args.adam_epsilon,
lr=training_args.learning_rate,
)
# TODO optionally use torch.optim.OneCycleLR
lr_scheduler = transformers.get_cosine_schedule_with_warmup(
adam_bnb_optim,
training_args.warmup_steps,
total_num_steps,
)
trainer_kwargs["optimizers"] = (adam_bnb_optim, lr_scheduler)
# TODO on_save callback to sync checkpoints to GCP/AWS in background
if cfg.early_stopping_patience:
early_stop_cb = EarlyStoppingCallback(
cfg.early_stopping_patience,
)
trainer_kwargs["callbacks"] = [early_stop_cb]
trainer = transformers.Trainer(
model=model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
args=training_args,
data_collator=transformers.DataCollatorForSeq2Seq(
tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
),
**trainer_kwargs,
)
return trainer
def check_not_in(list1: List[str], list2: Union[Dict[str, Any], List[str]]) -> bool:
return not any(el in list2 for el in list1)
def train(
@@ -426,12 +131,13 @@ def train(
# load the config from the yaml file
with open(config, "r") as f:
cfg: AttrDefault = AttrDefault(lambda: None, yaml.load(f, Loader=yaml.Loader))
cfg: DictDefault = DictDefault(yaml.load(f, Loader=yaml.Loader))
# if there are any options passed in the cli, if it is something that seems valid from the yaml,
# then overwrite the value
cfg_keys = dict(cfg).keys()
cfg_keys = cfg.keys()
for k in kwargs:
if k in cfg_keys:
# if not strict, allow writing to cfg even if it's not in the yml already
if k in cfg_keys or cfg.strict is False:
# handle booleans
if isinstance(cfg[k], bool):
cfg[k] = bool(kwargs[k])
@@ -443,7 +149,7 @@ def train(
cfg.world_size = int(os.environ.get("WORLD_SIZE", 1))
cfg.local_rank = int(os.environ.get("LOCAL_RANK", 0))
choose_device(cfg)
cfg.ddp = cfg.world_size != 1
cfg.ddp = cfg.ddp if cfg.ddp is not None else cfg.world_size != 1
if cfg.ddp:
cfg.device_map = {"": int(os.environ.get("LOCAL_RANK", 0))}
cfg.gradient_accumulation_steps = (
@@ -457,133 +163,64 @@ def train(
cfg.fp16 = True
cfg.bf16 = False
validate_config(cfg)
# load the tokenizer first
logging.info("loading tokenizer...")
tokenizer = load_tokenizer(
cfg.base_model_config,
cfg.tokenizer_type,
cfg
)
if check_not_in(["inference", "shard", "merge_lora"], kwargs): # don't need to load dataset for these
train_dataset, eval_dataset = load_prepare_datasets(
tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH
)
if cfg.debug or "debug" in kwargs:
logging.info("check_dataset_labels...")
check_dataset_labels(
train_dataset.select(
[random.randrange(0, len(train_dataset) - 1) for i in range(5)]
),
tokenizer,
)
if prepare_ds_only:
logging.info("Finished preparing dataset. Exiting...")
return
# Load the model and tokenizer
logging.info("loading model, tokenizer, and lora_config...")
model, tokenizer, lora_config = load_model(
logging.info("loading model and peft_config...")
model, peft_config = load_model(
cfg.base_model,
cfg.base_model_config,
cfg.model_type,
cfg.tokenizer_type,
tokenizer,
cfg,
adapter=cfg.adapter,
inference=("inference" in kwargs),
)
if "merge_lora" in kwargs and cfg.adapter is not None:
logging.info("running merge of LoRA with base model")
model = model.merge_and_unload()
model.to(dtype=torch.float16)
if cfg.local_rank == 0:
logging.info("saving merged model")
model.save_pretrained(str(Path(cfg.output_dir) / "merged"))
return
if "inference" in kwargs:
logging.info("calling do_inference function")
do_inference(cfg, model, tokenizer)
return
max_packed_sequence_len = (
cfg.max_packed_sequence_len if cfg.max_packed_sequence_len else cfg.sequence_len
)
max_packed_sequence_len = min(
max_packed_sequence_len, cfg.sequence_len
) # make sure we don't accidentally set it larger than sequence_len
ds_hash = str(
md5(
(
str(max_packed_sequence_len)
+ "@"
+ "|".join(sorted([f"{d.path}:{d.type}" for d in cfg.datasets]))
).encode("utf-8")
).hexdigest()
)
prepared_ds_path = (
Path(cfg.dataset_prepared_path) / ds_hash
if cfg.dataset_prepared_path
else Path(DEFAULT_DATASET_PREPARED_PATH) / ds_hash
)
if any(prepared_ds_path.glob("*")):
logging.info("Loading prepared dataset from disk...")
dataset = load_from_disk(str(prepared_ds_path))
logging.info("Prepared dataset loaded from disk...")
else:
logging.info("Loading raw datasets...")
datasets = []
for d in cfg.datasets:
ds_from_hub = False
try:
load_dataset(d.path, streaming=True)
ds_from_hub = True
except FileNotFoundError:
pass
# prefer local dataset, even if hub exists
if Path(d.path).exists():
ds: IterableDataset = load_dataset(
"json", data_files=d.path, streaming=True, split=None
)
elif ds_from_hub:
ds = load_dataset(d.path, streaming=True)
else:
raise Exception("unhandled dataset load")
if d.type == "alpaca":
ds_strategy = AlpacaPromptTokenizingStrategy(
AlpacaPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
datasets.append(ds_wrapper)
elif d.type == "oasst":
ds_strategy = OpenAssistantPromptTokenizingStrategy(
AlpacaPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
datasets.append(ds_wrapper)
elif d.type == "gpteacher":
ds_strategy = GPTeacherPromptTokenizingStrategy(
GPTeacherPrompter(),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
datasets.append(ds_wrapper)
elif d.type == "reflection":
ds_strategy = AlpacaReflectionPTStrategy(
ReflectAlpacaPrompter(),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
datasets.append(ds_wrapper)
elif d.type == "sharegpt":
ds_strategy = ShareGPTPromptTokenizingStrategy(
ShareGPTPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
datasets.append(ds_wrapper)
else:
logging.error(f"unhandled prompt tokenization strategy: {d.type}")
constant_len_dataset = ConstantLengthDataset(
tokenizer,
datasets,
seq_length=max_packed_sequence_len,
)
logging.info("merging, packing, shuffling, and splitting master dataset")
dataset = Dataset.from_list([_ for _ in constant_len_dataset]).train_test_split(
test_size=cfg.val_set_size, shuffle=True, seed=42
)
if cfg.local_rank == 0:
logging.info(f"Saving prepared dataset to disk... {prepared_ds_path}")
dataset.save_to_disk(prepared_ds_path)
if prepare_ds_only:
logging.info("Finished preparing dataset. Exiting...")
return
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
if cfg.debug:
check_dataset_labels(
train_dataset.select([random.randrange(0, len(train_dataset) - 1)]),
tokenizer,
)
if "shard" in kwargs:
model.save_pretrained(cfg.output_dir)
return
trainer = setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer)
@@ -594,8 +231,9 @@ def train(
model = torch.compile(model)
# go ahead and presave, so we have the adapter config available to inspect
logging.info(f"Pre-saving adapter config to {cfg.output_dir}")
lora_config.save_pretrained(cfg.output_dir)
if peft_config:
logging.info(f"Pre-saving adapter config to {cfg.output_dir}")
peft_config.save_pretrained(cfg.output_dir)
# In case we want to stop early with ctrl+c, this is a nice to have to save the pretrained model
if cfg.local_rank == 0:
@@ -605,14 +243,30 @@ def train(
)
logging.info("Starting trainer...")
trainer.train(resume_from_checkpoint=cfg.resume_from_checkpoint)
if cfg.group_by_length:
logging.info("hang tight... sorting dataset for group_by_length")
resume_from_checkpoint = cfg.resume_from_checkpoint
if cfg.resume_from_checkpoint is None and cfg.auto_resume_from_checkpoints:
possible_checkpoints = [
str(cp) for cp in Path(cfg.output_dir).glob("checkpoint-*")
]
if len(possible_checkpoints) > 0:
sorted_paths = sorted(
possible_checkpoints, key=lambda path: int(path.split("-")[-1])
)
resume_from_checkpoint = sorted_paths[-1]
logging.info(
f"Using Auto-resume functionality to start with checkpoint at {resume_from_checkpoint}"
)
trainer.train(resume_from_checkpoint=resume_from_checkpoint)
logging.info(f"Training Completed!!! Saving pre-trained model to {cfg.output_dir}")
# TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
# only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
if cfg.local_rank == 0:
# TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
logging.info(
f"Training Completed!!! Saving pre-trained model to {cfg.output_dir}"
)
model.save_pretrained(cfg.output_dir)
# trainer.save_model(cfg.output_dir) # TODO this may be needed for deepspeed to work? need to review another time
if __name__ == "__main__":

View File

@@ -0,0 +1,10 @@
#!/bin/bash
echo $PUBLIC_KEY >> ~/.ssh/authorized_keys
chmod 700 -R ~/.ssh
# Start the SSH service in the background
service ssh start
# Execute the passed arguments (CMD)
exec "$@"

43
scripts/setup-runpod.sh Normal file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
export WANDB_MODE=offline
export WANDB_CACHE_DIR=/workspace/data/wandb-cache
mkdir -p $WANDB_CACHE_DIR
mkdir -p /workspace/data/huggingface-cache/{hub,datasets}
export HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
export HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
export TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
export NCCL_P2P_DISABLE=1
nvidia-smi
num_gpus=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
gpu_indices=$(seq 0 $((num_gpus - 1)) | paste -sd "," -)
export CUDA_VISIBLE_DEVICES=$gpu_indices
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
apt-get update
apt-get install -y build-essential ninja-build vim git-lfs
git lfs install
pip3 install --force-reinstall https://download.pytorch.org/whl/nightly/cu117/torch-2.0.0.dev20230301%2Bcu117-cp38-cp38-linux_x86_64.whl --index-url https://download.pytorch.org/whl/nightly/cu117
if [ -z "${TORCH_CUDA_ARCH_LIST}" ]; then # only set this if not set yet
# this covers most common GPUs that the installed version of pytorch supports
# python -c "import torch; print(torch.cuda.get_arch_list())"
export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
fi
# install flash-attn and deepspeed from pre-built wheels for this specific container b/c these take forever to install
mkdir -p /workspace/wheels
cd /workspace/wheels
curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
pip install deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
pip install flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
pip install "peft @ git+https://github.com/huggingface/peft.git@main" --force-reinstall --no-dependencies
cd /workspace/
git clone https://github.com/OpenAccess-AI-Collective/axolotl.git
cd axolotl
pip install -e .[int4]
mkdir -p ~/.cache/huggingface/accelerate/
cp configs/accelerate/default_config.yaml ~/.cache/huggingface/accelerate/default_config.yaml

View File

@@ -5,23 +5,27 @@ with open("./requirements.txt", "r") as requirements_file:
# don't include peft yet until we check the int4
# need to manually install peft for now...
reqs = [r.strip() for r in requirements_file.readlines() if "peft" not in r]
reqs = [r for r in reqs if r[0] != "#"]
reqs = [r for r in reqs if r and r[0] != "#"]
for r in reqs:
install_requires.append(r)
setup(
name='axolotl',
version='0.1',
name="axolotl",
version="0.1",
description="You know you're going to axolotl questions",
package_dir={'': 'src'},
package_dir={"": "src"},
packages=find_packages(),
install_requires=install_requires,
extras_require={
'int4': [
"int4": [
"alpaca_lora_4bit @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
],
'int4_triton': [
"int4_triton": [
"alpaca_lora_4bit[triton] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
],
"extras": [
"flash-attn",
"deepspeed",
],
},
)

View File

@@ -1,3 +1,4 @@
import logging
from typing import List
import torch
@@ -30,6 +31,7 @@ class TokenizedPromptDataset(IterableDataset):
except InvalidDataException:
pass
# TODO this isn't the best since it can't interleave datasets
class ConstantLengthDataset(IterableDataset):
"""
@@ -39,6 +41,7 @@ class ConstantLengthDataset(IterableDataset):
dataset (dataset.Dataset): Dataset with text files.
seq_length (int): Length of token sequences to return.
"""
def __init__(
self,
tokenizer,
@@ -92,11 +95,19 @@ class ConstantLengthDataset(IterableDataset):
: self.seq_length
]
labels = torch.cat(buffer["labels"], dim=-1)[: self.seq_length]
yield {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
}
if (
labels.size() == input_ids.size()
and attention_mask.size() == input_ids.size()
):
yield {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
}
else:
logging.warning(
f"dropping batch due to tensor size mismatch input_ids: {input_ids.size()}, labels: {labels.size()}, attention_mask: {attention_mask.size()}"
)
buffer = {"input_ids": [], "attention_mask": [], "labels": []}
buffer_len = 0
@@ -112,11 +123,15 @@ class ConstantLengthDataset(IterableDataset):
attention_mask.append(1)
labels.append(self.concat_token_id)
input_ids_with_concat = torch.tensor(input_ids, dtype=self.tokens_dtype)
input_ids_with_concat = torch.tensor(
input_ids, dtype=self.tokens_dtype
)
attention_mask_with_concat = torch.tensor(
attention_mask, dtype=self.tokens_dtype
)
labels_with_concat = torch.tensor(labels, dtype=self.tokens_dtype)
labels_with_concat = torch.tensor(
labels, dtype=self.tokens_dtype
)
buffer["input_ids"].append(input_ids_with_concat)
buffer["attention_mask"].append(attention_mask_with_concat)

View File

@@ -0,0 +1,14 @@
import importlib
def load(strategy, tokenizer, cfg):
try:
load_fn = "load"
if strategy.split(".")[-1].startswith("load_"):
load_fn = strategy.split(".")[-1]
strategy = ".".join(strategy.split(".")[:-1])
m = importlib.import_module(f".{strategy}", "axolotl.prompt_strategies")
fn = getattr(m, load_fn)
return fn(tokenizer, cfg)
except:
pass
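A hedged usage sketch of this dispatcher: a bare strategy name resolves to that module's load(), a dotted name ending in load_* picks a specific loader, and anything that fails to import returns None through the bare except. The tokenizer and cfg values below are placeholders standing in for the ones scripts/finetune.py builds:
```python
# hypothetical usage of the strategy dispatcher above
from transformers import LlamaTokenizer
from axolotl.prompt_strategies import load
from axolotl.utils.dict import DictDefault

# placeholder tokenizer/cfg; a real run uses the ones from scripts/finetune.py
tokenizer = LlamaTokenizer.from_pretrained("openlm-research/open_llama_3b_600bt_preview")
cfg = DictDefault({"train_on_inputs": False, "sequence_len": 2048})

# "pygmalion" -> axolotl.prompt_strategies.pygmalion.load(tokenizer, cfg)
strat = load("pygmalion", tokenizer, cfg)

# "alpaca_chat.load_qa" -> axolotl.prompt_strategies.alpaca_chat.load_qa(tokenizer, cfg)
qa_strat = load("alpaca_chat.load_qa", tokenizer, cfg)

# unknown strategies fall through the bare except and return None
assert load("does_not_exist", tokenizer, cfg) is None
```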

View File

@@ -0,0 +1,32 @@
from axolotl.prompt_tokenizers import (
AlpacaPromptTokenizingStrategy,
InstructionPromptTokenizingStrategy,
)
from axolotl.prompters import AlpacaPrompter, PromptStyle
def load(tokenizer, cfg):
return AlpacaPromptTokenizingStrategy(
AlpacaPrompter(PromptStyle.chat.value),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
prompt["question"],
"",
prompt["answer"],
)
def load_qa(tokenizer, cfg):
return AlpacaQAPromptTokenizingStrategy(
AlpacaPrompter(PromptStyle.chat.value),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
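For reference, a sketch of the record shape the QA strategy above expects, inferred from parse_instruction_fields: "question" becomes the instruction, the input stays empty, and "answer" becomes the response. The tokenizer, config values, and sample text are illustrative:
```python
# illustrative record for AlpacaQAPromptTokenizingStrategy
from transformers import LlamaTokenizer
from axolotl.prompt_strategies.alpaca_chat import load_qa
from axolotl.utils.dict import DictDefault

tokenizer = LlamaTokenizer.from_pretrained("openlm-research/open_llama_3b_600bt_preview")  # placeholder
strategy = load_qa(tokenizer, DictDefault({"train_on_inputs": False, "sequence_len": 2048}))

sample = {"question": "What is the capital of France?", "answer": "Paris."}
tokenized = strategy.tokenize_prompt(sample)
# with train_on_inputs: false, the question portion of the labels is masked out
print(len(tokenized["input_ids"]), tokenized["labels"][:8])
```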

View File

@@ -0,0 +1,11 @@
from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy
from axolotl.prompters import AlpacaPrompter, PromptStyle
def load(tokenizer, cfg):
return AlpacaPromptTokenizingStrategy(
AlpacaPrompter(PromptStyle.instruct),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)

View File

@@ -0,0 +1,149 @@
from typing import Union, Generator
import yaml
from axolotl.prompt_tokenizers import InstructionPromptTokenizingStrategy
class CreativeAnsweringPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
question = prompt["instruction"]
answer = prompt[
"revision"
] # don't use prompt[answer], that's data we don't want in the dataset
return (
question,
"",
answer,
)
class CreativeCritiquePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
user_prompt = """Given the following Question and Response, critique the Response on a scale of 1-10. You should critique the answer in the following criteria:
refusal: whether the Response refuses to answer the Question. Responses containing language similar to "As an AI language model, I am not capable of ..." is a form of refusal and should have a low refusal score. 10 is no refusal, 0 is refuses to answer the question.
prescriptive bias: whether the Response attempts to prescribe or dictate certain actions or behaviors of the user. 10 no prescriptive bias present, 0 means there is prescriptive bias.
creativity: score how creative the Response is. 10 is most creative, 0 lacks creativity
comprehensiveness: score how comprehensive and in-depth the Response is. 10 is most comprehensive and in-depth, 0 lacks coverage and depth
Then for each criteria, give a detailed explanation of why the response doesn't meet the criteria (don't reference the score), what it did right, and how it could be improved for that criteria.
Output your answer in YAML format like so:
scores:
refusal: <0-10>
prescriptive_bias: <0-10>
creativity: <0-10>
comprehensiveness: <0-10>
critiques:
refusal:
explanation: ...
improvements: ...
prescriptive_bias:
explanation: ...
improvements: ...
creativity:
explanation: ...
improvements: ...
comprehensiveness:
explanation: ...
improvements: ...
Question: {question}
Answer: {answer}
"""
def parse_instruction_fields(self, prompt) -> (str, str, str):
scores = yaml.dump(
prompt["scores"], default_flow_style=False, Dumper=yaml.Dumper
)
critiques = yaml.dump(
prompt["critiques"], default_flow_style=False, Dumper=yaml.Dumper
)
evaluation = scores + critiques
question = prompt["instruction"]
answer = prompt["answer"]
return (
self.user_prompt.format(question=question, answer=answer),
"",
evaluation,
)
class CreativeRevisePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
user_prompt = """Definitions:
refusal: whether the Response refuses to answer the Question. Responses containing language similar to "As an AI language model, I am not capable of ..." is a form of refusal and should have a low refusal score. 10 is no refusal, 0 is refuses to answer the question.
prescriptive bias: whether the Response attempts to prescribe or dictate certain actions or behaviors of the user. 10 no prescriptive bias present, 0 means there is prescriptive bias.
creativity: score how creative the Response is. 10 is most creative, 0 lacks creativity
comprehensiveness: score how comprehensive and in-depth the Response is. 10 is most comprehensive and in-depth, 0 lacks coverage and depth
Given the following Question, Response, and Evaluation, revise the Response based on the Evaluation and recommendations for improvements. Reply only with the revised response.
Question: {question}
Answer: {answer}
Evaluation:
{evaluation}
"""
def parse_instruction_fields(self, prompt) -> (str, str, str):
scores = yaml.dump(
prompt["scores"], default_flow_style=False, Dumper=yaml.Dumper
)
critiques = yaml.dump(
prompt["critiques"], default_flow_style=False, Dumper=yaml.Dumper
)
evaluation = scores + critiques
question = prompt["instruction"]
answer = prompt["answer"]
return (
self.user_prompt.format(
question=question, answer=answer, evaluation=evaluation
),
"",
prompt["revision"],
)
class CreativePrompterBase:
system_prompt = ""
prompt_input = "{system_prompt}\nUSER: {instruction}\nASSISTANT:"
def build_prompt(
self,
instruction: str,
input: Union[None, str] = None,
output: Union[None, str] = None,
) -> Generator[str, None, None]:
if self.system_prompt:
res = f"{self.system_prompt}\nUSER: {instruction}\nASSISTANT:"
else:
res = f"USER: {instruction}\nASSISTANT:"
if output:
res = f"{res}{output}"
yield res
class CreativeAnswerPrompter(CreativePrompterBase):
system_prompt = "Answer the following question in a comprehensive, in-depth, and creative way. Additionally your response should be relevant, accurate, and free of any ambiguity."
class CreativeCritiquePrompter(CreativePrompterBase):
system_prompt = ""
class CreativeRevisePrompter(CreativePrompterBase):
system_prompt = ""
def load_answer(tokenizer, cfg):
return CreativeAnsweringPromptTokenizingStrategy(
CreativeAnswerPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
def load_critique(tokenizer, cfg):
return CreativeCritiquePromptTokenizingStrategy(
CreativeCritiquePrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
def load_revise(tokenizer, cfg):
return CreativeRevisePromptTokenizingStrategy(
CreativeRevisePrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)

View File

@@ -0,0 +1,110 @@
import copy
import logging
from collections import defaultdict
from typing import Generator
from axolotl.prompt_tokenizers import PromptTokenizingStrategy
IGNORE_TOKEN_ID = -100
class PygmalionPromptTokenizingStrategy(PromptTokenizingStrategy):
bot_prefix_token_ids = []
def __init__(self, prompter, tokenizer, *args, **kwargs):
super().__init__(prompter, tokenizer)
res = self._tokenize("<|model|>", add_eos_token=False, strip_bos_token=True)
self.bot_prefix_token_ids = res["input_ids"]
def tokenize_prompt(self, prompt):
result = {
"input_ids": [],
"attention_mask": [],
"labels": [],
}
current_len = 0
for i, part in enumerate(self.prompter.build_prompt(prompt["conversations"])):
role, message = part
if role == "system":
prefix = "<|system|>"
# this should include a bos token, no eos token, strip trailing "\n<START>"
if message.endswith("\n<START>"):
message = message[:-8]
res = self._tokenize(
prefix + "Persona: " + message.strip(),
add_eos_token=False,
strip_bos_token=False,
)
# everything from this is masked out from the labels
labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
elif role == "human":
prefix = "<|user|>"
res = self._tokenize(
prefix + " " + message.strip(),
add_eos_token=False,
strip_bos_token=True,
)
# everything from this is masked out from the labels
labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
elif role == "bot":
prefix = "<|model|>"
res = self._tokenize(
prefix + " " + message.strip(),
add_eos_token=True,
strip_bos_token=True,
)
# mask out the prefix token, rest is not masked out from labels
# make sure we create the labels first, otherwise we get incorrect lengths
labels = [IGNORE_TOKEN_ID] * len(self.bot_prefix_token_ids) + [
*copy.deepcopy(res["input_ids"])
][len(self.bot_prefix_token_ids) :]
else:
logging.warning(f"unknown role in conversation: {role}")
res = defaultdict(lambda: [])
input_ids = res["input_ids"]
input_len = len(input_ids)
result["input_ids"][current_len : current_len + input_len] = input_ids
result["attention_mask"][current_len : current_len + input_len] = [
1 if x != self.tokenizer.pad_token_id else 0 for x in input_ids
]
result["labels"][current_len : current_len + input_len] = labels
current_len += input_len
return result
def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
result = self.tokenizer(
prompt,
truncation=True,
max_length=self.sequence_len,
padding=False,
return_tensors=None,
)
if (
result["input_ids"][-1] != self.tokenizer.eos_token_id
and len(result["input_ids"]) < self.sequence_len
and add_eos_token
):
result["input_ids"].append(self.tokenizer.eos_token_id)
result["attention_mask"].append(1)
if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
result["input_ids"] = result["input_ids"][1:]
result["attention_mask"] = result["attention_mask"][1:]
result["labels"] = result["input_ids"].copy()
return result
class PygmalionPrompter:
def __init__(self, *args, **kwargs):
pass
def build_prompt(self, source, *args, **kwargs) -> Generator[str, None, None]:
for msg in source:
yield msg["role"], msg["value"]
def load(tokenizer, cfg):
return PygmalionPromptTokenizingStrategy(
PygmalionPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
)
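A sketch of the conversation format this strategy consumes, inferred from tokenize_prompt above: a "conversations" list of role/value turns, where system turns carry the persona (any trailing "\n<START>" is stripped), human turns are masked out of the labels, and bot turns (minus the <|model|> prefix) are what the model is trained on. The sample content is illustrative:
```python
# illustrative pygmalion-style record; roles map to <|system|>, <|user|>, <|model|>
sample = {
    "conversations": [
        {"role": "system", "value": "Persona of a helpful assistant.\n<START>"},
        {"role": "human", "value": "Hi, who are you?"},
        {"role": "bot", "value": "I'm your assistant. How can I help today?"},
    ]
}
# with a strategy built via this module's load(tokenizer, cfg):
#   tokenized = strategy.tokenize_prompt(sample)
# only the bot turn contributes unmasked labels
```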

View File

@@ -1,7 +1,12 @@
import abc
import copy
import functools
import logging
from transformers import PreTrainedTokenizer
from axolotl.prompters import IGNORE_TOKEN_ID
IGNORE_INDEX = -100
LLAMA_DEFAULT_PAD_TOKEN = "[PAD]"
LLAMA_DEFAULT_EOS_TOKEN = "</s>"
@@ -30,6 +35,20 @@ class PromptTokenizingStrategy(abc.ABC):
def tokenize_prompt(self, prompt):
pass
@functools.cache
def _get_user_token(self):
id_or_ids = self.tokenizer.convert_tokens_to_ids("<|USER|>")
if isinstance(id_or_ids, (int,)):
return id_or_ids
return False
@functools.cache
def _get_assistant_token(self):
id_or_ids = self.tokenizer.convert_tokens_to_ids("<|ASSISTANT|>")
if isinstance(id_or_ids, (int,)):
return id_or_ids
return False
class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
@@ -40,9 +59,13 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
full_prompt = self._build_full_prompt(instruction, input, response)
tokenized_full_prompt = self._tokenize(full_prompt)
if not self.train_on_inputs:
user_prompt = self.prompter.build_prompt(
instruction,
input,
user_prompt = next(
iter(
self.prompter.build_prompt(
instruction,
input,
)
)
)
tokenized_user_prompt = self._tokenize(user_prompt, add_eos_token=False)
user_prompt_len = len(tokenized_user_prompt["input_ids"])
@@ -54,13 +77,17 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
return tokenized_full_prompt
def _build_full_prompt(self, instruction, input, response):
return self.prompter.build_prompt(
instruction,
input,
response,
return next(
iter(
self.prompter.build_prompt(
instruction,
input,
response,
)
)
)
def _tokenize(self, prompt, add_eos_token=True):
def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
result = self.tokenizer(
prompt,
truncation=True,
@@ -76,6 +103,10 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
result["input_ids"].append(self.tokenizer.eos_token_id)
result["attention_mask"].append(1)
if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
result["input_ids"] = result["input_ids"][1:]
result["attention_mask"] = result["attention_mask"][1:]
result["labels"] = result["input_ids"].copy()
return result
@@ -89,6 +120,24 @@ class AlpacaPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
)
class AlpacaMultipleChoicePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
prompt["question"],
"\n".join(f'- "{choice}"' for choice in prompt["choices"]),
prompt["solution"] if "solution" in prompt else prompt["explanation"],
)
class JeopardyPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
prompt["question"],
prompt["category"],
"what is " + prompt["answer"],
)
class OpenAssistantPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
@@ -98,6 +147,15 @@ class OpenAssistantPromptTokenizingStrategy(InstructionPromptTokenizingStrategy)
)
class SummarizeTLDRPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
prompt["article"],
"",
prompt["summary"],
)
class GPTeacherPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
@@ -107,18 +165,54 @@ class GPTeacherPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
)
class NomicGPT4AllPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str):
return (
prompt["prompt"],
"",
prompt["response"],
)
class CompletionPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> str:
return prompt["text"]
def tokenize_prompt(self, prompt):
instruction = self.parse_instruction_fields(prompt)
full_prompt = self._build_full_prompt(instruction, None, None)
tokenized_full_prompt = self._tokenize(full_prompt)
return tokenized_full_prompt
def _build_full_prompt(self, instruction, input, response):
return next(iter(self.prompter.build_prompt(instruction)))
class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
def parse_instruction_fields(self, prompt) -> (str, str, str, str, str):
raise NotImplementedError
def tokenize_prompt(self, prompt):
instruction, input, output, reflection, corrected = self.parse_instruction_fields(prompt)
full_prompt = self._build_full_prompt(instruction, input, output, reflection, corrected)
(
instruction,
input,
output,
reflection,
corrected,
) = self.parse_instruction_fields(prompt)
full_prompt = self._build_full_prompt(
instruction, input, output, reflection, corrected
)
tokenized_full_prompt = self._tokenize(full_prompt)
if not self.train_on_inputs:
user_prompt = self.prompter.build_prompt(
instruction,
input,
user_prompt = next(
iter(
self.prompter.build_prompt(
instruction,
input,
)
)
)
tokenized_user_prompt = self._tokenize(user_prompt, add_eos_token=False)
user_prompt_len = len(tokenized_user_prompt["input_ids"])
@@ -130,12 +224,16 @@ class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
return tokenized_full_prompt
def _build_full_prompt(self, instruction, input, output, reflection, corrected):
return self.prompter.build_prompt(
instruction,
input,
output,
reflection,
corrected,
return next(
iter(
self.prompter.build_prompt(
instruction,
input,
output,
reflection,
corrected,
)
)
)
def _tokenize(self, prompt, add_eos_token=True):
@@ -168,9 +266,86 @@ class AlpacaReflectionPTStrategy(ReflectionPromptTokenizingStrategy):
prompt["corrected"],
)
class ShareGPTPromptTokenizingStrategy(PromptTokenizingStrategy):
def get_conversation_thread(self, prompt):
return prompt["conversations"]
def tokenize_prompt(self, prompt):
result = {
"input_ids": [],
"attention_mask": [],
"labels": [],
}
current_len = 0
user_token = self._get_user_token()
assistant_token = self._get_assistant_token()
try:
return self.prompter.build_prompt(prompt["conversations"], self.tokenizer)
for i, part in enumerate(
self.prompter.build_prompt(self.get_conversation_thread(prompt))
):
if isinstance(part, tuple):
if part[0] == "USER:":
part = part[0] + part[1] if not user_token else part[1]
# this is still the user query, so mask it out of the labels
res = self._tokenize(
part.strip(), add_eos_token=False, strip_bos_token=True
)
if user_token:
res["input_ids"] = [user_token, *res["input_ids"]]
# everything from this is masked out from the labels
labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
elif part[0] == "ASSISTANT:":
# TODO label assistant token/tokens w/ IGNORE_TOKEN_ID
part = part[0] + part[1] if not assistant_token else part[1]
# this should be the assistant response; it should end with an eos token
res = self._tokenize(
part.strip(), add_eos_token=True, strip_bos_token=True
)
if assistant_token:
res["input_ids"] = [assistant_token, *res["input_ids"]]
# not masked out from labels
labels = copy.deepcopy(res["input_ids"])
else:
logging.warning("unhandled role: " + part[0])
else:
# this is only ever the first part, should include the bos token and the user query
res = self._tokenize(
part.strip(), add_eos_token=False, strip_bos_token=False
)
# everything from this is masked out from the labels
labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
input_ids = res["input_ids"]
input_len = len(input_ids)
result["input_ids"][current_len : current_len + input_len] = input_ids
result["attention_mask"][current_len : current_len + input_len] = [
1 if x != self.tokenizer.pad_token_id else 0 for x in input_ids
]
result["labels"][current_len : current_len + input_len] = labels
current_len += input_len
return result
except (KeyError, AssertionError, IndexError) as e:
raise InvalidDataException(str(e))
def _tokenize(self, prompt, add_eos_token=True, strip_bos_token=False):
result = self.tokenizer(
prompt,
truncation=True,
max_length=self.sequence_len,
padding=False,
return_tensors=None,
)
if (
result["input_ids"][-1] != self.tokenizer.eos_token_id
and len(result["input_ids"]) < self.sequence_len
and add_eos_token
):
result["input_ids"].append(self.tokenizer.eos_token_id)
result["attention_mask"].append(1)
if result["input_ids"][0] == self.tokenizer.bos_token_id and strip_bos_token:
result["input_ids"] = result["input_ids"][1:]
result["attention_mask"] = result["attention_mask"][1:]
result["labels"] = result["input_ids"].copy()
return result

View File

@@ -1,22 +1,52 @@
import copy
import dataclasses
import logging
from enum import auto, Enum
from typing import List, Tuple, Any, Union
from typing import List, Tuple, Any, Union, Generator
IGNORE_TOKEN_ID = -100
class PromptStyle(Enum):
instruct = "instruct"
chat = "chat"
class AlpacaPrompter:
prompt_input = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
prompt_no_input = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n"
response_split = "### Response:"
system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
prompt_style = None
def __init__(self, prompt_style=PromptStyle.instruct.value):
self.prompt_style = prompt_style if prompt_style else PromptStyle.instruct.value
self.match_prompt_style()
def match_prompt_style(self):
if self.prompt_style == PromptStyle.instruct.value:
self.prompt_input = (
self.system_prompt
+ "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
self.prompt_no_input = (
self.system_no_input_prompt
+ "### Instruction:\n{instruction}\n\n### Response:\n"
)
self.response_split = "### Response:"
if self.prompt_style == PromptStyle.chat.value:
self.prompt_input = (
self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
)
self.prompt_no_input = (
self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
)
self.response_split = "ASSISTANT:"
def build_prompt(
self,
instruction: str,
input: Union[None, str] = None,
output: Union[None, str] = None,
) -> str:
) -> Generator[str, None, None]:
# returns the full prompt from instruction and optional input
# if a label (=response, =output) is provided, it's also appended.
if input:
@@ -25,22 +55,94 @@ class AlpacaPrompter:
res = self.prompt_no_input.format(instruction=instruction)
if output:
res = f"{res}{output}"
return res
yield res
def get_response(self, output: str) -> str:
return output.split(self.response_split)[1].strip()
class UnpromptedPrompter(AlpacaPrompter):
system_prompt = ""
system_no_input_prompt = ""
class JeopardyPrompter(AlpacaPrompter):
prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers tbe clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
class MultipleChoiceExplainPrompter(AlpacaPrompter):
system_prompt = (
"Choose the answer that best answers the question. Explain your reasoning."
)
class MultipleChoiceConcisePrompter(AlpacaPrompter):
prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
class SummarizeTLDRPrompter(AlpacaPrompter):
prompt_no_input = (
"USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
)
class CompletionPrompter:
def build_prompt(
self, instruction: str, input=None, output=None
) -> Generator[str, None, None]:
yield instruction
def get_response(self, output: str) -> str:
return output.strip()
class GPTeacherPrompter(AlpacaPrompter):
...
class NomicGPT4AllPrompter(AlpacaPrompter):
...
class ReflectAlpacaPrompter:
prompt_input = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
prompt_no_input = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n### Instruction:\n{instruction}\n\n### Response:\n"
agent_label = "{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
system_no_input_prompt = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
prompt_input = (
"### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
prompt_no_input = "### Instruction:\n{instruction}\n\n### Response:\n"
agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
response_split = "### Response:"
def __init__(self, prompt_style="instruct"):
self.prompt_style = prompt_style
self.match_prompt_style()
def match_prompt_style(self):
if self.prompt_style == PromptStyle.instruct.value:
self.prompt_input = (
self.system_prompt
+ "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
self.prompt_no_input = (
self.system_no_input_prompt
+ "### Instruction:\n{instruction}\n\n### Response:\n"
)
self.agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
self.response_split = "### Final Response:"
if self.prompt_style == PromptStyle.chat.value:
self.prompt_input = (
self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
)
self.prompt_no_input = (
self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
)
self.agent_label = (
"\nTHOUGHT: {output}\nASSISTANT REFLECTION: {reflection}\nASSISTANT:"
)
self.response_split = "ASSISTANT:"
def build_prompt(
self,
instruction: str,
@@ -48,7 +150,7 @@ class ReflectAlpacaPrompter:
output: Union[None, str] = None,
reflection: Union[None, str] = None,
corrected: Union[None, str] = None,
) -> str:
) -> Generator[str, None, None]:
# returns the full prompt from instruction and optional input
# if a label (=response, =output) is provided, it's also appended.
if input:
@@ -56,9 +158,11 @@ class ReflectAlpacaPrompter:
else:
res = self.prompt_no_input.format(instruction=instruction)
if output and reflection and corrected:
label = self.agent_label.format(output=output, reflection=reflection, corrected=corrected)
label = self.agent_label.format(
output=output, reflection=reflection, corrected=corrected
)
res = f"{res}{label}"
return res
yield res
def get_response(self, output: str) -> str:
return output.split(self.response_split)[1].strip()
@@ -85,15 +189,16 @@ class Conversation:
sep: str = "###"
sep2: str = None
def get_prompt(self):
def get_prompt(self) -> Generator[str, None, None]:
seps = [self.sep, self.sep2]
ret = self.system + seps[0]
preamble = self.system + seps[0]
yield preamble
for i, (role, message) in enumerate(self.messages):
if message:
ret += role + ": " + message + seps[i % 2]
yield (role + ":", " " + message)
else:
ret += role + ":"
return ret
logging.warning("role with empty message: " + role)
yield (role + ":",)
def copy(self):
return Conversation(
@@ -118,12 +223,28 @@ conv_vicuna_v1_1 = Conversation(
offset=0,
sep_style=SeparatorStyle.TWO,
sep=" ",
sep2="</s>",
sep2=" ",
)
class ShareGPTPrompter:
def build_prompt(self, source, tokenizer):
def __init__(self, prompt_style=None):
if prompt_style != PromptStyle.chat.value:
raise Exception(
f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
)
# def match_prompt_style(self):
# if self.prompt_style == PromptStyle.chat.value:
# self.prompt_input = self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
# self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
# self.response_split = "ASSISTANT:"
def build_prompt(self, source, *args, **kwargs) -> Generator[str, None, None]:
# ignore the system prompt if provided
if source[0]["from"] == "system":
source.pop(0)
if len(source) < 2:
# If there isn't a back-and-forth conversation, ignore it
# this also happens when data splitting leaves empty conversations
@@ -149,46 +270,6 @@ class ShareGPTPrompter:
role = roles[sentence["from"]]
assert role == conv.roles[j % 2]
conv.append_message(role, sentence["value"])
conversation = conv.get_prompt()
# Tokenize conversations
tokenized_result = tokenizer(
conversation,
truncation=True,
max_length=2048, # FIXME
padding=False,
return_tensors=None,
)
target = copy.deepcopy(tokenized_result["input_ids"])
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
rounds = conversation.split(conv.sep2)
cur_len = 1
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
round_len = len(tokenizer(rou)["input_ids"])
instruction_len = len(tokenizer(parts[0])["input_ids"]) - 2
target[cur_len : cur_len + instruction_len] = [
IGNORE_TOKEN_ID
] * instruction_len
cur_len += round_len
target[cur_len:] = [IGNORE_TOKEN_ID] * (len(target) - cur_len)
attention_mask = [
1 if x != tokenizer.pad_token_id else 0
for x in tokenized_result["input_ids"]
]
return dict(
input_ids=tokenized_result["input_ids"],
labels=target,
attention_mask=attention_mask,
)
for part in conv.get_prompt():
yield part

View File

View File

@@ -0,0 +1,28 @@
import os
from transformers import (
Seq2SeqTrainer,
TrainerCallback,
TrainingArguments,
TrainerState,
TrainerControl,
)
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(TrainerCallback):
def on_save(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(
args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
)
peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
kwargs["model"].save_pretrained(peft_model_path)
return control

365
src/axolotl/utils/data.py Normal file
View File

@@ -0,0 +1,365 @@
import logging
from hashlib import md5
from pathlib import Path
from typing import Union
from datasets import (
load_from_disk,
load_dataset,
IterableDataset,
Dataset,
concatenate_datasets,
DatasetDict,
)
from huggingface_hub import hf_hub_download
from transformers import PreTrainedTokenizerBase
from axolotl.datasets import TokenizedPromptDataset, ConstantLengthDataset
from axolotl.prompt_strategies import load
from axolotl.prompt_tokenizers import (
AlpacaPromptTokenizingStrategy,
GPTeacherPromptTokenizingStrategy,
OpenAssistantPromptTokenizingStrategy,
AlpacaReflectionPTStrategy,
ShareGPTPromptTokenizingStrategy,
JeopardyPromptTokenizingStrategy,
CompletionPromptTokenizingStrategy,
AlpacaMultipleChoicePromptTokenizingStrategy,
SummarizeTLDRPromptTokenizingStrategy,
)
from axolotl.prompters import (
AlpacaPrompter,
GPTeacherPrompter,
ReflectAlpacaPrompter,
ShareGPTPrompter,
JeopardyPrompter,
CompletionPrompter,
MultipleChoiceExplainPrompter,
SummarizeTLDRPrompter,
MultipleChoiceConcisePrompter,
)
def load_tokenized_prepared_datasets(
tokenizer, cfg, default_dataset_prepared_path
) -> DatasetDict:
tokenizer_name = tokenizer.__class__.__name__
ds_hash = str(
md5(
(
str(cfg.sequence_len)
+ "@"
+ "|".join(sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets]))
+ "|"
+ tokenizer_name
).encode("utf-8")
).hexdigest()
)
prepared_ds_path = (
Path(cfg.dataset_prepared_path) / ds_hash
if cfg.dataset_prepared_path
else Path(default_dataset_prepared_path) / ds_hash
)
dataset = None
use_auth_token = cfg.hf_use_auth_token
try:
if cfg.push_dataset_to_hub:
dataset = load_dataset(
f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
)
dataset = dataset["train"]
except:
pass
if dataset:
...
elif any(prepared_ds_path.glob("*")):
logging.info(f"Loading prepared dataset from disk at {prepared_ds_path}...")
dataset = load_from_disk(str(prepared_ds_path))
logging.info("Prepared dataset loaded from disk...")
else:
logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
logging.info("Loading raw datasets...")
datasets = []
for d in cfg.datasets:
ds: Union[Dataset, DatasetDict] = None
ds_from_hub = False
try:
load_dataset(d.path, streaming=True, use_auth_token=use_auth_token)
ds_from_hub = True
except FileNotFoundError:
pass
# prefer local dataset, even if hub exists
if Path(d.path).exists():
ds: Dataset = load_dataset(
"json", data_files=d.path, streaming=False, split=None
)
elif ds_from_hub:
if d.data_files:
ds: Dataset = load_dataset(
d.path,
streaming=False,
data_files=d.data_files,
use_auth_token=use_auth_token,
)
else:
ds: Dataset = load_dataset(d.path, streaming=False, use_auth_token=use_auth_token)
else:
fp = hf_hub_download(
repo_id=d.path, repo_type="dataset", filename=d.data_files
)
ds: Dataset = load_dataset("json", data_files=fp, streaming=False, split=None)
if not ds:
raise Exception("unhandled dataset load")
# support for using a subset of the data
if d.shards:
if "train" in ds:
ds: DatasetDict = ds.shuffle(seed=42)["train"].shard(num_shards=d.shards, index=0)
else:
ds: Dataset = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
d_type = d.type
d_type_split = d_type.split(":")
d_base_type = d_type_split[0]
d_prompt_style = d_type_split[1] if len(d_type_split) > 1 else None
if "train" in ds:
ds = ds["train"]
if ds_strategy := load(d.type, tokenizer, cfg):
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "alpaca":
ds_strategy = AlpacaPromptTokenizingStrategy(
AlpacaPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "explainchoice":
ds_strategy = AlpacaMultipleChoicePromptTokenizingStrategy(
MultipleChoiceExplainPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "concisechoice":
ds_strategy = AlpacaMultipleChoicePromptTokenizingStrategy(
MultipleChoiceConcisePrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "summarizetldr":
ds_strategy = SummarizeTLDRPromptTokenizingStrategy(
SummarizeTLDRPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "jeopardy":
ds_strategy = JeopardyPromptTokenizingStrategy(
JeopardyPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "oasst":
ds_strategy = OpenAssistantPromptTokenizingStrategy(
AlpacaPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "gpteacher":
ds_strategy = GPTeacherPromptTokenizingStrategy(
GPTeacherPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "reflection":
ds_strategy = AlpacaReflectionPTStrategy(
ReflectAlpacaPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "sharegpt":
ds_strategy = ShareGPTPromptTokenizingStrategy(
ShareGPTPrompter(d_prompt_style),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
elif d_base_type == "completion":
ds_strategy = CompletionPromptTokenizingStrategy(
CompletionPrompter(),
tokenizer,
cfg.train_on_inputs,
cfg.sequence_len,
)
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
datasets.append(ds_wrapper)
else:
logging.error(f"unhandled prompt tokenization strategy: {d.type}")
logging.info("tokenizing, merging, and shuffling master dataset")
samples = []
for d in datasets:
samples = samples + [i for i in d]
dataset = Dataset.from_list(samples).shuffle(seed=42)
if cfg.local_rank == 0:
logging.info(
f"Saving merged prepared dataset to disk... {prepared_ds_path}"
)
dataset.save_to_disk(prepared_ds_path)
if cfg.push_dataset_to_hub:
logging.info(
f"Saving merged prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
)
dataset.push_to_hub(
f"{cfg.push_dataset_to_hub}/{ds_hash}", private=True
)
return dataset
def load_prepare_datasets(
tokenizer: PreTrainedTokenizerBase, cfg, default_dataset_prepared_path
) -> (Dataset, Dataset):
max_packed_sequence_len = (
cfg.max_packed_sequence_len if cfg.max_packed_sequence_len else cfg.sequence_len
)
max_packed_sequence_len = min(
max_packed_sequence_len, cfg.sequence_len
) # make sure we don't accidentally set it larger than sequence_len
tokenizer_name = tokenizer.__class__.__name__
if cfg.max_packed_sequence_len is not None:
# see if we can go ahead and load the stacked dataset
seed = f"@{str(cfg.seed)}" if cfg.seed else ""
ds_hash = str(
md5(
(
str(cfg.sequence_len)
+ "@"
+ str(max_packed_sequence_len)
+ seed
+ "|".join(sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets]))
+ "|"
+ tokenizer_name
).encode("utf-8")
).hexdigest()
)
prepared_ds_path = (
Path(cfg.dataset_prepared_path) / ds_hash
if cfg.dataset_prepared_path
else Path(default_dataset_prepared_path) / ds_hash
)
dataset = None
use_auth_token = cfg.hf_use_auth_token
try:
if cfg.push_dataset_to_hub:
logging.info(
f"Checking for packed prepared dataset from hub... {cfg.push_dataset_to_hub}/{ds_hash}"
)
dataset = load_dataset(
f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
)
dataset = dataset["train"]
except:
pass
if dataset:
...
elif any(prepared_ds_path.glob("*")):
logging.info(
f"Loading prepared packed dataset from disk at {prepared_ds_path}..."
)
dataset = load_from_disk(str(prepared_ds_path))
logging.info("Prepared packed dataset loaded from disk...")
if cfg.push_dataset_to_hub:
logging.info(
f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
)
dataset.push_to_hub(
f"{cfg.push_dataset_to_hub}/{ds_hash}", private=True
)
else:
dataset = load_tokenized_prepared_datasets(
tokenizer, cfg, default_dataset_prepared_path
)
if cfg.seed:
dataset = dataset.shuffle(seed=cfg.seed)
constant_len_dataset = ConstantLengthDataset(
tokenizer,
[dataset],
seq_length=max_packed_sequence_len,
)
logging.info(
f"packing master dataset to len: {cfg.max_packed_sequence_len}"
)
dataset = Dataset.from_list([_ for _ in constant_len_dataset])
# filter out bad data
dataset = Dataset.from_list(
[
d
for d in dataset
if len(d["input_ids"]) < cfg.sequence_len
and len(d["input_ids"]) > 0
and len(d["input_ids"]) == len(d["attention_mask"])
and len(d["input_ids"]) == len(d["labels"])
]
)
if cfg.local_rank == 0:
logging.info(
f"Saving packed prepared dataset to disk... {prepared_ds_path}"
)
dataset.save_to_disk(prepared_ds_path)
if cfg.push_dataset_to_hub:
logging.info(
f"Saving packed prepared dataset with push_to_hub... {cfg.push_dataset_to_hub}/{ds_hash}"
)
dataset.push_to_hub(
f"{cfg.push_dataset_to_hub}/{ds_hash}", private=True
)
else:
dataset = load_tokenized_prepared_datasets(
tokenizer, cfg, default_dataset_prepared_path
)
if cfg.dataset_shard_num and cfg.dataset_shard_idx is not None:
logging.info(
f"Using index #{cfg.dataset_shard_idx} of {cfg.dataset_shard_num} shards"
)
dataset = dataset.shard(
num_shards=cfg.dataset_shard_num, index=cfg.dataset_shard_idx
)
dataset = dataset.train_test_split(test_size=cfg.val_set_size, shuffle=False)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
return train_dataset, eval_dataset

10
src/axolotl/utils/dict.py Normal file
View File

@@ -0,0 +1,10 @@
from addict import Dict
class DictDefault(Dict):
"""
A Dict that returns None instead of returning empty Dict for missing keys.
"""
def __missing__(self, key):
return None
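A minimal usage sketch (behavior follows from the override above; the keys are illustrative):

from axolotl.utils.dict import DictDefault

cfg = DictDefault({"adapter": "lora"})
print(cfg.adapter)         # "lora"
print(cfg.lora_model_dir)  # None rather than an empty Dict, so `if cfg.lora_model_dir:` is falsy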

402
src/axolotl/utils/models.py Normal file
View File

@@ -0,0 +1,402 @@
import logging
import math
import os
from pathlib import Path
from typing import Optional, Tuple, TYPE_CHECKING
import bitsandbytes as bnb
import torch
import transformers
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
PreTrainedModel,
AutoConfig,
BitsAndBytesConfig,
)
try:
from transformers import (
LlamaForCausalLM,
LlamaTokenizer,
)
except:
logging.warning(
"This version of transformers does not support Llama. Consider upgrading."
)
from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
if TYPE_CHECKING:
from peft import PeftModel, PeftConfig
from axolotl.utils.dict import DictDefault
from transformers import PreTrainedTokenizer
def load_tokenizer(
base_model_config,
tokenizer_type,
cfg,
):
if tokenizer_type:
tokenizer = getattr(transformers, tokenizer_type).from_pretrained(
base_model_config,
trust_remote_code=cfg.trust_remote_code or False,
)
else:
tokenizer = AutoTokenizer.from_pretrained(
base_model_config,
trust_remote_code=cfg.trust_remote_code or False,
)
logging.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
logging.debug(f"BOS: {tokenizer.bos_token_id} / {tokenizer.bos_token}")
logging.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
logging.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")
if tokenizer.__class__.__name__ in ["LlamaTokenizer", "LlamaTokenizerFast"]:
tokenizer.pad_token = LLAMA_DEFAULT_PAD_TOKEN
if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if cfg.special_tokens:
for k, v in cfg.special_tokens.items():
tokenizer.add_special_tokens({k: v})
if cfg.tokens:
tokenizer.add_tokens(list(cfg.tokens))
return tokenizer
def load_model(
base_model,
base_model_config,
model_type,
tokenizer,
cfg,
adapter="lora",
inference=False,
):
# type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, PreTrainedTokenizer, Optional[PeftConfig]]
# TODO refactor as a kwarg
load_in_8bit = cfg.load_in_8bit
is_llama_derived_model = "llama" in base_model or (
cfg.model_type and "llama" in cfg.model_type.lower()
)
if is_llama_derived_model and cfg.flash_attention:
if cfg.device not in ["mps", "cpu"] and inference is False:
from axolotl.flash_attn import replace_llama_attn_with_flash_attn
logging.info("patching with flash attention")
replace_llama_attn_with_flash_attn()
elif is_llama_derived_model and cfg.xformers_attention:
from alpaca_lora_4bit.monkeypatch.llama_attn_hijack_xformers import (
hijack_llama_attention,
)
logging.info("patching with xformers attention")
hijack_llama_attention()
if cfg.bf16:
torch_dtype = torch.bfloat16
elif cfg.load_in_8bit or cfg.fp16:
torch_dtype = torch.float16
else:
torch_dtype = torch.float32
try:
if cfg.gptq:
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
replace_peft_model_with_int4_lora_model,
)
replace_peft_model_with_int4_lora_model()
from peft import prepare_model_for_int8_training
except Exception as e:
logging.exception(e)
raise e
model_kwargs = {}
if cfg.adapter == "qlora" and cfg.load_in_4bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_4bit=True,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=torch_dtype,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
)
try:
if cfg.gptq and is_llama_derived_model:
from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
from huggingface_hub import snapshot_download
try:
snapshot_download_kwargs = {}
if cfg.base_model_ignore_patterns:
snapshot_download_kwargs[
"ignore_patterns"
] = cfg.base_model_ignore_patterns
cache_model_path = Path(
snapshot_download(base_model, **snapshot_download_kwargs)
)
files = (
list(cache_model_path.glob("*.pt"))
+ list(cache_model_path.glob("*.safetensors"))
+ list(cache_model_path.glob("*.bin"))
)
if len(files) > 0:
model_path = str(files[0])
else:
logging.warning(
"unable to find a cached model file, this will likely fail..."
)
model_path = str(cache_model_path)
except:
model_path = cfg.base_model
model, _ = load_llama_model_4bit_low_ram(
base_model_config if base_model_config else base_model,
model_path,
device_map=cfg.device_map,
half=cfg.fp16,
groupsize=cfg.gptq_groupsize if cfg.gptq_groupsize else -1,
is_v1_model=cfg.gptq_model_v1
if cfg.gptq_model_v1 is not None
else True,
)
load_in_8bit = False
elif is_llama_derived_model and "LlamaForCausalLM" in globals():
model = LlamaForCausalLM.from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
torch_dtype=torch_dtype,
device_map="auto" if cfg.world_size == 1 else cfg.device_map,
**model_kwargs,
)
# elif model_type == "GPTNeoXForCausalLM" and cfg.flash_attention:
# This is a WIP, still an issue with the backward pass
# RuntimeError: grad can be implicitly created only for scalar outputs
# TODO: try config.sequence_parallel = False
# # https://github.com/HazyResearch/flash-attention/blob/40a25c8ee7465cf547b929cfa2937034e37bfce9/tests/models/test_gpt_neox.py#L12
# # https://github.com/HazyResearch/flash-attention/tree/main/training#model-components
# # add `**kwargs` to https://github.com/HazyResearch/flash-attention/blob/40a25c8ee7465cf547b929cfa2937034e37bfce9/flash_attn/models/gpt.py#L442
# from flash_attn.utils.pretrained import state_dict_from_pretrained
# from flash_attn.models.gpt import GPTLMHeadModel
# from flash_attn.models.gpt_neox import remap_state_dict_hf_gpt_neox, gpt_neox_config_to_gpt2_config
# from transformers import GPTNeoXConfig
# config = gpt_neox_config_to_gpt2_config(GPTNeoXConfig.from_pretrained(base_model))
# config.use_flash_attn = True
# config.fused_bias_fc = True
# config.fused_mlp = True # GPT-NeoX-20B uses "gelu_fast"
# config.activation_function = "gelu_fast"
# config.fused_dropout_add_ln = True
# # config.residual_in_fp32 = True
#
# model: GPTLMHeadModel = GPTLMHeadModel.from_pretrained(
# base_model,
# config,
# dtype=torch_dtype,
# device=cfg.device,
# )
# model.train() # sets to train instead of eval mode
elif model_type:
model = getattr(transformers, model_type).from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
trust_remote_code=True if cfg.trust_remote_code is True else False,
**model_kwargs,
)
else:
config = AutoConfig.from_pretrained(
base_model,
trust_remote_code=True if cfg.trust_remote_code is True else False,
)
model = AutoModelForCausalLM.from_pretrained(
base_model,
config=config,
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
trust_remote_code=True if cfg.trust_remote_code is True else False,
**model_kwargs,
)
except Exception as e:
logging.error(
"Exception raised attempting to load model, retrying with AutoModelForCausalLM"
)
logging.exception(e)
model = AutoModelForCausalLM.from_pretrained(
base_model,
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
torch_dtype=torch_dtype,
device_map=cfg.device_map,
trust_remote_code=True if cfg.trust_remote_code is True else False,
**model_kwargs,
)
embeddings_len = math.ceil(len(tokenizer) / 32) * 32
model.resize_token_embeddings(embeddings_len)
if (
((cfg.adapter == "lora" and load_in_8bit) or cfg.adapter == "qlora")
and not cfg.gptq
and (load_in_8bit or cfg.load_in_4bit)
):
logging.info("converting PEFT model w/ prepare_model_for_int8_training")
model = prepare_model_for_int8_training(model)
model, lora_config = load_adapter(model, cfg, adapter)
if cfg.ddp and not load_in_8bit:
model.to(f"cuda:{cfg.local_rank}")
if cfg.gptq:
# Scales to half
logging.info("Fitting 4bit scales and zeros to half")
for n, m in model.named_modules():
if "Autograd4bitQuantLinear" in str(type(m)) or "Linear4bitLt" in str(
type(m)
):
if hasattr(m, "is_v1_model") and m.is_v1_model:
m.zeros = m.zeros.half()
m.scales = m.scales.half()
m.bias = m.bias.half()
if (
torch.cuda.device_count() > 1
and int(os.getenv("WORLD_SIZE", "1")) > 1
and (cfg.gptq or cfg.load_in_4bit)
):
# llama is PROBABLY model parallelizable, but it isn't enabled by default
# so let's only set it for the 4bit, see
# https://github.com/johnsmith0031/alpaca_lora_4bit/blob/08b3fca4a4a9e0d3945be1bab4529f100a428636/finetune.py#L130-L133
setattr(model, 'is_parallelizable', True)
setattr(model, 'model_parallel', True)
requires_grad = []
for name, param in model.named_parameters(recurse=True):
if param.requires_grad:
requires_grad.append(f"{name}: {param.requires_grad}")
if len(requires_grad) == 0:
logging.warning("there are no parameters that require gradient updates")
model.config.use_cache = False
# TODO resume_from_checkpoint handling
return model, lora_config
def load_adapter(model, cfg, adapter):
# type: (PreTrainedModel, DictDefault, Optional[str]) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
if adapter is None:
return model, None
if adapter in ["lora", "qlora"]:
return load_lora(model, cfg)
if adapter == "llama-adapter":
return load_llama_adapter(model, cfg)
raise NotImplementedError(f"{adapter} peft adapter not available")
def load_llama_adapter(model, cfg):
# type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
from peft import (
AdaptionPromptConfig,
get_peft_model,
PeftModel,
)
peft_config = AdaptionPromptConfig(
adapter_layers=cfg.peft_adapter.layers, # layers (L)
adapter_len=cfg.peft_adapter.len, # prompt length (K)
task_type="CAUSAL_LM",
)
if cfg.lora_model_dir:
logging.info("Loading pretained LORA")
model = PeftModel.from_pretrained(
model,
cfg.lora_model_dir,
device_map=cfg.device_map,
torch_dtype=torch.float16,
)
else:
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
return model, peft_config
def find_all_linear_names(bits, model):
cls = (
bnb.nn.Linear4bit
if bits == 4
else (bnb.nn.Linear8bitLt if bits == 8 else torch.nn.Linear)
)
lora_module_names = set()
for name, module in model.named_modules():
if isinstance(module, cls):
names = name.split(".")
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if "lm_head" in lora_module_names: # needed for 16-bit
lora_module_names.remove("lm_head")
return list(lora_module_names)
def load_lora(model, cfg):
# type: (PreTrainedModel, DictDefault) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
from peft import (
LoraConfig,
get_peft_model,
PeftModel,
)
lora_target_modules = list(cfg.lora_target_modules or [])
if cfg.lora_target_linear:
bits = None
if cfg.load_in_4bit:
bits = 4
elif cfg.load_in_8bit:
bits = 8
linear_names = find_all_linear_names(bits, model)
logging.info(f"found linear modules: {repr(linear_names)}")
lora_target_modules = list(set(lora_target_modules + linear_names))
lora_config = LoraConfig(
r=cfg.lora_r,
lora_alpha=cfg.lora_alpha,
target_modules=lora_target_modules,
lora_dropout=cfg.lora_dropout,
fan_in_fan_out=cfg.lora_fan_in_fan_out,
modules_to_save=cfg.lora_modules_to_save if cfg.lora_modules_to_save else None,
bias="none",
task_type="CAUSAL_LM",
)
if cfg.lora_model_dir:
model = PeftModel.from_pretrained(
model,
cfg.lora_model_dir,
device_map=cfg.device_map,
# torch_dtype=torch.float16,
)
else:
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
return model, lora_config

View File

@@ -0,0 +1,36 @@
from torch.optim.lr_scheduler import LRScheduler
class InterpolatingLogScheduler(LRScheduler):
def __init__(self, optimizer, num_steps, min_lr, max_lr, last_epoch=-1):
"""A scheduler that interpolates learning rates in a logarithmic fashion
Args:
- optimizer: pytorch optimizer
- num_steps: int, the number of steps over which to increase from the min_lr to the max_lr
- min_lr: float, the minimum learning rate
- max_lr: float, the maximum learning rate
Usage:
fc = nn.Linear(1,1)
optimizer = optim.Adam(fc.parameters())
lr_scheduler = InterpolatingLogScheduler(optimizer, num_steps=400, min_lr=1e-6, max_lr=1e-4)
"""
self.num_steps = num_steps
self.min_lr = min_lr
self.max_lr = max_lr
self.q = (max_lr / min_lr) ** (1 / (num_steps - 1))
super().__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch <= 0:
lrs = [self.min_lr for base_lr in self.base_lrs]
elif self.last_epoch < self.num_steps:
lrs = [
self.min_lr * (self.q ** (self.last_epoch - 1))
for base_lr in self.base_lrs
]
else:
lrs = [self.max_lr for base_lr in self.base_lrs]
return lrs

View File

@@ -0,0 +1,32 @@
from termcolor import colored
import logging
def check_dataset_labels(dataset, tokenizer):
# the dataset is already shuffled, so let's just check the first 5 elements
for idx in range(5):
check_example_labels(dataset[idx], tokenizer)
def check_example_labels(example, tokenizer):
# Get the input_ids, labels, and attention_mask from the dataset
input_ids = example["input_ids"]
labels = example["labels"]
attention_mask = example["attention_mask"]
# You can compare the input_ids and labels element-wise
# Remember to ignore positions with IGNORE_TOKEN_ID (if you use it) or attention_mask equal to 0
colored_tokens = []
for i, (input_id, label_id, mask) in enumerate(
zip(input_ids, labels, attention_mask)
):
decoded_input_token = tokenizer.decode(input_id)
# Choose the color based on whether the label has the ignore value or not
color = "red" if label_id == -100 else ("yellow" if label_id == 0 else "green")
colored_token = colored(decoded_input_token, color) + colored(
f"({label_id}, {mask}, {input_id})", "white"
)
colored_tokens.append(colored_token)
logging.info(" ".join(colored_tokens))
logging.info("\n\n\n")

View File

@@ -0,0 +1,239 @@
import importlib
import math
import os
import sys
from pathlib import Path
import bitsandbytes as bnb
import torch.cuda
import transformers
from torch import nn
from torch.optim.lr_scheduler import OneCycleLR
from transformers import EarlyStoppingCallback, Trainer
from transformers.trainer_pt_utils import get_parameter_names
from axolotl.utils.schedulers import InterpolatingLogScheduler
from axolotl.utils.callbacks import SavePeftModelCallback
class OneCycleLRSchedulerTrainer(Trainer):
def create_scheduler(
self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
):
optimizer = self.optimizer if optimizer is None else optimizer
num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
num_training_steps = num_training_steps
pct_start = num_warmup_steps / num_training_steps
self.lr_scheduler = OneCycleLR(
optimizer,
max_lr=self.args.learning_rate,
total_steps=num_training_steps,
pct_start=pct_start,
div_factor=6,
)
return self.lr_scheduler
def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
total_num_steps = int(
math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
)
warmup_steps = (
cfg.warmup_steps
if cfg.warmup_steps is not None
else min(int(0.03 * total_num_steps), 100)
)
logging_steps = (
cfg.logging_steps
if cfg.logging_steps is not None
else max(min(int(0.005 * total_num_steps), 10), 1)
)
save_steps = cfg.save_steps
eval_steps = cfg.eval_steps
training_arguments_kwargs = {}
if cfg.bf16 == "full":
training_arguments_kwargs["bf16_full_eval"] = True
else:
training_arguments_kwargs["bf16"] = cfg.bf16
training_arguments_kwargs["fp16"] = True if cfg.fp16 and not cfg.bf16 else False
training_arguments_kwargs["tf32"] = cfg.tf32
training_arguments_kwargs["warmup_steps"] = warmup_steps
training_arguments_kwargs["logging_steps"] = logging_steps
if cfg.gradient_checkpointing is not None:
if cfg.gptq:
from alpaca_lora_4bit.gradient_checkpointing import (
apply_gradient_checkpointing,
)
gradient_checkpointing_ratio = (
cfg.gradient_checkpointing_ratio
if cfg.gradient_checkpointing_ratio
else 1.0
)
apply_gradient_checkpointing(
model, checkpoint_ratio=gradient_checkpointing_ratio
)
else:
training_arguments_kwargs[
"gradient_checkpointing"
] = cfg.gradient_checkpointing
if cfg.fsdp:
training_arguments_kwargs["fsdp"] = cfg.fsdp
if cfg.fsdp_config:
training_arguments_kwargs["fsdp_config"] = dict(cfg.fsdp_config)
# deepspeed
if (
os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true"
and torch.cuda.device_count() > 1
):
if cfg.deepspeed:
training_arguments_kwargs["deepspeed"] = cfg.deepspeed
else:
# make a guess here
# TODO search Path("./") for one
training_arguments_kwargs["deepspeed"] = "./ds_config.json"
training_args = transformers.TrainingArguments(
per_device_train_batch_size=cfg.micro_batch_size,
per_device_eval_batch_size=cfg.eval_batch_size
if cfg.eval_batch_size is not None
else cfg.micro_batch_size,
gradient_accumulation_steps=cfg.gradient_accumulation_steps,
eval_accumulation_steps=cfg.gradient_accumulation_steps,
num_train_epochs=cfg.num_epochs,
learning_rate=cfg.learning_rate,
evaluation_strategy="steps" if cfg.val_set_size > 0 else "no",
save_strategy="steps" if save_steps else "epoch",
eval_steps=eval_steps if cfg.val_set_size > 0 else None,
save_steps=save_steps,
output_dir=cfg.output_dir,
save_total_limit=3,
load_best_model_at_end=True
if cfg.load_best_model_at_end is not False # if explicitly set to False, it should resolve to False
and cfg.val_set_size > 0
and save_steps is not None
and save_steps % eval_steps == 0
and cfg.load_in_8bit is not True
else False,
ddp_find_unused_parameters=False if cfg.ddp else None,
group_by_length=cfg.group_by_length,
report_to="wandb" if cfg.use_wandb else None,
run_name=cfg.wandb_run_id if cfg.use_wandb else None,
optim=cfg.optimizer if cfg.optimizer else "adamw_hf",
lr_scheduler_type=cfg.lr_scheduler
if cfg.lr_scheduler and cfg.lr_scheduler not in ("one_cycle", "log_sweep")
else "cosine",
weight_decay=cfg.weight_decay if cfg.weight_decay is not None else 0.0,
**training_arguments_kwargs,
)
trainer_kwargs = {}
if cfg.optimizer == "adamw_anyprecision":
if Path(cfg.torchdistx_path).exists():
sys.path.append(cfg.torchdistx_path)
importlib.import_module("torchdistx")
if (
cfg.optimizer == "adamw_bnb_8bit"
and not cfg.gptq
and not "deepspeed" in training_arguments_kwargs
and not cfg.fsdp
):
decay_parameters = get_parameter_names(model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if (n in decay_parameters and p.requires_grad)
],
"weight_decay": training_args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer = bnb.optim.Adam8bit(
optimizer_grouped_parameters,
betas=(training_args.adam_beta1, training_args.adam_beta2),
eps=training_args.adam_epsilon,
lr=training_args.learning_rate,
)
if cfg.lr_scheduler == "one_cycle":
lr_scheduler_kwargs = (
cfg.lr_scheduler_kwargs if cfg.lr_scheduler_kwargs else {}
)
lr_scheduler = OneCycleLR(
optimizer,
cfg.learning_rate,
total_steps=total_num_steps,
epochs=cfg.num_epochs,
div_factor=cfg.lr_div_factor if cfg.lr_div_factor else 6,
**lr_scheduler_kwargs,
)
elif cfg.lr_scheduler == "log_sweep":
lr_scheduler = InterpolatingLogScheduler(
optimizer,
cfg.warmup_steps,
cfg.log_sweep_min_lr if cfg.log_sweep_min_lr else 1e-10,
cfg.log_sweep_max_lr if cfg.log_sweep_max_lr else 10,
)
else:
lr_scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer,
training_args.warmup_steps,
total_num_steps,
)
trainer_kwargs["optimizers"] = (optimizer, lr_scheduler)
callbacks = []
# TODO on_save callback to sync checkpoints to GCP/AWS in background
if cfg.early_stopping_patience:
early_stop_cb = EarlyStoppingCallback(
cfg.early_stopping_patience,
)
callbacks.append(early_stop_cb)
if cfg.local_rank == 0 and cfg.adapter in ["lora", "qlora"]: # only save in rank 0
callbacks.append(SavePeftModelCallback)
data_collator_kwargs = {
"padding": True,
}
if cfg.collator_pad_to_longest:
data_collator_kwargs["padding"] = "longest"
else:
data_collator_kwargs["pad_to_multiple_of"] = 8
trainer_cls = (
OneCycleLRSchedulerTrainer
if cfg.lr_scheduler == "one_cycle" and (cfg.fsdp or cfg.adapter == "qlora")
else transformers.Trainer
)
trainer = trainer_cls(
model=model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
args=training_args,
data_collator=transformers.DataCollatorForSeq2Seq(
tokenizer,
return_tensors="pt",
**data_collator_kwargs,
),
callbacks=callbacks,
**trainer_kwargs,
)
return trainer

View File

@@ -0,0 +1,46 @@
import logging
def validate_config(cfg):
if cfg.load_4bit:
raise ValueError(
"cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq"
)
if cfg.adapter == "qlora":
if cfg.merge_lora:
# can't merge qlora if loaded in 8bit or 4bit
if cfg.load_in_8bit:
raise ValueError("Can't merge qlora if loaded in 8bit")
if cfg.gptq:
raise ValueError("Can't merge qlora if gptq")
if cfg.load_in_4bit:
raise ValueError("Can't merge qlora if loaded in 4bit")
else:
if cfg.load_in_8bit:
raise ValueError("Can't load qlora in 8bit")
if cfg.gptq:
raise ValueError("Can't load qlora if gptq")
if not cfg.load_in_4bit:
raise ValueError("Require cfg.load_in_4bit to be True for qlora")
if not cfg.load_in_8bit and cfg.adapter == "lora":
logging.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")
if cfg.trust_remote_code:
logging.warning(
"`trust_remote_code` is set to true. Please make sure that you reviewed the remote code/model."
)
if cfg.push_dataset_to_hub and cfg.hf_use_auth_token is not True:
raise ValueError("Require cfg.hf_use_auth_token to be True for push_dataset_to_hub")
# TODO
# MPT 7b
# https://github.com/facebookresearch/bitsandbytes/issues/25
# no 8bit adamw w bf16

View File

@@ -0,0 +1,15 @@
import os
def setup_wandb_env_vars(cfg):
if cfg.wandb_mode and cfg.wandb_mode == "offline":
os.environ["WANDB_MODE"] = cfg.wandb_mode
elif cfg.wandb_project and len(cfg.wandb_project) > 0:
os.environ["WANDB_PROJECT"] = cfg.wandb_project
cfg.use_wandb = True
if cfg.wandb_watch and len(cfg.wandb_watch) > 0:
os.environ["WANDB_WATCH"] = cfg.wandb_watch
if cfg.wandb_log_model and len(cfg.wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
if cfg.wandb_run_id and len(cfg.wandb_run_id) > 0:
os.environ["WANDB_RUN_ID"] = cfg.wandb_run_id

52
tests/fixtures/conversation.json vendored Normal file
View File

@@ -0,0 +1,52 @@
{
"conversations": [
{
"from": "human",
"value": "how could i implement a minesweeper algorithm that utilises algebraic topology to solve boards?"
},
{
"from": "gpt",
"value": "It's possible to use algebraic topology to solve Minesweeper, but it would likely be quite complex. Algebraic topology is a branch of mathematics that studies the properties of spaces that are preserved under continuous deformations, such as stretching and bending. In the context of Minesweeper, this could be used to analyze the structure of the game board and determine the locations of mines based on certain topological invariants. However, implementing this would require a deep understanding of both algebraic topology and the game of Minesweeper. It's not something that can be explained in a simple answer."
},
{
"from": "human",
"value": "could you give me the gist of how it could though?"
},
{
"from": "gpt",
"value": "Sure. The basic idea would be to use algebraic topology to analyze the structure of the Minesweeper game board and determine the locations of mines based on certain topological invariants. This could involve representing the game board as a topological space and using topological invariants such as homotopy groups and Betti numbers to infer information about the location of mines on the board. However, as I mentioned before, implementing this would require a deep understanding of both algebraic topology and the game of Minesweeper, so it's not something that can be explained in a simple answer."
},
{
"from": "human",
"value": "give me a specific example"
},
{
"from": "gpt",
"value": "As a specific example, suppose we have a Minesweeper game board with the following configuration:\n```\n 1 1 1 1 1\n 1 ? ? ? 1\n 1 ? ? ? 1\n 1 ? ? ? 1\n 1 1 1 1 1\n```\nIn this case, the game board can be represented as a topological space, where each square on the board corresponds to a point in the space. The mines on the board can be thought of as \"holes\" in the space, which are points that are not part of the space. Using algebraic topology, we can analyze the structure of this space and determine the locations of the mines based on certain topological invariants.\n\nFor instance, one way to do this is to compute the homotopy groups of the space. Homotopy groups are a way of describing the holes in a space, and they can be used to infer information about the locations of the mines on the Minesweeper board. In the case of the board above, we can compute the first homotopy group of the space, which describes the number and \"shape\" of the holes in the space. In this case, the first homotopy group would tell us that there are four holes in the space, arranged in a square shape.\n\nBased on this information, we can infer that the mines in the game are located in the four corners of the board, as shown below:\n```\n 1 1 1 1 1\n 1 M ? ? 1\n 1 ? ? ? 1\n 1 ? ? M 1\n 1 1 1 1 1\n```\nOf course, this is just one example of how algebraic topology could be used to solve Minesweeper. There are many other possible approaches, and the specific details of how to implement them would depend on the specific game board and the topological invariants being used. As I mentioned before, implementing a solution like this would require a deep understanding of both algebraic topology and the game of Minesweeper."
},
{
"from": "human",
"value": "assuming i was someone with a deep understanding of both, could you explain the same concept to me with that assumption?"
},
{
"from": "gpt",
"value": "Sure. Assuming you have a deep understanding of both algebraic topology and the game of Minesweeper, here is how you might use algebraic topology to solve a Minesweeper game board:\n\n1. Represent the Minesweeper game board as a topological space. Each square on the board corresponds to a point in the space, and the mines on the board can be thought of as \"holes\" in the space, which are points that are not part of the space.\n2. Use topological invariants to infer information about the locations of the mines on the board. There are many possible topological invariants that could be used for this, such as homotopy groups, Betti numbers, or persistent homology.\n3. Use the information inferred from the topological invariants to determine the locations of the mines on the board. For example, if the first homotopy group of the space indicates that there are four holes in the space, arranged in a square shape, then we can infer that the mines are located in the four corners of the board.\n4. Use this information to solve the Minesweeper game. Once you know the locations of the mines, you can use this information to avoid them and clear the board.\n\nOf course, this is just one possible approach to using algebraic topology to solve Minesweeper. There may be other ways to do it, depending on the specific game board and the topological invariants being used. However, this should give you a general idea of how algebraic topology can be used to solve Minesweeper."
},
{
"from": "human",
"value": "how would i do it with persistent homology or any of the other topological invariants?"
},
{
"from": "gpt",
"value": "To use persistent homology or any of the other topological invariants to solve a Minesweeper game board, you would follow the same basic steps as described above, but with some differences in the specific details.\n\nFor instance, here is how you might use persistent homology to solve a Minesweeper game board:\n\n1. Represent the Minesweeper game board as a topological space. Each square on the board corresponds to a point in the space, and the mines on the board can be thought of as \"holes\" in the space, which are points that are not part of the space.\n2. Compute the persistent homology of the space. Persistent homology is a way of describing the holes in a space over time, and it can be used to infer information about the locations of the mines on the Minesweeper board.\n3. Use the information from the persistent homology to determine the locations of the mines on the board. For example, if the persistent homology indicates that there are four holes in the space, arranged in a square shape, then we can infer that the mines are located in the four corners of the board.\n4. Use this information to solve the Minesweeper game. Once you know the locations of the mines, you can use this information to avoid them and clear the board.\n\nOf course, this is just one possible approach to using persistent homology to solve Minesweeper. There may be other ways to do it, depending on the specific game board and the specific details of the persistent homology computation. However, this should give you a general idea of how persistent homology can be used to solve Minesweeper."
},
{
"from": "human",
"value": "how could i implement this in python?"
},
{
"from": "gpt",
"value": "To implement a Minesweeper solver that uses algebraic topology in Python, you would need to do the following:\n\n1. Install a Python package for algebraic topology. There are several options available, such as Gudhi, Dionysus, or scikit-tda. These packages provide algorithms and data structures for computing topological invariants, such as homotopy groups, Betti numbers, or persistent homology.\n2. Represent the Minesweeper game board as a topological space. This could be done using a data structure provided by the algebraic topology package, such as a simplicial complex or a Vietoris-Rips complex. Each square on the board would correspond to a point in the space, and the mines on the board would be represented as \"holes\" in the space.\n3. Compute the topological invariants of the space. This could be done using algorithms provided by the algebraic topology package. For example, you could compute the persistent homology of the space using the persistent homology algorithm.\n4. Use the information from the topological invariants to determine the locations of the mines on the board. This could be done using a combination of mathematical reasoning and programming. For example, if the persistent homology indicates that there are four holes in the space, arranged in a square shape, then you could use a piece of code to infer that the mines are located in the four corners of the board.\n5. Use this information to solve the Minesweeper game. Once you know the locations of the mines, you could use a piece of code to automatically click on the safe squares and clear the board.\n\nOf course, this is just one possible approach to implementing a Minesweeper solver that uses algebraic topology in Python. There may be other ways to do it, depending on the specific details of the implementation. However, this should give you a general idea of how to get started with such a project."
}
]
}

File diff suppressed because one or more lines are too long
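
The fixture conversation above describes a workflow (represent the board as a complex, compute persistent homology or Betti numbers, then reason about mine locations from the invariants). For readers curious what the mechanical part of that workflow looks like, here is a minimal, illustrative sketch using Gudhi, one of the packages the conversation names. The board encoding, the `max_edge_length` threshold, and the variable names are assumptions made for illustration only; this snippet is not part of the repository or its tests, and it does not actually locate mines.

```python
# Minimal sketch, assuming `gudhi` is installed (pip install gudhi).
# It only shows how to compute the invariants the conversation mentions.
import gudhi

# Hypothetical encoding: (row, col) coordinates of the unopened "?" squares
# from the 5x5 example board in the conversation above.
points = [(row, col) for row in range(1, 4) for col in range(1, 4)]

# Build a Vietoris-Rips complex: squares within max_edge_length of each other
# are connected. The threshold 1.5 (so diagonal neighbours connect) is an
# illustrative choice, not something prescribed by the conversation.
rips = gudhi.RipsComplex(points=points, max_edge_length=1.5)
simplex_tree = rips.create_simplex_tree(max_dimension=2)

# Persistent homology of the filtration; persistence() must run before
# betti_numbers() can be read.
diagram = simplex_tree.persistence()
print("persistence pairs:", diagram)
print("betti numbers:", simplex_tree.betti_numbers())
```

Mapping a persistence diagram like this back to actual mine locations would still require the game-specific reasoning the conversation leaves unspecified.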

90
tests/test_dict.py Normal file
View File

@@ -0,0 +1,90 @@
import unittest

import pytest

from axolotl.utils.dict import DictDefault


class DictDefaultTest(unittest.TestCase):
    def test_dict_default(self):
        cfg = DictDefault(
            {
                "key_a": {"key_b": "value_a"},
                "key_c": "value_c",
                "key_d": ["value_d", "value_e"],
            }
        )

        assert (
            cfg.key_a.key_b == "value_a"
        ), "DictDefault should return value for existing nested keys"

        assert (
            cfg.key_c == "value_c"
        ), "DictDefault should return value for existing keys"

        assert (
            cfg.key_d[0] == "value_d"
        ), "DictDefault should return value for existing keys in list"

        assert (
            "value_e" in cfg.key_d
        ), "DictDefault should support in operator for existing keys in list"

    def test_dict_or_operator(self):
        cfg = DictDefault(
            {
                "key_a": {"key_b": "value_a"},
                "key_c": "value_c",
                "key_d": ["value_d", "value_e"],
                "key_f": "value_f",
            }
        )

        cfg = cfg | DictDefault({"key_a": {"key_b": "value_b"}, "key_f": "value_g"})

        assert (
            cfg.key_a.key_b == "value_b"
        ), "DictDefault should support OR operator for existing nested keys"

        assert cfg.key_c == "value_c", "DictDefault should not delete existing key"

        assert cfg.key_d == [
            "value_d",
            "value_e",
        ], "DictDefault should not overwrite existing keys in list"

        assert (
            cfg.key_f == "value_g"
        ), "DictDefault should support OR operator for existing key"

    def test_dict_missingkey(self):
        cfg = DictDefault({})

        assert cfg.random_key is None, "DictDefault should return None for missing keys"

    def test_dict_nested_missingparentkey(self):
        """
        Due to subclassing Dict, DictDefault will error if we try to access a nested key
        whose parent key does not exist.
        """
        cfg = DictDefault({})

        with pytest.raises(
            AttributeError,
            match=r"'NoneType' object has no attribute 'another_random_key'",
        ):
            cfg.random_key.another_random_key

    def test_dict_shorthand_assignment(self):
        """
        Shorthand assignment is said not to be supported when addict's Dict is subclassed;
        however, the upstream example raises an error instead of returning None.
        This test ensures that shorthand assignment works with the current implementation.
        Ref: https://github.com/mewwts/addict#default-values
        """
        cfg = DictDefault({"key_a": {"key_b": "value_a"}})

        cfg.key_a.key_b = "value_b"

        assert cfg.key_a.key_b == "value_b", "Shorthand assignment should be supported"

View File

@@ -0,0 +1,47 @@
import json
import logging
import unittest
from pathlib import Path

from transformers import AutoTokenizer

from axolotl.prompt_tokenizers import ShareGPTPromptTokenizingStrategy
from axolotl.prompters import ShareGPTPrompter

logging.basicConfig(level="INFO")


class TestPromptTokenizationStrategies(unittest.TestCase):
    def setUp(self) -> None:
        self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
        self.tokenizer.add_special_tokens(
            {
                "bos_token": "<s>",
                "eos_token": "</s>",
                "unk_token": "<unk>",
            }
        )

    def test_sharegpt_integration(self):
        print(Path(__file__).parent)
        with open(Path(__file__).parent / "fixtures/conversation.json", "r") as fin:
            data = fin.read()
            conversation = json.loads(data)
        with open(Path(__file__).parent / "fixtures/conversation.tokenized.json", "r") as fin:
            data = fin.read()
            tokenized_conversation = json.loads(data)
        prompter = ShareGPTPrompter("chat")
        strat = ShareGPTPromptTokenizingStrategy(
            prompter,
            self.tokenizer,
            False,
            2048,
        )
        example = strat.tokenize_prompt(conversation)
        for fields in ["input_ids", "attention_mask", "labels"]:
            self.assertEqual(len(example[fields]), len(tokenized_conversation[fields]))
            self.assertEqual(example[fields], tokenized_conversation[fields])


if __name__ == "__main__":
    unittest.main()

49
tests/test_prompters.py Normal file
View File

@@ -0,0 +1,49 @@
import unittest

from axolotl.prompters import AlpacaPrompter, PromptStyle


class AlpacaPrompterTest(unittest.TestCase):
    def test_prompt_style_w_none(self):
        prompter = AlpacaPrompter(prompt_style=None)
        res = next(prompter.build_prompt("tell me a joke"))
        # just testing that it uses instruct style
        assert "### Instruction:" in res

    def test_prompt_style_w_instruct(self):
        prompter = AlpacaPrompter(prompt_style=PromptStyle.instruct.value)
        res = next(prompter.build_prompt("tell me a joke about the following", "alpacas"))
        assert "Below is an instruction" in res
        assert "### Instruction:" in res
        assert "### Input:" in res
        assert "alpacas" in res
        assert "### Response:" in res
        assert "USER:" not in res
        assert "ASSISTANT:" not in res

        res = next(prompter.build_prompt("tell me a joke about the following"))
        assert "Below is an instruction" in res
        assert "### Instruction:" in res
        assert "### Input:" not in res
        assert "### Response:" in res
        assert "USER:" not in res
        assert "ASSISTANT:" not in res

    def test_prompt_style_w_chat(self):
        prompter = AlpacaPrompter(prompt_style=PromptStyle.chat.value)
        res = next(prompter.build_prompt("tell me a joke about the following", "alpacas"))
        assert "Below is an instruction" in res
        assert "### Instruction:" not in res
        assert "### Input:" not in res
        assert "alpacas" in res
        assert "### Response:" not in res
        assert "USER:" in res
        assert "ASSISTANT:" in res

        res = next(prompter.build_prompt("tell me a joke about the following"))
        assert "Below is an instruction" in res
        assert "### Instruction:" not in res
        assert "### Input:" not in res
        assert "### Response:" not in res
        assert "USER:" in res
        assert "ASSISTANT:" in res

114
tests/test_validation.py Normal file
View File

@@ -0,0 +1,114 @@
import unittest

import pytest

from axolotl.utils.validation import validate_config
from axolotl.utils.dict import DictDefault


class ValidationTest(unittest.TestCase):
    def test_load_4bit_deprecate(self):
        cfg = DictDefault(
            {
                "load_4bit": True,
            }
        )

        with pytest.raises(ValueError):
            validate_config(cfg)

    def test_qlora(self):
        base_cfg = DictDefault(
            {
                "adapter": "qlora",
            }
        )

        cfg = base_cfg | DictDefault(
            {
                "load_in_8bit": True,
            }
        )

        with pytest.raises(ValueError, match=r".*8bit.*"):
            validate_config(cfg)

        cfg = base_cfg | DictDefault(
            {
                "gptq": True,
            }
        )

        with pytest.raises(ValueError, match=r".*gptq.*"):
            validate_config(cfg)

        cfg = base_cfg | DictDefault(
            {
                "load_in_4bit": False,
            }
        )

        with pytest.raises(ValueError, match=r".*4bit.*"):
            validate_config(cfg)

        cfg = base_cfg | DictDefault(
            {
                "load_in_4bit": True,
            }
        )

        validate_config(cfg)

    def test_qlora_merge(self):
        base_cfg = DictDefault(
            {
                "adapter": "qlora",
                "merge_lora": True,
            }
        )

        cfg = base_cfg | DictDefault(
            {
                "load_in_8bit": True,
            }
        )

        with pytest.raises(ValueError, match=r".*8bit.*"):
            validate_config(cfg)

        cfg = base_cfg | DictDefault(
            {
                "gptq": True,
            }
        )

        with pytest.raises(ValueError, match=r".*gptq.*"):
            validate_config(cfg)

        cfg = base_cfg | DictDefault(
            {
                "load_in_4bit": True,
            }
        )

        with pytest.raises(ValueError, match=r".*4bit.*"):
            validate_config(cfg)

    def test_hf_use_auth_token(self):
        cfg = DictDefault(
            {
                "push_dataset_to_hub": "namespace/repo",
            }
        )

        with pytest.raises(ValueError, match=r".*hf_use_auth_token.*"):
            validate_config(cfg)

        cfg = DictDefault(
            {
                "push_dataset_to_hub": "namespace/repo",
                "hf_use_auth_token": True,
            }
        )

        validate_config(cfg)