diff --git a/.nojekyll b/.nojekyll index 6ed1b461a..446ec107d 100644 --- a/.nojekyll +++ b/.nojekyll @@ -1 +1 @@ -1fc273fc \ No newline at end of file +907d35b7 \ No newline at end of file diff --git a/docs/dataset-formats/index.html b/docs/dataset-formats/index.html index d3b9e1dd3..cad6a42eb 100644 --- a/docs/dataset-formats/index.html +++ b/docs/dataset-formats/index.html @@ -363,7 +363,7 @@ Description - + Pre-training @@ -371,7 +371,7 @@ Description Data format for a pre-training completion task. - + Instruction Tuning @@ -379,7 +379,7 @@ Description Instruction tuning formats for supervised fine-tuning. - + Conversation @@ -387,7 +387,7 @@ Description Conversation format for supervised fine-tuning. - + Template-Free @@ -395,7 +395,7 @@ Description Construct prompts without a template. - + Custom Pre-Tokenized Dataset diff --git a/docs/lr_groups.html b/docs/lr_groups.html new file mode 100644 index 000000000..1684b6e0d --- /dev/null +++ b/docs/lr_groups.html @@ -0,0 +1,773 @@ + + + + + + + + + + +Learning Rate Groups – Axolotl + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Learning Rate Groups

+
+ +
+
+ Setting different learning rates by module name +
+
+ + +
+ + + + +
+ + + +
+ + +
+

Background

+

Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or group of modules in a model.

+
+
+

Example

+
lr_groups:
+  - name: o_proj
+    modules:
+      - self_attn.o_proj.weight
+    lr: 1e-6
+  - name: q_proj
+    modules:
+      - model.layers.2.self_attn.q_proj.weight
+    lr: 1e-5
+
+learning_rate: 2e-5
+

In this example, we have a default learning rate of 2e-5 across the entire model, a separate learning rate of 1e-6 for the self-attention o_proj modules across all layers, and a learning rate of 1e-5 for the 3rd layer’s self-attention q_proj module.
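Because modules accepts a list, a single group can also cover several module names at once; anything not matched by a group falls back to the global learning_rate. The snippet below is an illustrative sketch only; the mlp.* parameter names assume a Llama-style architecture and are not part of the example above:
lr_groups:
  - name: mlp
    modules:
      - mlp.gate_proj.weight
      - mlp.up_proj.weight
      - mlp.down_proj.weight
    lr: 5e-6

learning_rate: 2e-5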

+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/search.json b/search.json index a482ec4de..25c7d5b13 100644 --- a/search.json +++ b/search.json @@ -336,103 +336,212 @@ ] }, { - "objectID": "docs/fsdp_qlora.html", - "href": "docs/fsdp_qlora.html", - "title": "FDSP + QLoRA", + "objectID": "docs/lr_groups.html", + "href": "docs/lr_groups.html", + "title": "Learning Rate Groups", "section": "", - "text": "Using FSDP with QLoRA is essential for fine-tuning larger (70b+ parameter) LLMs on consumer GPUs. For example, you can use FSDP + QLoRA to train a 70b model on two 24GB GPUs1.\nBelow, we describe how to use this feature in Axolotl.", - "crumbs": [ - "How-To Guides", - "FDSP + QLoRA" - ] + "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of modules in a model." }, { - "objectID": "docs/fsdp_qlora.html#background", - "href": "docs/fsdp_qlora.html#background", - "title": "FDSP + QLoRA", + "objectID": "docs/lr_groups.html#background", + "href": "docs/lr_groups.html#background", + "title": "Learning Rate Groups", "section": "", - "text": "Using FSDP with QLoRA is essential for fine-tuning larger (70b+ parameter) LLMs on consumer GPUs. For example, you can use FSDP + QLoRA to train a 70b model on two 24GB GPUs1.\nBelow, we describe how to use this feature in Axolotl.", + "text": "Inspired by LoRA+, Axolotl allows practitioners to specify separate learning rates for each module or groups of modules in a model." + }, + { + "objectID": "docs/lr_groups.html#example", + "href": "docs/lr_groups.html#example", + "title": "Learning Rate Groups", + "section": "Example", + "text": "Example\nlr_groups:\n - name: o_proj\n modules:\n - self_attn.o_proj.weight\n lr: 1e-6\n - name: q_proj\n modules:\n - model.layers.2.self_attn.q_proj.weight\n lr: 1e-5\n\nlearning_rate: 2e-5\nIn this example, we have a default learning rate of 2e-5 across the entire model, but we have a separate learning rate of 1e-6 for all the self attention o_proj modules across all layers, and a learning are of 1e-5 to the 3rd layer’s self attention q_proj module." + }, + { + "objectID": "docs/debugging.html", + "href": "docs/debugging.html", + "title": "Debugging", + "section": "", + "text": "This document provides some tips and tricks for debugging Axolotl. It also provides an example configuration for debugging with VSCode. 
A good debugging setup is essential to understanding how Axolotl code works behind the scenes.", "crumbs": [ "How-To Guides", - "FDSP + QLoRA" + "Debugging" ] }, { - "objectID": "docs/fsdp_qlora.html#usage", - "href": "docs/fsdp_qlora.html#usage", - "title": "FDSP + QLoRA", - "section": "Usage", - "text": "Usage\nTo enable QLoRA with FSDP, you need to perform the following steps:\n\n![Tip] See the example config file in addition to reading these instructions.\n\n\nSet adapter: qlora in your axolotl config file.\nEnable FSDP in your axolotl config, as described here.\nUse one of the supported model types: llama, mistral or mixtral.", + "objectID": "docs/debugging.html#table-of-contents", + "href": "docs/debugging.html#table-of-contents", + "title": "Debugging", + "section": "Table of Contents", + "text": "Table of Contents\n\nGeneral Tips\nDebugging with VSCode\n\nBackground\nConfiguration\nCustomizing your debugger\nVideo Tutorial\n\nDebugging With Docker\n\nSetup\nAttach To Container\nVideo - Attaching To Docker On Remote Host", "crumbs": [ "How-To Guides", - "FDSP + QLoRA" + "Debugging" ] }, { - "objectID": "docs/fsdp_qlora.html#example-config", - "href": "docs/fsdp_qlora.html#example-config", - "title": "FDSP + QLoRA", - "section": "Example Config", - "text": "Example Config\nexamples/llama-2/qlora-fsdp.yml contains an example of how to enable QLoRA + FSDP in axolotl.", + "objectID": "docs/debugging.html#general-tips", + "href": "docs/debugging.html#general-tips", + "title": "Debugging", + "section": "General Tips", + "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important] All of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_processes: 1 in your axolotl config or run the training command with --dataset_processes=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config): yaml dataset: ... shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. 
If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.", "crumbs": [ "How-To Guides", - "FDSP + QLoRA" + "Debugging" ] }, { - "objectID": "docs/fsdp_qlora.html#references", - "href": "docs/fsdp_qlora.html#references", - "title": "FDSP + QLoRA", - "section": "References", - "text": "References\n\nPR #1378 enabling QLoRA in FSDP in Axolotl.\nBlog Post from the Answer.AI team describing the work that enabled QLoRA in FSDP.\nRelated HuggingFace PRs Enabling FDSP + QLoRA:\n\nAccelerate PR#2544\nTransformers PR#29587\nTRL PR#1416\nPEFT PR#1550", + "objectID": "docs/debugging.html#debugging-with-vscode", + "href": "docs/debugging.html#debugging-with-vscode", + "title": "Debugging", + "section": "Debugging with VSCode", + "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important] If you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip] If you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_processes=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip] You may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode", "crumbs": [ "How-To Guides", - "FDSP + QLoRA" + "Debugging" ] }, { - "objectID": "docs/fsdp_qlora.html#footnotes", - "href": "docs/fsdp_qlora.html#footnotes", - "title": "FDSP + QLoRA", + "objectID": "docs/debugging.html#debugging-with-docker", + "href": "docs/debugging.html#debugging-with-docker", + "title": "Debugging", + "section": "Debugging With Docker", + "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip] If you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl:main-py3.10-cu118-2.0.1\n\n[!Tip] To understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, perform an editable install of Axolotl:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. 
Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host", + "crumbs": [ + "How-To Guides", + "Debugging" + ] + }, + { + "objectID": "docs/debugging.html#footnotes", + "href": "docs/debugging.html#footnotes", + "title": "Debugging", "section": "Footnotes", - "text": "Footnotes\n\n\nThis was enabled by this work from the Answer.AI team.↩︎", + "text": "Footnotes\n\n\nThe config actually mimics the command CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/chat_template.yml, but this is the same thing.↩︎\nMany of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. You can read more about these flags here.↩︎", "crumbs": [ "How-To Guides", - "FDSP + QLoRA" + "Debugging" ] }, { - "objectID": "docs/dataset_preprocessing.html", - "href": "docs/dataset_preprocessing.html", - "title": "Dataset Preprocessing", + "objectID": "docs/faq.html", + "href": "docs/faq.html", + "title": "FAQ", "section": "", - "text": "Dataset pre-processing is the step where Axolotl takes each dataset you’ve configured alongside the (dataset format)[../dataset-formats/] and prompt strategies to: - parse the dataset based on the dataset format - transform the dataset to how you would interact with the model based on the prompt strategy - tokenize the dataset based on the configured model & tokenizer - shuffle and merge multiple datasets together if using more than one\nThe processing of the datasets can happen one of two ways:\n\nBefore kicking off training by calling python -m axolotl.cli.preprocess /path/to/your.yaml --debug\nWhen training is started\n\nWhat are the benefits of pre-processing? When training interactively or for sweeps (e.g. you are restarting the trainer often), processing the datasets can oftentimes be frustratingly slow. Pre-processing will cache the tokenized/formatted datasets according to a hash of dependent training parameters so that it will intelligently pull from its cache when possible.\nThe path of the cache is controlled by dataset_prepared_path: and is often left blank in example YAMLs as this leads to a more robust solution that prevents unexpectedly reusing cached data.\nIf dataset_prepared_path: is left empty, when training, the processed dataset will be cached in a default path of ./last_run_prepared/, but will ignore anything already cached there. By explicitly setting dataset_prepared_path: ./last_run_prepared, the trainer will use whatever pre-processed data is in the cache.\nWhat are the edge cases? Let’s say you are writing a custom prompt strategy or using a user-defined prompt template. Because the trainer cannot readily detect these changes, we cannot change the calculated hash value for the pre-processed dataset. If you have dataset_prepared_path: ... set and change your prompt templating logic, it may not pick up the changes you made and you will be training over the old prompt." + "text": "Q: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. 
See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\n\nA: You may be using deepspeed with single gpu. Please don’t set deepspeed: in yaml or cli.", + "crumbs": [ + "FAQ" + ] }, { - "objectID": "docs/batch_vs_grad.html", - "href": "docs/batch_vs_grad.html", - "title": "Batch size vs Gradient accumulation", + "objectID": "docs/multi-node.html", + "href": "docs/multi-node.html", + "title": "Multi Node", "section": "", - "text": "Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. 
As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1: Micro batch size: 3 Gradient accumulation steps: 2 Number of GPUs: 3 Total batch size = 3 * 2 * 3 = 18\n| GPU 1 | GPU 2 | GPU 3 |\n|----------------|----------------|----------------|\n| S1, S2, S3 | S4, S5, S6 | S7, S8, S9 |\n| e1, e2, e3 | e4, e5, e6 | e7, e8, e9 |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12 | S13, S14, S15 | S16, S17, S18 |\n| e10, e11, e12 | e13, e14, e15 | e16, e17, e18 |\n|----------------|----------------|----------------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2: Micro batch size: 2 Gradient accumulation steps: 1 Number of GPUs: 3 Total batch size = 2 * 1 * 3 = 6\n| GPU 1 | GPU 2 | GPU 3 |\n|-----------|-----------|-----------|\n| S1, S2 | S3, S4 | S5, S6 |\n| e1, e2 | e3, e4 | e5, e6 |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)" - }, - { - "objectID": "docs/multimodal.html", - "href": "docs/multimodal.html", - "title": "MultiModal / Vision Language Models (BETA)", - "section": "", - "text": "MultiModal / Vision Language Models (BETA)\n\nSupported Models\n\nMllama, i.e. llama with vision models\n\n\n\nUsage\nCurrently multimodal support is limited and doesn’t have full feature parity. 
To finetune a multimodal Llama w/ LoRA, you’ll need to use the following in YAML in combination with the rest of the required hyperparams.\nbase_model: alpindale/Llama-3.2-11B-Vision-Instruct\nprocessor_type: AutoProcessor\nskip_prepare_dataset: true\n\nchat_template: llama3_2_vision\ndatasets:\n - path: HuggingFaceH4/llava-instruct-mix-vsft\n type: chat_template\n split: train[:1%]\n field_messages: messages\nremove_unused_columns: false\nsample_packing: false\n\n# only finetune the Language model, leave the vision model and vision tower frozen\nlora_target_modules: 'language_model.model.layers.[\\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'" - }, - { - "objectID": "docs/mac.html", - "href": "docs/mac.html", - "title": "Mac M-series", - "section": "", - "text": "Currently Axolotl on Mac is partially usable, many of the dependencies of Axolotl including Pytorch do not support MPS or have incomplete support.\nCurrent support:\n\nSupport for all models\nFull training of models\nLoRA training\nSample packing\nFP16 and BF16 (awaiting AMP support for MPS in Pytorch)\nTri-dao’s flash-attn (until it is supported use spd_attention as an alternative)\nxformers\nbitsandbytes (meaning no 4/8 bits loading and bnb optimizers)\nqlora\nDeepSpeed\n\nUntested: - FSDP", + "text": "You will need to create a configuration for accelerate, either by using accelerate config and follow the instructions or you can use one of the preset below:\n~/.cache/huggingface/accelerate/default_config.yaml\nConfigure your model to use FSDP with for example:", "crumbs": [ "How-To Guides", - "Mac M-series" + "Multi Node" ] }, + { + "objectID": "docs/multi-node.html#machine-configuration", + "href": "docs/multi-node.html#machine-configuration", + "title": "Multi Node", + "section": "Machine configuration", + "text": "Machine configuration\nOn each machine you need a copy of Axolotl, we suggest using the same commit to ensure compatibility.\nYou will also need to have the same configuration file for your model on each machine.\nOn the main machine only, make sure the port you set as main_process_port is open in TCP and reachable by other machines.\nAll you have to do now is launch using accelerate as you would usually do on each machine and voila, the processes will start once you have launched accelerate on every machine.", + "crumbs": [ + "How-To Guides", + "Multi Node" + ] + }, + { + "objectID": "docs/unsloth.html", + "href": "docs/unsloth.html", + "title": "Unsloth", + "section": "", + "text": "Overview\nUnsloth provides hand-written optimized kernels for LLM finetuning that slightly improve speed and VRAM over standard industry baselines.\n\n\nInstallation\nThe following will install the correct unsloth and extras from source.\npython scripts/unsloth_install.py | sh\n\n\nUsing unsloth w Axolotl\nAxolotl exposes a few configuration options to try out unsloth and get most of the performance gains.\nOur unsloth integration is currently limited to the following model architectures: - llama\nThese options are specific to LoRA finetuning and cannot be used for multi-GPU finetuning\nunsloth_lora_mlp: true\nunsloth_lora_qkv: true\nunsloth_lora_o: true\nThese options are composable and can be used with multi-gpu finetuning\nunsloth_cross_entropy_loss: true\nunsloth_rms_norm: true\nunsloth_rope: true\n\n\nLimitations\n\nSingle GPU only; e.g. no multi-gpu support\nNo deepspeed or FSDP support (requires multi-gpu)\nLoRA + QLoRA support only. No full fine tunes or fp8 support.\nLimited model architecture support. 
Llama, Phi, Gemma, Mistral only\nNo MoE support.", + "crumbs": [ + "How-To Guides", + "Unsloth" + ] + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html", + "href": "examples/colab-notebooks/colab-axolotl-example.html", + "title": "Setting up", + "section": "", + "text": "import torch\n# Check so there is a gpu available, a T4(free tier) is enough to run this notebook\nassert (torch.cuda.is_available()==True)\n!pip install --no-build-isolation axolotl[deepspeed]" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#hugging-face-login-optional", + "href": "examples/colab-notebooks/colab-axolotl-example.html#hugging-face-login-optional", + "title": "Setting up", + "section": "Hugging Face login (optional)", + "text": "Hugging Face login (optional)\n\nfrom huggingface_hub import notebook_login\nnotebook_login()" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#example-configuration", + "href": "examples/colab-notebooks/colab-axolotl-example.html#example-configuration", + "title": "Setting up", + "section": "Example configuration", + "text": "Example configuration\n\nimport yaml\n\nyaml_string = \"\"\"\nbase_model: NousResearch/Meta-Llama-3.1-8B\n\nload_in_8bit: false\nload_in_4bit: true\nstrict: false\n\ndatasets:\n - path: tatsu-lab/alpaca\n type: alpaca\ndataset_prepared_path: last_run_prepared\nval_set_size: 0.05\noutput_dir: ./outputs/lora-out\n\nsequence_len: 2048\nsample_packing: true\neval_sample_packing: true\npad_to_sequence_len: true\n\nadapter: qlora\nlora_model_dir:\nlora_r: 32\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_linear: true\nlora_fan_in_fan_out:\nlora_modules_to_save:\n - embed_tokens\n - lm_head\n\nwandb_project:\nwandb_entity:\nwandb_watch:\nwandb_name:\nwandb_log_model:\n\ngradient_accumulation_steps: 2\nmicro_batch_size: 1\nnum_epochs: 1\noptimizer: paged_adamw_8bit\nlr_scheduler: cosine\nlearning_rate: 2e-5\n\ntrain_on_inputs: false\ngroup_by_length: false\nbf16: auto\nfp16:\ntf32: false\n\ngradient_checkpointing: true\nearly_stopping_patience:\nresume_from_checkpoint:\nlogging_steps: 1\nxformers_attention:\nflash_attention: false\nsdp_attention: true\n\nwarmup_steps: 1\nmax_steps: 25\nevals_per_epoch: 1\neval_table_size:\nsaves_per_epoch: 1\ndebug:\ndeepspeed:\nweight_decay: 0.0\nfsdp:\nfsdp_config:\nspecial_tokens:\n pad_token: <|end_of_text|>\n\"\"\"\n\n\n# Convert the YAML string to a Python dictionary\nyaml_dict = yaml.safe_load(yaml_string)\n\n# Specify your file path\nfile_path = 'test_axolotl.yaml'\n\n# Write the YAML file\nwith open(file_path, 'w') as file:\n yaml.dump(yaml_dict, file)\n\nAbove we have a configuration file with base LLM model and datasets specified, among many other things. Axolotl can automatically detect whether the specified datasets are on HuggingFace repo or local machine.\nThe Axolotl configuration options encompass model and dataset selection, data pre-processing, and training. Let’s go through them line by line:\n\n“base model”: String value, specifies the underlying pre-trained LLM that will be used for finetuning\n\nNext we have options for model weights quantization. Quantization allows for reduction in occupied memory on GPUs.\n\n“load_in_8bit”: Boolean value, whether to quantize the model weights into 8-bit integer.\n“load_in_4bit”: Boolean value, whether to quantize the model weights into 4-bit integer.\n“strict”: Boolean value. 
If false, it allows for overriding established configuration options in the yaml file when executing in command-line interface.\n“datasets”: a list of dicts that contain path and type of data sets as well as other optional configurations where datasets are concerned. Supports multiple datasets.\n“val_set_size”: Either a float value less than one or an integer less than the total size of dataset. Sets the size of validation set from the whole dataset. If float, sets the proportion of the dataset assigned for validation. If integer, sets the direct size of validation set.\n“output_dir”: String value. Path of trained model.\n\nFor data preprocessing:\n\n“sequence_len”: Integer. Specifies the maximum sequence length of the input. Typically 2048 or less.\n“pad_to_sequence_len”: Boolean. Padding input to maximum sequence length.\n“sample_packing”: Boolean. Specifies whether to use multi-packing with block diagonal attention.\n“special_tokens”: Python dict, optional. Allows users to specify the additional special tokens to be ignored by the tokenizer.\n\nFor LoRA configuration and its hyperparamters:\n\n“adapter”: String. Either “lora” or “qlora”, depending on user’s choice.\n“lora_model_dir”: String, Optional. Path to directory that contains LoRA model, if there is already a trained LoRA model the user would like to use.\n“lora_r”: Integer. Refers to the rank of LoRA decomposition matrices. Higher value will reduce LoRA efficiency. Recommended to be set to 8.\n“lora_alpha”: Integer. Scale the weight matrices by \\(\\frac{\\text{lora_alpha}}{\\text{lora_r}}\\)Recommended to be fixed at 16.\n“lora_dropout”: Float that is 1 or less. The dropout probability of a lora layer.\n“lora_target_linear”: Boolean. If true, lora will target all linear modules in the transformers architecture.\n“lora_modules_to_save”: If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n\nSee LoRA for detailed explanation of LoRA implementation.\nFor the training configurations:\n\n“gradient_accumulation_steps”: Integer. The number of steps over which to accumulate gradient for batch training. E.g. if 2, backprop is performed every two steps.\n“micro_batch_size”: Integer. Batch size per gpu / gradient_accumulation_steps\n“num_epochs”: Integer. Number of epochs. One epoch is when training has looped over every batch in the whole data set once.\n“optimizer”: The optimizer to use for the training.\n“learning_rate”: The learning rate.\n“lr_scheduler”: The learning rate scheduler to use for adjusting learning rate during training.\n“train_on_inputs”: Boolean. Whether to ignore or include the user’s prompt from the training labels.\n“group_by_length”: Boolean. Whether to group similarly sized data to minimize padding.\n“bf16”: Either “auto”, “true”, or “false”. Whether to use CUDA bf16 floating point format. If set to “auto”, will automatically apply bf16 should the gpu supports it.\n“fp16”: Optional. Specifies whether to use CUDA fp16. Automatically set to true if “bf16” is set to true. Otherwise false.\n“tf32”: Boolean. Whether to use CUDA tf32. Will override bf16.\n“gradient_checkpointing”: Boolean. Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\n“gradient_checkpointing_kwargs”: Python Dict. Fed into the trainer.\n“logging_steps”: Integer. Log training information over every specified number of steps.\n“flash_attention”: Boolean. 
Whether to use the flash attention mechanism.\n“sdp_attention”: Boolean. Whether to use the Scaled Dot Product attention mechanism (the attention mechanism in the original implementation of transformers.)\n“warmup_steps”: Integer. The number of pre-training steps where a very low learning rate is used.\n“evals_per_epoch”: Integer. Number of evaluations to be performed within one training epoch.\n“saves_per_epoch”: Integer. Number of times the model is saved in one training epoch.\n“weight_decay”: Positive Float. Sets the “strength” of weight decay (i.e. setting the coefficient of L2 regularization)\n\nThe above is but a snippet aiming to get users familiarized with the types of streamlined configuration options axolotl provides. For a full list of configuration options, see here\nTrain the model\n\n!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml\n\nPredict with trained model\n\n!accelerate launch -m axolotl.cli.inference /content/test_axolotl.yaml \\\n --lora_model_dir=\"./outputs/lora-out\" --gradio" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#deeper-dive", + "href": "examples/colab-notebooks/colab-axolotl-example.html#deeper-dive", + "title": "Setting up", + "section": "Deeper Dive", + "text": "Deeper Dive\nIt is also helpful to gain some familiarity over some of the core inner workings of axolotl" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#configuration-normalization", + "href": "examples/colab-notebooks/colab-axolotl-example.html#configuration-normalization", + "title": "Setting up", + "section": "Configuration Normalization", + "text": "Configuration Normalization\nAxolotl uses a custom Dict class, called DictDefault to store configurations specified in the yaml configuration file (into a Python variable named cfg). The definition for this custom Dict can be found in the utils/dict.py\nDictDefault is amended such that calling a missing key from it will result in a None return type. This is important because if some configuration options aren’t specified by the user, the None type allows Axolotl to perform boolean operations to determine the default settings for missing configurations. For more examples on how this is done, check out utils/config/init.py" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#loading-models-tokenizers-and-trainer", + "href": "examples/colab-notebooks/colab-axolotl-example.html#loading-models-tokenizers-and-trainer", + "title": "Setting up", + "section": "Loading Models, Tokenizers, and Trainer", + "text": "Loading Models, Tokenizers, and Trainer\nIf we inspect cli.train.py, we will find that most of the heavy lifting were done by the function train() which is itself imported from src/axolotl/train.py.\ntrain() takes care of loading the appropriate tokenizer and pre-trained model through load_model() and load_tokenizer() from src/axolotl/utils/models.py respectively.\nload_tokenizer() loads in the appropriate tokenizer given the desired model, as well as chat templates.\nModelLoader class follows after tokenizer has been selected. It will automatically discern the base model type, load in the desired model, as well as applying model-appropriate attention mechanism modifications (e.g. flash attention). Depending on which base model the user chooses in the configuration, ModelLoader will utilize the corresponding “attention hijacking” script. 
For example, if the user specified the base model to be NousResearch/Meta-Llama-3.1-8B, which is of llama type, and set flash_attn to True, ModelLoader will load in llama_attn_hijack_flash.py. For a list of supported attention hijacking, please refer to the directory /src/axolotl/monkeypatch/\nAnother important operation encompassed in train() is setting up the training that takes into account of user-specified traning configurations (e.g. num_epochs, optimizer) through the use of setup_trainer() from /src/axolotl/utils/trainer.py, which in turn relies on modules from /src/axolotl/core/trainer_builder.py. trainer_builder.py provides a list of trainer object options bespoke for the task type (Causal or Reinforcement learning (‘dpo’, ‘ipo’, ‘kto’) )" + }, + { + "objectID": "examples/colab-notebooks/colab-axolotl-example.html#monkey-patch", + "href": "examples/colab-notebooks/colab-axolotl-example.html#monkey-patch", + "title": "Setting up", + "section": "Monkey patch", + "text": "Monkey patch\nThe Monkey patch directory is where model architecture/optimization patching scripts are stored (these are modifications that are not implemented in the official releases, hence the name monkey patch). It includes attention jacking, ReLoRA, and unsloth optimization." + }, + { + "objectID": "TODO.html", + "href": "TODO.html", + "title": "todo list", + "section": "", + "text": "[] Validation of parameters for combinations that won’t work\n\n\n\n\nFSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" + }, + { + "objectID": "TODO.html#things-that-are-known-not-to-work", + "href": "TODO.html#things-that-are-known-not-to-work", + "title": "todo list", + "section": "", + "text": "FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" + }, + { + "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", + "title": "Axolotl", + "section": "", + "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted material, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. 
All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
+ }, + { + "objectID": "src/axolotl/integrations/LICENSE.html", + "href": "src/axolotl/integrations/LICENSE.html", + "title": "Axolotl", + "section": "", + "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and any individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms and conditions set forth in this Agreement.\n\nDefinitions 1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement. 1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl, which may be licensed separately by their respective authors and/or licensors. 1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc. software located at https://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which permits Plugin Integrations to integrate with the Axolotl service.\nGrant of License 2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge, publish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions: - Licensee must comply with all the terms and conditions of this Agreement. - Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial portions of the Software. 2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions 3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for free or for sale any services, platform, or equivalent to third parties for the purposes of allowing such third parties to fine-tune artificial intelligence models. 3.2 Licensee shall not: - Use the Software for any illegal or unauthorized purpose. - Reverse engineer, decompile, or disassemble the Software. - Remove or modify any copyright, trademark, or other proprietary notices contained in the Software. - Use the Software in a way that could damage, disable, overburden, or impair the functionality of the Software or interfere with any third-party use of the Software. 3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights 4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee acknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to Licensee.\nDisclaimer of Warranty 5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\nTermination 6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and conditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any copies in its possession.\nGoverning Law 7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California, without regards to conflicts of laws provisions thereof.\nEntire Agreement 8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter hereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning the Software, whether written or oral. Axolotl may update the terms of this Agreement from time to time, and Licensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms on a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any material updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be bound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." + }, { "objectID": "index.html", "href": "index.html", @@ -534,189 +643,101 @@ ] }, { - "objectID": "src/axolotl/integrations/LICENSE.html", - "href": "src/axolotl/integrations/LICENSE.html", - "title": "Axolotl", + "objectID": "docs/mac.html", + "href": "docs/mac.html", + "title": "Mac M-series", "section": "", - "text": "AXOLOTL COMMUNITY LICENSE AGREEMENT\nThis Axolotl Community License Agreement (“Agreement”) is entered into by and between Axolotl AI Corp. (“Axolotl”) and any individual or entity (“Licensee”) who wishes to use the Software (as defined below) in accordance with the terms and conditions set forth in this Agreement.\n\nDefinitions 1.1 “Licensee” refers to any individual or entity who has obtained a copy of the Software under this Agreement. 1.2 “Plugin Integration” means independent integration software modules which may or may not be offered by Axolotl, which may be licensed separately by their respective authors and/or licensors. 1.3 “Software” refers to the specific sub-directory of the Axolotl, Inc. software located at https://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations and its subdirectories which permits Plugin Integrations to integrate with the Axolotl service.\nGrant of License 2.1 Axolotl hereby grants Licensee a worldwide, non-exclusive, royalty-free, license to use, copy, modify, merge, publish, distribute, sublicense, and/or otherwise exploit the Software, subject to the following conditions: - Licensee must comply with all the terms and conditions of this Agreement. - Licensee must include the original copyright notice and disclaimer of warranty in all copies or substantial portions of the Software. 2.2 Licensee may use the Software for any lawful purpose, except as restricted in Section 3.\nRestrictions 3.1 Licensee shall not use the Software for any activity that constitutes a commercial activity of offering for free or for sale any services, platform, or equivalent to third parties for the purposes of allowing such third parties to fine-tune artificial intelligence models. 
3.2 Licensee shall not: - Use the Software for any illegal or unauthorized purpose. - Reverse engineer, decompile, or disassemble the Software. - Remove or modify any copyright, trademark, or other proprietary notices contained in the Software. - Use the Software in a way that could damage, disable, overburden, or impair the functionality of the Software or interfere with any third-party use of the Software. 3.3 Axolotl reserves the right to restrict certain Plugin Integrations for use with the Software. To the extent Licensee integrates a permitted, applicable Plugin Integration with the Software, Licensee shall comply with any additional terms and conditions imposed by the licensors of such Plugin Integration for use of such Plugin Integrations. Licensee shall contact Axolotl if it has questions about whether its use of the Software falls beyond the scope of this Agreement.\nIntellectual Property Rights 4.1 Axolotl and its contributors retain all intellectual property rights in and to the Software. Licensee acknowledges that this Agreement does not transfer any ownership rights or intellectual property rights to Licensee.\nDisclaimer of Warranty 5.1 THE SOFTWARE IS PROVIDED “AS IS,” WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\nTermination 6.1 Axolotl may terminate this Agreement at any time if Licensee fails to comply with any of the terms and conditions set forth herein. Upon termination, Licensee shall cease all use of the Software and destroy any copies in its possession.\nGoverning Law 7.1 This Agreement shall be governed by and construed in accordance with the laws of the State of California, without regards to conflicts of laws provisions thereof.\nEntire Agreement 8.1 This Agreement constitutes the entire agreement between Axolotl and Licensee with respect to the subject matter hereof and supersedes all prior or contemporaneous understandings or agreements between the parties concerning the Software, whether written or oral. Axolotl may update the terms of this Agreement from time to time, and Licensee’s continued use of the Software after any such updates shall constitute acceptance of updated terms on a go-forward basis. Axolotl will use commercially reasonable efforts to provide Licensee notice of any material updates. By using the Software, Licensee acknowledges that it has read, understood, and agrees to be bound by the terms and conditions of this Agreement.\n\nThis Agreement was last updated on August 23, 2024." 
- }, - { - "objectID": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "href": "src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html", - "title": "Axolotl", - "section": "", - "text": "Acknowledgements\nPortions of this Cut Cross Entropy Software may utilize the following copyrighted material, the use of which is hereby acknowledged.\n\nPyTorch\nFrom PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions by Arm:\nCopyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\nand IDIAP Research Institute nor the names of its contributors may be\nused to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\nTriton\n/*\n* Copyright 2018-2020 Philippe Tillet\n* Copyright 2020-2022 OpenAI\n*\n* Permission is hereby granted, free of charge, to any person obtaining\n* a copy of this software and associated documentation files\n* (the \"Software\"), to deal in the Software without restriction,\n* including without limitation the rights to use, copy, modify, merge,\n* publish, distribute, sublicense, and/or sell copies of the Software,\n* and to permit persons to whom the Software is furnished to do so,\n* subject to the following conditions:\n*\n* The above copyright notice and this permission notice shall be\n* included in all copies or substantial portions of the Software.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n*/\nTransformers\nCopyright 2018- The Hugging Face team. All rights reserved.\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." 
- }, - { - "objectID": "TODO.html", - "href": "TODO.html", - "title": "todo list", - "section": "", - "text": "[] Validation of parameters for combinations that won’t work\n\n\n\n\nFSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" - }, - { - "objectID": "TODO.html#things-that-are-known-not-to-work", - "href": "TODO.html#things-that-are-known-not-to-work", - "title": "todo list", - "section": "", - "text": "FSDP offload and gradient_checkpointing - https://github.com/pytorch/pytorch/issues/82203\nadamw_bnb_8bit doesn’t play well with FSDP offload" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html", - "href": "examples/colab-notebooks/colab-axolotl-example.html", - "title": "Setting up", - "section": "", - "text": "import torch\n# Check so there is a gpu available, a T4(free tier) is enough to run this notebook\nassert (torch.cuda.is_available()==True)\n!pip install --no-build-isolation axolotl[deepspeed]" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#hugging-face-login-optional", - "href": "examples/colab-notebooks/colab-axolotl-example.html#hugging-face-login-optional", - "title": "Setting up", - "section": "Hugging Face login (optional)", - "text": "Hugging Face login (optional)\n\nfrom huggingface_hub import notebook_login\nnotebook_login()" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#example-configuration", - "href": "examples/colab-notebooks/colab-axolotl-example.html#example-configuration", - "title": "Setting up", - "section": "Example configuration", - "text": "Example configuration\n\nimport yaml\n\nyaml_string = \"\"\"\nbase_model: NousResearch/Meta-Llama-3.1-8B\n\nload_in_8bit: false\nload_in_4bit: true\nstrict: false\n\ndatasets:\n - path: tatsu-lab/alpaca\n type: alpaca\ndataset_prepared_path: last_run_prepared\nval_set_size: 0.05\noutput_dir: ./outputs/lora-out\n\nsequence_len: 2048\nsample_packing: true\neval_sample_packing: true\npad_to_sequence_len: true\n\nadapter: qlora\nlora_model_dir:\nlora_r: 32\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_linear: true\nlora_fan_in_fan_out:\nlora_modules_to_save:\n - embed_tokens\n - lm_head\n\nwandb_project:\nwandb_entity:\nwandb_watch:\nwandb_name:\nwandb_log_model:\n\ngradient_accumulation_steps: 2\nmicro_batch_size: 1\nnum_epochs: 1\noptimizer: paged_adamw_8bit\nlr_scheduler: cosine\nlearning_rate: 2e-5\n\ntrain_on_inputs: false\ngroup_by_length: false\nbf16: auto\nfp16:\ntf32: false\n\ngradient_checkpointing: true\nearly_stopping_patience:\nresume_from_checkpoint:\nlogging_steps: 1\nxformers_attention:\nflash_attention: false\nsdp_attention: true\n\nwarmup_steps: 1\nmax_steps: 25\nevals_per_epoch: 1\neval_table_size:\nsaves_per_epoch: 1\ndebug:\ndeepspeed:\nweight_decay: 0.0\nfsdp:\nfsdp_config:\nspecial_tokens:\n pad_token: <|end_of_text|>\n\"\"\"\n\n\n# Convert the YAML string to a Python dictionary\nyaml_dict = yaml.safe_load(yaml_string)\n\n# Specify your file path\nfile_path = 'test_axolotl.yaml'\n\n# Write the YAML file\nwith open(file_path, 'w') as file:\n yaml.dump(yaml_dict, file)\n\nAbove we have a configuration file with base LLM model and datasets specified, among many other things. Axolotl can automatically detect whether the specified datasets are on HuggingFace repo or local machine.\nThe Axolotl configuration options encompass model and dataset selection, data pre-processing, and training. 
Let’s go through them line by line:\n\n“base model”: String value, specifies the underlying pre-trained LLM that will be used for finetuning\n\nNext we have options for model weights quantization. Quantization allows for reduction in occupied memory on GPUs.\n\n“load_in_8bit”: Boolean value, whether to quantize the model weights into 8-bit integer.\n“load_in_4bit”: Boolean value, whether to quantize the model weights into 4-bit integer.\n“strict”: Boolean value. If false, it allows for overriding established configuration options in the yaml file when executing in command-line interface.\n“datasets”: a list of dicts that contain path and type of data sets as well as other optional configurations where datasets are concerned. Supports multiple datasets.\n“val_set_size”: Either a float value less than one or an integer less than the total size of dataset. Sets the size of validation set from the whole dataset. If float, sets the proportion of the dataset assigned for validation. If integer, sets the direct size of validation set.\n“output_dir”: String value. Path of trained model.\n\nFor data preprocessing:\n\n“sequence_len”: Integer. Specifies the maximum sequence length of the input. Typically 2048 or less.\n“pad_to_sequence_len”: Boolean. Padding input to maximum sequence length.\n“sample_packing”: Boolean. Specifies whether to use multi-packing with block diagonal attention.\n“special_tokens”: Python dict, optional. Allows users to specify the additional special tokens to be ignored by the tokenizer.\n\nFor LoRA configuration and its hyperparamters:\n\n“adapter”: String. Either “lora” or “qlora”, depending on user’s choice.\n“lora_model_dir”: String, Optional. Path to directory that contains LoRA model, if there is already a trained LoRA model the user would like to use.\n“lora_r”: Integer. Refers to the rank of LoRA decomposition matrices. Higher value will reduce LoRA efficiency. Recommended to be set to 8.\n“lora_alpha”: Integer. Scale the weight matrices by \\(\\frac{\\text{lora_alpha}}{\\text{lora_r}}\\)Recommended to be fixed at 16.\n“lora_dropout”: Float that is 1 or less. The dropout probability of a lora layer.\n“lora_target_linear”: Boolean. If true, lora will target all linear modules in the transformers architecture.\n“lora_modules_to_save”: If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.\n\nSee LoRA for detailed explanation of LoRA implementation.\nFor the training configurations:\n\n“gradient_accumulation_steps”: Integer. The number of steps over which to accumulate gradient for batch training. E.g. if 2, backprop is performed every two steps.\n“micro_batch_size”: Integer. Batch size per gpu / gradient_accumulation_steps\n“num_epochs”: Integer. Number of epochs. One epoch is when training has looped over every batch in the whole data set once.\n“optimizer”: The optimizer to use for the training.\n“learning_rate”: The learning rate.\n“lr_scheduler”: The learning rate scheduler to use for adjusting learning rate during training.\n“train_on_inputs”: Boolean. Whether to ignore or include the user’s prompt from the training labels.\n“group_by_length”: Boolean. Whether to group similarly sized data to minimize padding.\n“bf16”: Either “auto”, “true”, or “false”. Whether to use CUDA bf16 floating point format. If set to “auto”, will automatically apply bf16 should the gpu supports it.\n“fp16”: Optional. Specifies whether to use CUDA fp16. Automatically set to true if “bf16” is set to true. 
Otherwise false.\n“tf32”: Boolean. Whether to use CUDA tf32. Will override bf16.\n“gradient_checkpointing”: Boolean. Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing\n“gradient_checkpointing_kwargs”: Python Dict. Fed into the trainer.\n“logging_steps”: Integer. Log training information over every specified number of steps.\n“flash_attention”: Boolean. Whether to use the flash attention mechanism.\n“sdp_attention”: Boolean. Whether to use the Scaled Dot Product attention mechanism (the attention mechanism in the original implementation of transformers.)\n“warmup_steps”: Integer. The number of pre-training steps where a very low learning rate is used.\n“evals_per_epoch”: Integer. Number of evaluations to be performed within one training epoch.\n“saves_per_epoch”: Integer. Number of times the model is saved in one training epoch.\n“weight_decay”: Positive Float. Sets the “strength” of weight decay (i.e. setting the coefficient of L2 regularization)\n\nThe above is but a snippet aiming to get users familiarized with the types of streamlined configuration options axolotl provides. For a full list of configuration options, see here\nTrain the model\n\n!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml\n\nPredict with trained model\n\n!accelerate launch -m axolotl.cli.inference /content/test_axolotl.yaml \\\n --lora_model_dir=\"./outputs/lora-out\" --gradio" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#deeper-dive", - "href": "examples/colab-notebooks/colab-axolotl-example.html#deeper-dive", - "title": "Setting up", - "section": "Deeper Dive", - "text": "Deeper Dive\nIt is also helpful to gain some familiarity over some of the core inner workings of axolotl" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#configuration-normalization", - "href": "examples/colab-notebooks/colab-axolotl-example.html#configuration-normalization", - "title": "Setting up", - "section": "Configuration Normalization", - "text": "Configuration Normalization\nAxolotl uses a custom Dict class, called DictDefault to store configurations specified in the yaml configuration file (into a Python variable named cfg). The definition for this custom Dict can be found in the utils/dict.py\nDictDefault is amended such that calling a missing key from it will result in a None return type. This is important because if some configuration options aren’t specified by the user, the None type allows Axolotl to perform boolean operations to determine the default settings for missing configurations. 
For more examples on how this is done, check out utils/config/init.py" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#loading-models-tokenizers-and-trainer", - "href": "examples/colab-notebooks/colab-axolotl-example.html#loading-models-tokenizers-and-trainer", - "title": "Setting up", - "section": "Loading Models, Tokenizers, and Trainer", - "text": "Loading Models, Tokenizers, and Trainer\nIf we inspect cli.train.py, we will find that most of the heavy lifting were done by the function train() which is itself imported from src/axolotl/train.py.\ntrain() takes care of loading the appropriate tokenizer and pre-trained model through load_model() and load_tokenizer() from src/axolotl/utils/models.py respectively.\nload_tokenizer() loads in the appropriate tokenizer given the desired model, as well as chat templates.\nModelLoader class follows after tokenizer has been selected. It will automatically discern the base model type, load in the desired model, as well as applying model-appropriate attention mechanism modifications (e.g. flash attention). Depending on which base model the user chooses in the configuration, ModelLoader will utilize the corresponding “attention hijacking” script. For example, if the user specified the base model to be NousResearch/Meta-Llama-3.1-8B, which is of llama type, and set flash_attn to True, ModelLoader will load in llama_attn_hijack_flash.py. For a list of supported attention hijacking, please refer to the directory /src/axolotl/monkeypatch/\nAnother important operation encompassed in train() is setting up the training that takes into account of user-specified traning configurations (e.g. num_epochs, optimizer) through the use of setup_trainer() from /src/axolotl/utils/trainer.py, which in turn relies on modules from /src/axolotl/core/trainer_builder.py. trainer_builder.py provides a list of trainer object options bespoke for the task type (Causal or Reinforcement learning (‘dpo’, ‘ipo’, ‘kto’) )" - }, - { - "objectID": "examples/colab-notebooks/colab-axolotl-example.html#monkey-patch", - "href": "examples/colab-notebooks/colab-axolotl-example.html#monkey-patch", - "title": "Setting up", - "section": "Monkey patch", - "text": "Monkey patch\nThe Monkey patch directory is where model architecture/optimization patching scripts are stored (these are modifications that are not implemented in the official releases, hence the name monkey patch). It includes attention jacking, ReLoRA, and unsloth optimization." - }, - { - "objectID": "docs/unsloth.html", - "href": "docs/unsloth.html", - "title": "Unsloth", - "section": "", - "text": "Overview\nUnsloth provides hand-written optimized kernels for LLM finetuning that slightly improve speed and VRAM over standard industry baselines.\n\n\nInstallation\nThe following will install the correct unsloth and extras from source.\npython scripts/unsloth_install.py | sh\n\n\nUsing unsloth w Axolotl\nAxolotl exposes a few configuration options to try out unsloth and get most of the performance gains.\nOur unsloth integration is currently limited to the following model architectures: - llama\nThese options are specific to LoRA finetuning and cannot be used for multi-GPU finetuning\nunsloth_lora_mlp: true\nunsloth_lora_qkv: true\nunsloth_lora_o: true\nThese options are composable and can be used with multi-gpu finetuning\nunsloth_cross_entropy_loss: true\nunsloth_rms_norm: true\nunsloth_rope: true\n\n\nLimitations\n\nSingle GPU only; e.g. 
no multi-gpu support\nNo deepspeed or FSDP support (requires multi-gpu)\nLoRA + QLoRA support only. No full fine tunes or fp8 support.\nLimited model architecture support. Llama, Phi, Gemma, Mistral only\nNo MoE support.", + "text": "Currently Axolotl on Mac is partially usable, many of the dependencies of Axolotl including Pytorch do not support MPS or have incomplete support.\nCurrent support:\n\nSupport for all models\nFull training of models\nLoRA training\nSample packing\nFP16 and BF16 (awaiting AMP support for MPS in Pytorch)\nTri-dao’s flash-attn (until it is supported use spd_attention as an alternative)\nxformers\nbitsandbytes (meaning no 4/8 bits loading and bnb optimizers)\nqlora\nDeepSpeed\n\nUntested: - FSDP", "crumbs": [ "How-To Guides", - "Unsloth" + "Mac M-series" ] }, { - "objectID": "docs/multi-node.html", - "href": "docs/multi-node.html", - "title": "Multi Node", + "objectID": "docs/multimodal.html", + "href": "docs/multimodal.html", + "title": "MultiModal / Vision Language Models (BETA)", "section": "", - "text": "You will need to create a configuration for accelerate, either by using accelerate config and follow the instructions or you can use one of the preset below:\n~/.cache/huggingface/accelerate/default_config.yaml\nConfigure your model to use FSDP with for example:", - "crumbs": [ - "How-To Guides", - "Multi Node" - ] + "text": "MultiModal / Vision Language Models (BETA)\n\nSupported Models\n\nMllama, i.e. llama with vision models\n\n\n\nUsage\nCurrently multimodal support is limited and doesn’t have full feature parity. To finetune a multimodal Llama w/ LoRA, you’ll need to use the following in YAML in combination with the rest of the required hyperparams.\nbase_model: alpindale/Llama-3.2-11B-Vision-Instruct\nprocessor_type: AutoProcessor\nskip_prepare_dataset: true\n\nchat_template: llama3_2_vision\ndatasets:\n - path: HuggingFaceH4/llava-instruct-mix-vsft\n type: chat_template\n split: train[:1%]\n field_messages: messages\nremove_unused_columns: false\nsample_packing: false\n\n# only finetune the Language model, leave the vision model and vision tower frozen\nlora_target_modules: 'language_model.model.layers.[\\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'" }, { - "objectID": "docs/multi-node.html#machine-configuration", - "href": "docs/multi-node.html#machine-configuration", - "title": "Multi Node", - "section": "Machine configuration", - "text": "Machine configuration\nOn each machine you need a copy of Axolotl, we suggest using the same commit to ensure compatibility.\nYou will also need to have the same configuration file for your model on each machine.\nOn the main machine only, make sure the port you set as main_process_port is open in TCP and reachable by other machines.\nAll you have to do now is launch using accelerate as you would usually do on each machine and voila, the processes will start once you have launched accelerate on every machine.", - "crumbs": [ - "How-To Guides", - "Multi Node" - ] - }, - { - "objectID": "docs/faq.html", - "href": "docs/faq.html", - "title": "FAQ", + "objectID": "docs/batch_vs_grad.html", + "href": "docs/batch_vs_grad.html", + "title": "Batch size vs Gradient accumulation", "section": "", - "text": "Q: The trainer stopped and hasn’t progressed in several minutes.\n\nA: Usually an issue with the GPUs communicating with each other. 
See the NCCL doc\n\nQ: Exitcode -9\n\nA: This usually happens when you run out of system RAM.\n\nQ: Exitcode -7 while using deepspeed\n\nA: Try upgrading deepspeed w: pip install -U deepspeed\n\nQ: AttributeError: ‘DummyOptim’ object has no attribute ‘step’\n\nA: You may be using deepspeed with single gpu. Please don’t set deepspeed: in yaml or cli.", - "crumbs": [ - "FAQ" - ] + "text": "Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1: Micro batch size: 3 Gradient accumulation steps: 2 Number of GPUs: 3 Total batch size = 3 * 2 * 3 = 18\n| GPU 1 | GPU 2 | GPU 3 |\n|----------------|----------------|----------------|\n| S1, S2, S3 | S4, S5, S6 | S7, S8, S9 |\n| e1, e2, e3 | e4, e5, e6 | e7, e8, e9 |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12 | S13, S14, S15 | S16, S17, S18 |\n| e10, e11, e12 | e13, e14, e15 | e16, e17, e18 |\n|----------------|----------------|----------------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2: Micro batch size: 2 Gradient accumulation steps: 1 Number of GPUs: 3 Total batch size = 2 * 1 * 3 = 6\n| GPU 1 | GPU 2 | GPU 3 |\n|-----------|-----------|-----------|\n| S1, S2 | S3, S4 | S5, S6 |\n| e1, e2 | e3, e4 | e5, e6 |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)" }, { - "objectID": "docs/debugging.html", - "href": "docs/debugging.html", - "title": "Debugging", + "objectID": "docs/dataset_preprocessing.html", + "href": "docs/dataset_preprocessing.html", + "title": "Dataset Preprocessing", "section": "", - "text": "This document provides some tips and tricks for debugging Axolotl. 
It also provides an example configuration for debugging with VSCode. A good debugging setup is essential to understanding how Axolotl code works behind the scenes.", + "text": "Dataset pre-processing is the step where Axolotl takes each dataset you’ve configured alongside the (dataset format)[../dataset-formats/] and prompt strategies to: - parse the dataset based on the dataset format - transform the dataset to how you would interact with the model based on the prompt strategy - tokenize the dataset based on the configured model & tokenizer - shuffle and merge multiple datasets together if using more than one\nThe processing of the datasets can happen one of two ways:\n\nBefore kicking off training by calling python -m axolotl.cli.preprocess /path/to/your.yaml --debug\nWhen training is started\n\nWhat are the benefits of pre-processing? When training interactively or for sweeps (e.g. you are restarting the trainer often), processing the datasets can oftentimes be frustratingly slow. Pre-processing will cache the tokenized/formatted datasets according to a hash of dependent training parameters so that it will intelligently pull from its cache when possible.\nThe path of the cache is controlled by dataset_prepared_path: and is often left blank in example YAMLs as this leads to a more robust solution that prevents unexpectedly reusing cached data.\nIf dataset_prepared_path: is left empty, when training, the processed dataset will be cached in a default path of ./last_run_prepared/, but will ignore anything already cached there. By explicitly setting dataset_prepared_path: ./last_run_prepared, the trainer will use whatever pre-processed data is in the cache.\nWhat are the edge cases? Let’s say you are writing a custom prompt strategy or using a user-defined prompt template. Because the trainer cannot readily detect these changes, we cannot change the calculated hash value for the pre-processed dataset. If you have dataset_prepared_path: ... set and change your prompt templating logic, it may not pick up the changes you made and you will be training over the old prompt." + }, + { + "objectID": "docs/fsdp_qlora.html", + "href": "docs/fsdp_qlora.html", + "title": "FDSP + QLoRA", + "section": "", + "text": "Using FSDP with QLoRA is essential for fine-tuning larger (70b+ parameter) LLMs on consumer GPUs. For example, you can use FSDP + QLoRA to train a 70b model on two 24GB GPUs1.\nBelow, we describe how to use this feature in Axolotl.", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { - "objectID": "docs/debugging.html#table-of-contents", - "href": "docs/debugging.html#table-of-contents", - "title": "Debugging", - "section": "Table of Contents", - "text": "Table of Contents\n\nGeneral Tips\nDebugging with VSCode\n\nBackground\nConfiguration\nCustomizing your debugger\nVideo Tutorial\n\nDebugging With Docker\n\nSetup\nAttach To Container\nVideo - Attaching To Docker On Remote Host", + "objectID": "docs/fsdp_qlora.html#background", + "href": "docs/fsdp_qlora.html#background", + "title": "FDSP + QLoRA", + "section": "", + "text": "Using FSDP with QLoRA is essential for fine-tuning larger (70b+ parameter) LLMs on consumer GPUs. 
For example, you can use FSDP + QLoRA to train a 70b model on two 24GB GPUs1.\nBelow, we describe how to use this feature in Axolotl.", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { - "objectID": "docs/debugging.html#general-tips", - "href": "docs/debugging.html#general-tips", - "title": "Debugging", - "section": "General Tips", - "text": "General Tips\nWhile debugging it’s helpful to simplify your test scenario as much as possible. Here are some tips for doing so:\n\n[!Important] All of these tips are incorporated into the example configuration for debugging with VSCode below.\n\n\nMake sure you are using the latest version of axolotl: This project changes often and bugs get fixed fast. Check your git branch and make sure you have pulled the latest changes from main.\nEliminate concurrency: Restrict the number of processes to 1 for both training and data preprocessing:\n\nSet CUDA_VISIBLE_DEVICES to a single GPU, ex: export CUDA_VISIBLE_DEVICES=0.\nSet dataset_processes: 1 in your axolotl config or run the training command with --dataset_processes=1.\n\nUse a small dataset: Construct or use a small dataset from HF Hub. When using a small dataset, you will often have to make sure sample_packing: False and eval_sample_packing: False to avoid errors. If you are in a pinch and don’t have time to construct a small dataset but want to use from the HF Hub, you can shard the data (this will still tokenize the entire dataset, but will only use a fraction of the data for training. For example, to shard the dataset into 20 pieces, add the following to your axolotl config): yaml dataset: ... shards: 20\nUse a small model: A good example of a small model is TinyLlama/TinyLlama-1.1B-Chat-v1.0.\nMinimize iteration time: Make sure the training loop finishes as fast as possible, with these settings.\n\nmicro_batch_size: 1\nmax_steps: 1\nval_set_size: 0\n\nClear Caches: Axolotl caches certain steps and so does the underlying HuggingFace trainer. You may want to clear some of these caches when debugging.\n\nData preprocessing: When debugging data preprocessing, which includes prompt template formation, you may want to delete the directory set in dataset_prepared_path: in your axolotl config. If you didn’t set this value, the default is last_run_prepared.\nHF Hub: If you are debugging data preprocessing, you should clear the relevant HF cache HuggingFace cache, by deleting the appropriate ~/.cache/huggingface/datasets/... folder(s).\nThe recommended approach is to redirect all outputs and caches to a temporary folder and delete selected subfolders before each run. This is demonstrated in the example configuration below.", + "objectID": "docs/fsdp_qlora.html#usage", + "href": "docs/fsdp_qlora.html#usage", + "title": "FDSP + QLoRA", + "section": "Usage", + "text": "Usage\nTo enable QLoRA with FSDP, you need to perform the following steps:\n\n![Tip] See the example config file in addition to reading these instructions.\n\n\nSet adapter: qlora in your axolotl config file.\nEnable FSDP in your axolotl config, as described here.\nUse one of the supported model types: llama, mistral or mixtral.", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { - "objectID": "docs/debugging.html#debugging-with-vscode", - "href": "docs/debugging.html#debugging-with-vscode", - "title": "Debugging", - "section": "Debugging with VSCode", - "text": "Debugging with VSCode\n\nBackground\nThe below example shows how to configure VSCode to debug data preprocessing of the chat_template format. 
This is the format used when you have the following in your axolotl config:\ndatasets:\n - path: <path to your chat_template formatted dataset> # example on HF Hub: fozziethebeat/alpaca_messages_2k_test\n type: chat_template\n\n[!Important] If you are already familiar with advanced VSCode debugging, you can skip the below explanation and look at the files .vscode/launch.json and .vscode/tasks.json for an example configuration.\n\n\n[!Tip] If you prefer to watch a video, rather than read, you can skip to the video tutorial below (but doing both is recommended).\n\n\n\nSetup\nMake sure you have an editable install of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\nRemote Hosts\nIf you developing on a remote host, you can easily use VSCode to debug remotely. To do so, you will need to follow this remote - SSH guide. You can also see the video below on Docker and Remote SSH debugging.\n\n\n\nConfiguration\nThe easiest way to get started is to modify the .vscode/launch.json file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.\nFor example, to mimic the command cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml, you would use the below configuration1. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to devtools and set the env variable HF_HOME to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.\n// .vscode/launch.json\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Debug axolotl prompt - chat_template\",\n \"type\": \"python\",\n \"module\": \"accelerate.commands.launch\",\n \"request\": \"launch\",\n \"args\": [\n \"-m\", \"axolotl.cli.train\", \"dev_chat_template.yml\",\n // The flags below simplify debugging by overriding the axolotl config\n // with the debugging tips above. 
Modify as needed.\n \"--dataset_processes=1\", // limits data preprocessing to one process\n \"--max_steps=1\", // limits training to just one step\n \"--batch_size=1\", // minimizes batch size\n \"--micro_batch_size=1\", // minimizes batch size\n \"--val_set_size=0\", // disables validation\n \"--sample_packing=False\", // disables sample packing which is necessary for small datasets\n \"--eval_sample_packing=False\",// disables sample packing on eval set\n \"--dataset_prepared_path=temp_debug/axolotl_outputs/data\", // send data outputs to a temp folder\n \"--output_dir=temp_debug/axolotl_outputs/model\" // send model outputs to a temp folder\n ],\n \"console\": \"integratedTerminal\", // show output in the integrated terminal\n \"cwd\": \"${workspaceFolder}/devtools\", // set working directory to devtools from the root of the project\n \"justMyCode\": true, // step through only axolotl code\n \"env\": {\"CUDA_VISIBLE_DEVICES\": \"0\", // Since we aren't doing distributed training, we need to limit to one GPU\n \"HF_HOME\": \"${workspaceFolder}/devtools/temp_debug/.hf-cache\"}, // send HF cache to a temp folder\n \"preLaunchTask\": \"cleanup-for-dataprep\", // delete temp folders (see below)\n }\n ]\n}\nAdditional notes about this configuration:\n\nThe argument justMyCode is set to true such that you step through only the axolotl code. If you want to step into dependencies, set this to false.\nThe preLaunchTask: cleanup-for-dataprep is defined in .vscode/tasks.json and is used to delete the following folders before debugging, which is essential to ensure that the data pre-processing code is run from scratch:\n\n./devtools/temp_debug/axolotl_outputs\n./devtools/temp_debug/.hf-cache/datasets\n\n\n\n[!Tip] You may not want to delete these folders. For example, if you are debugging model training instead of data pre-processing, you may NOT want to delete the cache or output folders. You may also need to add additional tasks to the tasks.json file depending on your use case.\n\nBelow is the ./vscode/tasks.json file that defines the cleanup-for-dataprep task. This task is run before each debugging session when you use the above configuration. Note how there are two tasks that delete the two folders mentioned above. The third task cleanup-for-dataprep is a composite task that combines the two tasks. A composite task is necessary because VSCode does not allow you to specify multiple tasks in the preLaunchTask argument of the launch.json file.\n// .vscode/tasks.json\n// this file is used by launch.json\n{\n \"version\": \"2.0.0\",\n \"tasks\": [\n // this task changes into the devtools directory and deletes the temp_debug/axolotl_outputs folder\n {\n \"label\": \"delete-outputs\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/axolotl_outputs\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task changes into the devtools directory and deletes the `temp_debug/.hf-cache/datasets` folder\n {\n \"label\": \"delete-temp-hf-dataset-cache\",\n \"type\": \"shell\",\n \"command\": \"rm -rf temp_debug/.hf-cache/datasets\",\n \"options\":{ \"cwd\": \"${workspaceFolder}/devtools\"},\n \"problemMatcher\": []\n },\n // this task combines the two tasks above\n {\n \"label\": \"cleanup-for-dataprep\",\n \"dependsOn\": [\"delete-outputs\", \"delete-temp-hf-dataset-cache\"],\n }\n ]\n}\n\n\nCustomizing your debugger\nYour debugging use case may differ from the example above. 
The easiest thing to do is to put your own axolotl config in the devtools folder and modify the launch.json file to use your config. You may also want to modify the preLaunchTask to delete different folders or not delete anything at all.\n\n\nVideo Tutorial\nThe following video tutorial walks through the above configuration and demonstrates how to debug with VSCode, (click the image below to watch):\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl w/VSCode", + "objectID": "docs/fsdp_qlora.html#example-config", + "href": "docs/fsdp_qlora.html#example-config", + "title": "FDSP + QLoRA", + "section": "Example Config", + "text": "Example Config\nexamples/llama-2/qlora-fsdp.yml contains an example of how to enable QLoRA + FSDP in axolotl.", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { - "objectID": "docs/debugging.html#debugging-with-docker", - "href": "docs/debugging.html#debugging-with-docker", - "title": "Debugging", - "section": "Debugging With Docker", - "text": "Debugging With Docker\nUsing official Axolotl Docker images is a great way to debug your code, and is a very popular way to use Axolotl. Attaching VSCode to Docker takes a few more steps.\n\nSetup\nOn the host that is running axolotl (ex: if you are using a remote host), clone the axolotl repo and change your current directory to the root:\ngit clone https://github.com/axolotl-ai-cloud/axolotl\ncd axolotl\n\n[!Tip] If you already have axolotl cloned on your host, make sure you have the latest changes and change into the root of the project.\n\nNext, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:2\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl:main-py3.10-cu118-2.0.1\n\n[!Tip] To understand which containers are available, see the Docker section of the README and the DockerHub repo. For details of how the Docker containers are built, see axolotl’s Docker CI builds.\n\nYou will now be in the container. Next, perform an editable install of Axolotl:\npip3 install packaging\npip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'\n\n\nAttach To Container\nNext, if you are using a remote host, Remote into this host with VSCode. If you are using a local host, you can skip this step.\nNext, select Dev Containers: Attach to Running Container... using the command palette (CMD + SHIFT + P) in VSCode. You will be prompted to select a container to attach to. Select the container you just created. You will now be in the container with a working directory that is at the root of the project. 
Any changes you make to the code will be reflected both in the container and on the host.\nNow you are ready to debug as described above (see Debugging with VSCode).\n\n\nVideo - Attaching To Docker On Remote Host\nHere is a short video that demonstrates how to attach to a Docker container on a remote host:\n\n\n\nHamel Husain’s tutorial: Debugging Axolotl Part 2: Attaching to Docker on a Remote Host", + "objectID": "docs/fsdp_qlora.html#references", + "href": "docs/fsdp_qlora.html#references", + "title": "FDSP + QLoRA", + "section": "References", + "text": "References\n\nPR #1378 enabling QLoRA in FSDP in Axolotl.\nBlog Post from the Answer.AI team describing the work that enabled QLoRA in FSDP.\nRelated HuggingFace PRs Enabling FDSP + QLoRA:\n\nAccelerate PR#2544\nTransformers PR#29587\nTRL PR#1416\nPEFT PR#1550", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { - "objectID": "docs/debugging.html#footnotes", - "href": "docs/debugging.html#footnotes", - "title": "Debugging", + "objectID": "docs/fsdp_qlora.html#footnotes", + "href": "docs/fsdp_qlora.html#footnotes", + "title": "FDSP + QLoRA", "section": "Footnotes", - "text": "Footnotes\n\n\nThe config actually mimics the command CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/chat_template.yml, but this is the same thing.↩︎\nMany of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. You can read more about these flags here.↩︎", + "text": "Footnotes\n\n\nThis was enabled by this work from the Answer.AI team.↩︎", "crumbs": [ "How-To Guides", - "Debugging" + "FDSP + QLoRA" ] }, { diff --git a/sitemap.xml b/sitemap.xml index 8b975c054..8e4eaa97b 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,114 +2,118 @@ https://axolotl-ai-cloud.github.io/axolotl/FAQS.html - 2025-01-24T17:55:32.133Z + 2025-01-24T17:56:38.669Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/index.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/template_free.html - 2025-01-24T17:55:32.135Z + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/amd_hpc.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.670Z https://axolotl-ai-cloud.github.io/axolotl/docs/input_output.html - 2025-01-24T17:55:32.136Z + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/config.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.670Z - https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html - 2025-01-24T17:55:32.135Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html - 2025-01-24T17:55:32.135Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html - 2025-01-24T17:55:32.134Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html - 2025-01-24T17:55:32.136Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html - 2025-01-24T17:55:32.136Z - - - https://axolotl-ai-cloud.github.io/axolotl/index.html - 2025-01-24T17:55:32.148Z - - - https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html - 2025-01-24T17:55:32.150Z - - - https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html - 2025-01-24T17:55:32.150Z - - - https://axolotl-ai-cloud.github.io/axolotl/TODO.html - 
2025-01-24T17:55:32.133Z - - - https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html - 2025-01-24T17:55:32.137Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html - 2025-01-24T17:55:32.136Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html - 2025-01-24T17:55:32.136Z - - - https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html - 2025-01-24T17:55:32.135Z + https://axolotl-ai-cloud.github.io/axolotl/docs/lr_groups.html + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html - 2025-01-24T17:55:32.135Z + 2025-01-24T17:56:38.671Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html + 2025-01-24T17:56:38.671Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html + 2025-01-24T17:56:38.672Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/unsloth.html + 2025-01-24T17:56:38.672Z + + + https://axolotl-ai-cloud.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html + 2025-01-24T17:56:38.673Z + + + https://axolotl-ai-cloud.github.io/axolotl/TODO.html + 2025-01-24T17:56:38.669Z + + + https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/cut_cross_entropy/ACKNOWLEDGEMENTS.html + 2025-01-24T17:56:38.687Z + + + https://axolotl-ai-cloud.github.io/axolotl/src/axolotl/integrations/LICENSE.html + 2025-01-24T17:56:38.686Z + + + https://axolotl-ai-cloud.github.io/axolotl/index.html + 2025-01-24T17:56:38.684Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/mac.html + 2025-01-24T17:56:38.672Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/multimodal.html + 2025-01-24T17:56:38.672Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/batch_vs_grad.html + 2025-01-24T17:56:38.670Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/dataset_preprocessing.html + 2025-01-24T17:56:38.671Z + + + https://axolotl-ai-cloud.github.io/axolotl/docs/fsdp_qlora.html + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/rlhf.html - 2025-01-24T17:55:32.136Z + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html - 2025-01-24T17:55:32.136Z + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/nccl.html - 2025-01-24T17:55:32.136Z + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/torchao.html - 2025-01-24T17:55:32.136Z + 2025-01-24T17:56:38.672Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/tokenized.html - 2025-01-24T17:55:32.135Z + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/pretraining.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.671Z https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/conversation.html - 2025-01-24T17:55:32.134Z + 2025-01-24T17:56:38.671Z