diff --git a/.nojekyll b/.nojekyll
index f5286d857..c66f02374 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-adaa8fa1
\ No newline at end of file
+86fed2ad
\ No newline at end of file
diff --git a/docs/dataset-formats/index.html b/docs/dataset-formats/index.html
index ecff3cea9..1ae415c63 100644
--- a/docs/dataset-formats/index.html
+++ b/docs/dataset-formats/index.html
@@ -351,7 +351,7 @@
 Description
-
+
 Conversation
@@ -359,7 +359,7 @@
 Description
 Conversation format for supervised fine-tuning.
-
+
 Instruction Tuning
@@ -367,7 +367,7 @@
 Description
 Instruction tuning formats for supervised fine-tuning.
-
+
 Pre-training
@@ -375,7 +375,7 @@
 Description
 Data format for a pre-training completion task.
-
+
 Template-Free
@@ -383,7 +383,7 @@
 Description
 Construct prompts without a template.
-
+
 Custom Pre-Tokenized Dataset
diff --git a/index.html b/index.html
index e3d1504f0..cdee17e1a 100644
--- a/index.html
+++ b/index.html
@@ -498,7 +498,7 @@ pre > code.sourceCode > span > a:first-child::before { text-decoration: underlin
git clone https://github.com/OpenAccess-AI-Collective/axolotl
 cd axolotl
 
-pip3 install packaging
+pip3 install packaging ninja
 pip3 install -e '.[flash-attn,deepspeed]'

Usage

@@ -941,7 +941,7 @@ w1_new = w1_old - learning rate × (Total gradient for w1 / 6)
git clone https://github.com/OpenAccess-AI-Collective/axolotl
 cd axolotl
 
-pip3 install packaging
+pip3 install packaging ninja
 pip3 install -e '.[flash-attn,deepspeed]'
 
 pip3 install -r requirements-dev.txt -r requirements-tests.txt
diff --git a/search.json b/search.json
index b5a04ba7f..ea6b202d2 100644
--- a/search.json
+++ b/search.json
@@ -641,7 +641,7 @@
     "href": "index.html",
     "title": "Axolotl",
     "section": "",
-    "text": "Axolotl is a tool designed to streamline the fine-tuning of various AI models, offering support for multiple configurations and architectures.\nFeatures: - Train various Huggingface models such as llama, pythia, falcon, mpt - Supports fullfinetune, lora, qlora, relora, and gptq - Customize configurations using a simple yaml file or CLI overwrite - Load different dataset formats, use custom formats, or bring your own tokenized datasets - Integrated with xformer, flash attention, rope scaling, and multipacking - Works with single GPU or multiple GPUs via FSDP or Deepspeed - Easily run with Docker locally or on the cloud - Log results and optionally checkpoints to wandb or mlflow - And more!\n  \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfp16/fp32\nlora\nqlora\ngptq\ngptq w/flash attn\nflash attn\nxformers attn\n\n\n\n\nllama\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMistral\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMixtral-MoE\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nPythia\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ncerebras\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nbtlm\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nmpt\n✅\n❌\n❓\n❌\n❌\n❌\n❓\n\n\nfalcon\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ngpt-j\n✅\n✅\n✅\n❌\n❌\n❓\n❓\n\n\nXGen\n✅\n❓\n✅\n❓\n❓\n❓\n✅\n\n\nphi\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nRWKV\n✅\n❓\n❓\n❓\n❓\n❓\n❓\n\n\nQwen\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nGemma\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\n\n✅: supported ❌: not supported ❓: untested\n\n\n\nGet started with Axolotl in just a few steps! This quickstart guide will walk you through setting up and running a basic fine-tuning task.\nRequirements: Python >=3.10 and Pytorch >=2.1.1.\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\n\n\n# preprocess datasets - optional but recommended\nCUDA_VISIBLE_DEVICES=\"\" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml\n\n# finetune lora\naccelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml\n\n# inference\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\"\n\n# gradio\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\" --gradio\n\n# remote yaml files - the yaml config can be hosted on a public URL\n# Note: the yaml config must directly link to the **raw** yaml\naccelerate launch -m axolotl.cli.train https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/examples/openllama-3b/lora.yml\n\n\n\n\n\n\n\n\ndocker run --gpus '\"all\"' --rm -it winglian/axolotl:main-latest\nOr run on the current files for development:\ndocker compose up -d\n\n[!Tip] If you want to debug axolotl or prefer to use Docker as your development environment, see the debugging guide’s section on Docker.\n\n\n\nDocker advanced\n\nA more powerful Docker command to run would be this:\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface winglian/axolotl:main-latest\nIt additionally: * Prevents memory issues when running e.g. deepspeed (e.g. you could hit SIGBUS/signal 7 error) through --ipc and --ulimit args. * Persists the downloaded HF data (models etc.) and your modifications to axolotl code through --mount/-v args. * The --name argument simply makes it easier to refer to the container in vscode (Dev Containers: Attach to Running Container...) or in your terminal. 
* The --privileged flag gives all capabilities to the container. * The --shm-size 10g argument increases the shared memory size. Use this if you see exitcode: -7 errors using deepspeed.\nMore information on nvidia website\n\n\n\n\n\nInstall python >=3.10\nInstall pytorch stable https://pytorch.org/get-started/locally/\nInstall Axolotl along with python dependencies bash         pip3 install packaging         pip3 install -e '.[flash-attn,deepspeed]'\n(Optional) Login to Huggingface to use gated models/datasets. bash         huggingface-cli login Get the token at huggingface.co/settings/tokens\n\n\n\n\nFor cloud GPU providers that support docker images, use winglian/axolotl-cloud:main-latest\n\non Latitude.sh use this direct link\non JarvisLabs.ai use this direct link\non RunPod use this direct link\n\n\n\n\n\n\n\n\nClick to Expand\n\n\nInstall python\n\nsudo apt update\nsudo apt install -y python3.10\n\nsudo update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1\nsudo update-alternatives --config python # pick 3.10 if given option\npython -V # should be 3.10\n\nInstall pip\n\nwget https://bootstrap.pypa.io/get-pip.py\npython get-pip.py\n\nInstall torch\n\npip3 install -U torch --index-url https://download.pytorch.org/whl/cu118\n\nAxolotl\n\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\npip3 install protobuf==3.20.3\npip3 install -U --ignore-installed requests Pillow psutil scipy\n\nSet path\n\nexport LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH\n\n\n\n\n\n\nClick to Expand\n\nUse a Deeplearning linux OS with cuda and pytorch installed. Then follow instructions on quickstart.\nMake sure to run the below to uninstall xla.\npip uninstall -y torch_xla[tpu]\n\n\n\n\n\nPlease use WSL or Docker!\n\n\n\nUse the below instead of the install method in QuickStart.\npip3 install -e '.'\nMore info: mac.md\n\n\n\nPlease use this example notebook.\n\n\n\nTo launch on GPU instances (both on-demand and spot instances) on 7+ clouds (GCP, AWS, Azure, OCI, and more), you can use SkyPilot:\npip install \"skypilot-nightly[gcp,aws,azure,oci,lambda,kubernetes,ibm,scp]\"  # choose your clouds\nsky check\nGet the example YAMLs of using Axolotl to finetune mistralai/Mistral-7B-v0.1:\ngit clone https://github.com/skypilot-org/skypilot.git\ncd skypilot/llm/axolotl\nUse one command to launch:\n# On-demand\nHF_TOKEN=xx sky launch axolotl.yaml --env HF_TOKEN\n\n# Managed spot (auto-recovery on preemption)\nHF_TOKEN=xx BUCKET=<unique-name> sky spot launch axolotl-spot.yaml --env HF_TOKEN --env BUCKET\n\n\n\n\nAxolotl supports a variety of dataset formats. It is recommended to use a JSONL. The schema of the JSONL depends upon the task and the prompt template you wish to use. Instead of a JSONL, you can also use a HuggingFace dataset with columns for each JSONL field.\nSee these docs for more information on how to use different dataset formats.\n\n\n\nSee examples for quick start. It is recommended to duplicate and modify to your needs. 
The most important options are:\n\nmodel\nbase_model: ./llama-7b-hf # local or huggingface repo\nNote: The code will load the right architecture.\ndataset\ndatasets:\n    # huggingface repo\n  - path: vicgalle/alpaca-gpt4\n    type: alpaca\n\n    # huggingface repo with specific configuration/subset\n  - path: EleutherAI/pile\n    name: enron_emails\n    type: completion # format from earlier\n    field: text # Optional[str] default: text, field to use for completion data\n\n    # huggingface repo with multiple named configurations/subsets\n  - path: bigcode/commitpackft\n    name:\n      - ruby\n      - python\n      - typescript\n    type: ... # unimplemented custom format\n\n    # fastchat conversation\n    # See 'conversation' options: https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n  - path: ...\n    type: sharegpt\n    conversation: chatml # default: vicuna_v1.1\n\n    # local\n  - path: data.jsonl # or json\n    ds_type: json # see other options below\n    type: alpaca\n\n    # dataset with splits, but no train split\n  - path: knowrohit07/know_sql\n    type: context_qa.load_v2\n    train_on_split: validation\n\n    # loading from s3 or gcs\n    # s3 creds will be loaded from the system default and gcs only supports public access\n  - path: s3://path_to_ds # Accepts folder with arrow/parquet or file path like above. Supports s3, gcs.\n    ...\n\n    # Loading Data From a Public URL\n    # - The file format is `json` (which includes `jsonl`) by default. For different formats, adjust the `ds_type` option accordingly.\n  - path: https://some.url.com/yourdata.jsonl # The URL should be a direct link to the file you wish to load. URLs must use HTTPS protocol, not HTTP.\n    ds_type: json # this is the default, see other options below.\nloading\nload_in_4bit: true\nload_in_8bit: true\n\nbf16: auto # require >=ampere, auto will detect if your GPU supports this and choose automatically.\nfp16: # leave empty to use fp16 when bf16 is 'auto'. set to false if you want to fallback to fp32\ntf32: true # require >=ampere\n\nbfloat16: true # require >=ampere, use instead of bf16 when you don't want AMP (automatic mixed precision)\nfloat16: true # use instead of fp16 when you don't want AMP\nNote: Repo does not do 4-bit quantization.\nlora\nadapter: lora # 'qlora' or leave blank for full finetune\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n  - q_proj\n  - v_proj\n\n\n\nSee these docs for all config options.\n\n\nUnderstanding of batch size and gradient accumulation steps\n\n Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). 
However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1: Micro batch size: 3 Gradient accumulation steps: 2 Number of GPUs: 3 Total batch size = 3 * 2 * 3 = 18\n| GPU 1          | GPU 2          | GPU 3          |\n|----------------|----------------|----------------|\n| S1, S2, S3     | S4, S5, S6     | S7, S8, S9     |\n| e1, e2, e3     | e4, e5, e6     | e7, e8, e9     |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12  | S13, S14, S15  | S16, S17, S18  |\n| e10, e11, e12  | e13, e14, e15  | e16, e17, e18  |\n|----------------|----------------|----------------|\n| → (apply)      | → (apply)      | → (apply)      |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2: Micro batch size: 2 Gradient accumulation steps: 1 Number of GPUs: 3 Total batch size = 2 * 1 * 3 = 6\n| GPU 1     | GPU 2     | GPU 3     |\n|-----------|-----------|-----------|\n| S1, S2    | S3, S4    | S5, S6    |\n| e1, e2    | e3, e4    | e5, e6    |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)\n\n\n\n\n\nRun\naccelerate launch -m axolotl.cli.train your_config.yml\n\n[!TIP] You can also reference a config file that is hosted on a public URL, for example accelerate launch -m axolotl.cli.train https://yourdomain.com/your_config.yml\n\n\n\nYou can optionally pre-tokenize dataset with the following before finetuning. This is recommended for large datasets.\n\nSet dataset_prepared_path: to a local folder for saving and loading pre-tokenized dataset.\n(Optional): Set push_dataset_to_hub: hf_user/repo to push it to Huggingface.\n(Optional): Use --debug to see preprocessed examples.\n\npython -m axolotl.cli.preprocess your_config.yml\n\n\n\nBelow are the options available in axolotl for training with multiple GPUs. Note that DeepSpeed is the recommended multi-GPU option currently because FSDP may experience loss instability.\n\n\nDeepspeed is an optimization suite for multi-gpu systems allowing you to train much larger models than you might typically be able to fit into your GPU’s VRAM. 
More information about the various optimization types for deepspeed is available at https://huggingface.co/docs/accelerate/main/en/usage_guides/deepspeed#what-is-integrated\nWe provide several default deepspeed JSON configurations for ZeRO stage 1, 2, and 3.\ndeepspeed: deepspeed_configs/zero1.json\naccelerate launch -m axolotl.cli.train examples/llama-2/config.py --deepspeed deepspeed_configs/zero1.json\n\n\n\n\nllama FSDP\n\nfsdp:\n  - full_shard\n  - auto_wrap\nfsdp_config:\n  fsdp_offload_params: true\n  fsdp_state_dict_type: FULL_STATE_DICT\n  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer\n\n\n\nAxolotl supports training with FSDP and QLoRA, see these docs for more information.\n\n\n\nMake sure your WANDB_API_KEY environment variable is set (recommended) or you login to wandb with wandb login.\n\nwandb options\n\nwandb_mode:\nwandb_project:\nwandb_entity:\nwandb_watch:\nwandb_name:\nwandb_log_model:\n\n\n\nIt is important to have special tokens like delimiters, end-of-sequence, beginning-of-sequence in your tokenizer’s vocabulary. This will help you avoid tokenization issues and help your model train better. You can do this in axolotl like this:\nspecial_tokens:\n  bos_token: \"<s>\"\n  eos_token: \"</s>\"\n  unk_token: \"<unk>\"\ntokens: # these are delimiters\n  - \"<|im_start|>\"\n  - \"<|im_end|>\"\nWhen you include these tokens in your axolotl config, axolotl adds these tokens to the tokenizer’s vocabulary.\n\n\n\n\n\nAxolotl allows you to load your model in an interactive terminal playground for quick experimentation. The config file is the same config file used for training.\nPass the appropriate flag to the inference command, depending upon what kind of model was trained:\n\nPretrained LORA:\npython -m axolotl.cli.inference examples/your_config.yml --lora_model_dir=\"./lora-output-dir\"\nFull weights finetune:\npython -m axolotl.cli.inference examples/your_config.yml --base_model=\"./completed-model\"\nFull weights finetune w/ a prompt from a text file:\ncat /tmp/prompt.txt | python -m axolotl.cli.inference examples/your_config.yml \\\n  --base_model=\"./completed-model\" --prompter=None --load_in_8bit=True\n– With gradio hosting\npython -m axolotl.cli.inference examples/your_config.yml --gradio\n\nPlease use --sample_packing False if you have it on and receive the error similar to below:\n\nRuntimeError: stack expects each tensor to be equal size, but got [1, 32, 1, 128] at entry 0 and [1, 32, 8, 128] at entry 1\n\n\n\n\nThe following command will merge your LORA adapater with your base model. You can optionally pass the argument --lora_model_dir to specify the directory where your LORA adapter was saved, otherwhise, this will be inferred from output_dir in your axolotl config file. The merged model is saved in the sub-directory {lora_model_dir}/merged.\npython3 -m axolotl.cli.merge_lora your_config.yml --lora_model_dir=\"./completed-model\"\nYou may need to use the gpu_memory_limit and/or lora_on_cpu config options to avoid running out of memory. If you still run out of CUDA memory, you can try to merge in system RAM with\nCUDA_VISIBLE_DEVICES=\"\" python3 -m axolotl.cli.merge_lora ...\nalthough this will be very slow, and using the config options above are recommended instead.\n\n\n\n\nSee also the FAQ’s and debugging guide.\n\nIf you encounter a ‘Cuda out of memory’ error, it means your GPU ran out of memory during the training process. 
Here’s how to resolve it:\n\nPlease reduce any below - micro_batch_size - eval_batch_size - gradient_accumulation_steps - sequence_len\nIf it does not help, try running without deepspeed and without accelerate (replace “accelerate launch” with “python”) in the command.\nUsing adamw_bnb_8bit might also save you some memory.\n\nfailed (exitcode: -9)\n\nUsually means your system has run out of system memory. Similarly, you should consider reducing the same settings as when you run out of VRAM. Additionally, look into upgrading your system RAM which should be simpler than GPU upgrades.\n\nRuntimeError: expected scalar type Float but found Half\n\nTry set fp16: true\n\nNotImplementedError: No operator found for memory_efficient_attention_forward …\n\nTry to turn off xformers.\n\naccelerate config missing\n\nIt’s safe to ignore it.\n\nNCCL Timeouts during training\n\nSee the NCCL guide.\n\n\nFor many formats, Axolotl constructs prompts by concatenating token ids after tokenizing strings. The reason for concatenating token ids rather than operating on strings is to maintain precise accounting for attention masks.\nIf you decode a prompt constructed by axolotl, you might see spaces between tokens (or lack thereof) that you do not expect, especially around delimiters and special tokens. When you are starting out with a new format, you should always do the following:\n\nMaterialize some data using python -m axolotl.cli.preprocess your_config.yml --debug, and then decode the first few rows with your model’s tokenizer.\nDuring inference, right before you pass a tensor of token ids to your model, decode these tokens back into a string.\nMake sure the inference string from #2 looks exactly like the data you fine tuned on from #1, including spaces and new lines. If they aren’t the same, adjust your inference server accordingly.\nAs an additional troubleshooting step, you can look at the token ids between 1 and 2 to make sure they are identical.\n\nHaving misalignment between your prompts during training and inference can cause models to perform very poorly, so it is worth checking this. See this blog post for a concrete example.\n\n\n\n\nSee this debugging guide for tips on debugging Axolotl, along with an example configuration for debugging with VSCode.\n\n\n\nJoin our Discord server where we our community members can help you.\nNeed dedicated support? Please contact us at ✉️wing@openaccessaicollective.org for dedicated support options.\n\n\n\nBuilding something cool with Axolotl? Consider adding a badge to your model card.\n[<img src=\"https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png\" alt=\"Built with Axolotl\" width=\"200\" height=\"32\"/>](https://github.com/OpenAccess-AI-Collective/axolotl)\n\n\n\n\nCheck out some of the projects and models that have been built using Axolotl! Have a model you’d like to add to our Community Showcase? Open a PR with your model.\nOpen Access AI Collective - Minotaur 13b - Manticore 13b - Hippogriff 30b\nPocketDoc Labs - Dan’s PersonalityEngine 13b LoRA\n\n\n\nPlease read the contributing guide\nBugs? 
Please check the open issues else create a new Issue.\nPRs are greatly welcome!\nPlease run below to setup env\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\n\npip3 install -r requirements-dev.txt -r requirements-tests.txt\npre-commit install\n\n# test\npytest tests/\n\n# optional: run against all files\npre-commit run --all-files\nThanks to all of our contributors to date. Help drive open source AI progress forward by contributing to Axolotl.\n  \n\n\n\nOpenAccess AI Collective is run by volunteer contributors such as winglian, NanoCode012, tmm1, mhenrichsen, casper-hansen, hamelsmu and many more who help us accelerate forward by fixing bugs, answering community questions and implementing new features. Axolotl needs donations from sponsors for the compute needed to run our unit & integration tests, troubleshooting community issues, and providing bounties. If you love axolotl, consider sponsoring the project via GitHub Sponsors, Ko-fi or reach out directly to wing@openaccessaicollective.org.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJarvisLabs.ai",
+    "text": "Axolotl is a tool designed to streamline the fine-tuning of various AI models, offering support for multiple configurations and architectures.\nFeatures: - Train various Huggingface models such as llama, pythia, falcon, mpt - Supports fullfinetune, lora, qlora, relora, and gptq - Customize configurations using a simple yaml file or CLI overwrite - Load different dataset formats, use custom formats, or bring your own tokenized datasets - Integrated with xformer, flash attention, rope scaling, and multipacking - Works with single GPU or multiple GPUs via FSDP or Deepspeed - Easily run with Docker locally or on the cloud - Log results and optionally checkpoints to wandb or mlflow - And more!\n  \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfp16/fp32\nlora\nqlora\ngptq\ngptq w/flash attn\nflash attn\nxformers attn\n\n\n\n\nllama\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMistral\n✅\n✅\n✅\n✅\n✅\n✅\n✅\n\n\nMixtral-MoE\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nPythia\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ncerebras\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nbtlm\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\nmpt\n✅\n❌\n❓\n❌\n❌\n❌\n❓\n\n\nfalcon\n✅\n✅\n✅\n❌\n❌\n❌\n❓\n\n\ngpt-j\n✅\n✅\n✅\n❌\n❌\n❓\n❓\n\n\nXGen\n✅\n❓\n✅\n❓\n❓\n❓\n✅\n\n\nphi\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nRWKV\n✅\n❓\n❓\n❓\n❓\n❓\n❓\n\n\nQwen\n✅\n✅\n✅\n❓\n❓\n❓\n❓\n\n\nGemma\n✅\n✅\n✅\n❓\n❓\n✅\n❓\n\n\n\n✅: supported ❌: not supported ❓: untested\n\n\n\nGet started with Axolotl in just a few steps! This quickstart guide will walk you through setting up and running a basic fine-tuning task.\nRequirements: Python >=3.10 and Pytorch >=2.1.1.\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging ninja\npip3 install -e '.[flash-attn,deepspeed]'\n\n\n# preprocess datasets - optional but recommended\nCUDA_VISIBLE_DEVICES=\"\" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml\n\n# finetune lora\naccelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml\n\n# inference\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\"\n\n# gradio\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\" --gradio\n\n# remote yaml files - the yaml config can be hosted on a public URL\n# Note: the yaml config must directly link to the **raw** yaml\naccelerate launch -m axolotl.cli.train https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/examples/openllama-3b/lora.yml\n\n\n\n\n\n\n\n\ndocker run --gpus '\"all\"' --rm -it winglian/axolotl:main-latest\nOr run on the current files for development:\ndocker compose up -d\n\n[!Tip] If you want to debug axolotl or prefer to use Docker as your development environment, see the debugging guide’s section on Docker.\n\n\n\nDocker advanced\n\nA more powerful Docker command to run would be this:\ndocker run --privileged --gpus '\"all\"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src=\"${PWD}\",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface winglian/axolotl:main-latest\nIt additionally: * Prevents memory issues when running e.g. deepspeed (e.g. you could hit SIGBUS/signal 7 error) through --ipc and --ulimit args. * Persists the downloaded HF data (models etc.) and your modifications to axolotl code through --mount/-v args. * The --name argument simply makes it easier to refer to the container in vscode (Dev Containers: Attach to Running Container...) or in your terminal. 
* The --privileged flag gives all capabilities to the container. * The --shm-size 10g argument increases the shared memory size. Use this if you see exitcode: -7 errors using deepspeed.\nMore information on nvidia website\n\n\n\n\n\nInstall python >=3.10\nInstall pytorch stable https://pytorch.org/get-started/locally/\nInstall Axolotl along with python dependencies bash         pip3 install packaging         pip3 install -e '.[flash-attn,deepspeed]'\n(Optional) Login to Huggingface to use gated models/datasets. bash         huggingface-cli login Get the token at huggingface.co/settings/tokens\n\n\n\n\nFor cloud GPU providers that support docker images, use winglian/axolotl-cloud:main-latest\n\non Latitude.sh use this direct link\non JarvisLabs.ai use this direct link\non RunPod use this direct link\n\n\n\n\n\n\n\n\nClick to Expand\n\n\nInstall python\n\nsudo apt update\nsudo apt install -y python3.10\n\nsudo update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1\nsudo update-alternatives --config python # pick 3.10 if given option\npython -V # should be 3.10\n\nInstall pip\n\nwget https://bootstrap.pypa.io/get-pip.py\npython get-pip.py\n\nInstall torch\n\npip3 install -U torch --index-url https://download.pytorch.org/whl/cu118\n\nAxolotl\n\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\npip3 install protobuf==3.20.3\npip3 install -U --ignore-installed requests Pillow psutil scipy\n\nSet path\n\nexport LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH\n\n\n\n\n\n\nClick to Expand\n\nUse a Deeplearning linux OS with cuda and pytorch installed. Then follow instructions on quickstart.\nMake sure to run the below to uninstall xla.\npip uninstall -y torch_xla[tpu]\n\n\n\n\n\nPlease use WSL or Docker!\n\n\n\nUse the below instead of the install method in QuickStart.\npip3 install -e '.'\nMore info: mac.md\n\n\n\nPlease use this example notebook.\n\n\n\nTo launch on GPU instances (both on-demand and spot instances) on 7+ clouds (GCP, AWS, Azure, OCI, and more), you can use SkyPilot:\npip install \"skypilot-nightly[gcp,aws,azure,oci,lambda,kubernetes,ibm,scp]\"  # choose your clouds\nsky check\nGet the example YAMLs of using Axolotl to finetune mistralai/Mistral-7B-v0.1:\ngit clone https://github.com/skypilot-org/skypilot.git\ncd skypilot/llm/axolotl\nUse one command to launch:\n# On-demand\nHF_TOKEN=xx sky launch axolotl.yaml --env HF_TOKEN\n\n# Managed spot (auto-recovery on preemption)\nHF_TOKEN=xx BUCKET=<unique-name> sky spot launch axolotl-spot.yaml --env HF_TOKEN --env BUCKET\n\n\n\n\nAxolotl supports a variety of dataset formats. It is recommended to use a JSONL. The schema of the JSONL depends upon the task and the prompt template you wish to use. Instead of a JSONL, you can also use a HuggingFace dataset with columns for each JSONL field.\nSee these docs for more information on how to use different dataset formats.\n\n\n\nSee examples for quick start. It is recommended to duplicate and modify to your needs. 
The most important options are:\n\nmodel\nbase_model: ./llama-7b-hf # local or huggingface repo\nNote: The code will load the right architecture.\ndataset\ndatasets:\n    # huggingface repo\n  - path: vicgalle/alpaca-gpt4\n    type: alpaca\n\n    # huggingface repo with specific configuration/subset\n  - path: EleutherAI/pile\n    name: enron_emails\n    type: completion # format from earlier\n    field: text # Optional[str] default: text, field to use for completion data\n\n    # huggingface repo with multiple named configurations/subsets\n  - path: bigcode/commitpackft\n    name:\n      - ruby\n      - python\n      - typescript\n    type: ... # unimplemented custom format\n\n    # fastchat conversation\n    # See 'conversation' options: https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n  - path: ...\n    type: sharegpt\n    conversation: chatml # default: vicuna_v1.1\n\n    # local\n  - path: data.jsonl # or json\n    ds_type: json # see other options below\n    type: alpaca\n\n    # dataset with splits, but no train split\n  - path: knowrohit07/know_sql\n    type: context_qa.load_v2\n    train_on_split: validation\n\n    # loading from s3 or gcs\n    # s3 creds will be loaded from the system default and gcs only supports public access\n  - path: s3://path_to_ds # Accepts folder with arrow/parquet or file path like above. Supports s3, gcs.\n    ...\n\n    # Loading Data From a Public URL\n    # - The file format is `json` (which includes `jsonl`) by default. For different formats, adjust the `ds_type` option accordingly.\n  - path: https://some.url.com/yourdata.jsonl # The URL should be a direct link to the file you wish to load. URLs must use HTTPS protocol, not HTTP.\n    ds_type: json # this is the default, see other options below.\nloading\nload_in_4bit: true\nload_in_8bit: true\n\nbf16: auto # require >=ampere, auto will detect if your GPU supports this and choose automatically.\nfp16: # leave empty to use fp16 when bf16 is 'auto'. set to false if you want to fallback to fp32\ntf32: true # require >=ampere\n\nbfloat16: true # require >=ampere, use instead of bf16 when you don't want AMP (automatic mixed precision)\nfloat16: true # use instead of fp16 when you don't want AMP\nNote: Repo does not do 4-bit quantization.\nlora\nadapter: lora # 'qlora' or leave blank for full finetune\nlora_r: 8\nlora_alpha: 16\nlora_dropout: 0.05\nlora_target_modules:\n  - q_proj\n  - v_proj\n\n\n\nSee these docs for all config options.\n\n\nUnderstanding of batch size and gradient accumulation steps\n\n Gradient accumulation means accumulating gradients over several mini-batches and updating the model weights afterward. When the samples in each batch are diverse, this technique doesn’t significantly impact learning.\nThis method allows for effective training with larger effective batch sizes without needing proportionally larger memory. Here’s why:\n\nMemory Consumption with Batch Size: The primary reason increasing the batch size impacts memory is due to the storage requirements for intermediate activations. When you forward propagate a batch through a network, you have to store the activations at each layer for each sample in the batch, because these activations are used during backpropagation to compute gradients. Therefore, larger batches mean more activations, leading to greater GPU memory consumption.\nGradient Accumulation: With gradient accumulation, you’re effectively simulating a larger batch size by accumulating gradients over several smaller batches (or micro-batches). 
However, at any given time, you’re only forward and backward propagating a micro-batch. This means you only store activations for the micro-batch, not the full accumulated batch. As a result, you can simulate the effect of a larger batch size without the memory cost of storing activations for a large batch.\n\nExample 1: Micro batch size: 3 Gradient accumulation steps: 2 Number of GPUs: 3 Total batch size = 3 * 2 * 3 = 18\n| GPU 1          | GPU 2          | GPU 3          |\n|----------------|----------------|----------------|\n| S1, S2, S3     | S4, S5, S6     | S7, S8, S9     |\n| e1, e2, e3     | e4, e5, e6     | e7, e8, e9     |\n|----------------|----------------|----------------|\n| → (accumulate) | → (accumulate) | → (accumulate) |\n|----------------|----------------|----------------|\n| S10, S11, S12  | S13, S14, S15  | S16, S17, S18  |\n| e10, e11, e12  | e13, e14, e15  | e16, e17, e18  |\n|----------------|----------------|----------------|\n| → (apply)      | → (apply)      | → (apply)      |\n\nAccumulated gradient for the weight w1 after the second iteration (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6 + e7 + e8 + e9 + e10 + e11 + e12 + e13 + e14 + e15 + e16 + e17 + e18\n\nWeight update for w1:\nw1_new = w1_old - learning rate x (Total gradient for w1 / 18)\nExample 2: Micro batch size: 2 Gradient accumulation steps: 1 Number of GPUs: 3 Total batch size = 2 * 1 * 3 = 6\n| GPU 1     | GPU 2     | GPU 3     |\n|-----------|-----------|-----------|\n| S1, S2    | S3, S4    | S5, S6    |\n| e1, e2    | e3, e4    | e5, e6    |\n|-----------|-----------|-----------|\n| → (apply) | → (apply) | → (apply) |\n\nAccumulated gradient for the weight w1 (considering all GPUs):\nTotal gradient for w1 = e1 + e2 + e3 + e4 + e5 + e6\n\nWeight update for w1:\nw1_new = w1_old - learning rate × (Total gradient for w1 / 6)\n\n\n\n\n\nRun\naccelerate launch -m axolotl.cli.train your_config.yml\n\n[!TIP] You can also reference a config file that is hosted on a public URL, for example accelerate launch -m axolotl.cli.train https://yourdomain.com/your_config.yml\n\n\n\nYou can optionally pre-tokenize dataset with the following before finetuning. This is recommended for large datasets.\n\nSet dataset_prepared_path: to a local folder for saving and loading pre-tokenized dataset.\n(Optional): Set push_dataset_to_hub: hf_user/repo to push it to Huggingface.\n(Optional): Use --debug to see preprocessed examples.\n\npython -m axolotl.cli.preprocess your_config.yml\n\n\n\nBelow are the options available in axolotl for training with multiple GPUs. Note that DeepSpeed is the recommended multi-GPU option currently because FSDP may experience loss instability.\n\n\nDeepspeed is an optimization suite for multi-gpu systems allowing you to train much larger models than you might typically be able to fit into your GPU’s VRAM. 
More information about the various optimization types for deepspeed is available at https://huggingface.co/docs/accelerate/main/en/usage_guides/deepspeed#what-is-integrated\nWe provide several default deepspeed JSON configurations for ZeRO stage 1, 2, and 3.\ndeepspeed: deepspeed_configs/zero1.json\naccelerate launch -m axolotl.cli.train examples/llama-2/config.py --deepspeed deepspeed_configs/zero1.json\n\n\n\n\nllama FSDP\n\nfsdp:\n  - full_shard\n  - auto_wrap\nfsdp_config:\n  fsdp_offload_params: true\n  fsdp_state_dict_type: FULL_STATE_DICT\n  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer\n\n\n\nAxolotl supports training with FSDP and QLoRA, see these docs for more information.\n\n\n\nMake sure your WANDB_API_KEY environment variable is set (recommended) or you login to wandb with wandb login.\n\nwandb options\n\nwandb_mode:\nwandb_project:\nwandb_entity:\nwandb_watch:\nwandb_name:\nwandb_log_model:\n\n\n\nIt is important to have special tokens like delimiters, end-of-sequence, beginning-of-sequence in your tokenizer’s vocabulary. This will help you avoid tokenization issues and help your model train better. You can do this in axolotl like this:\nspecial_tokens:\n  bos_token: \"<s>\"\n  eos_token: \"</s>\"\n  unk_token: \"<unk>\"\ntokens: # these are delimiters\n  - \"<|im_start|>\"\n  - \"<|im_end|>\"\nWhen you include these tokens in your axolotl config, axolotl adds these tokens to the tokenizer’s vocabulary.\n\n\n\n\n\nAxolotl allows you to load your model in an interactive terminal playground for quick experimentation. The config file is the same config file used for training.\nPass the appropriate flag to the inference command, depending upon what kind of model was trained:\n\nPretrained LORA:\npython -m axolotl.cli.inference examples/your_config.yml --lora_model_dir=\"./lora-output-dir\"\nFull weights finetune:\npython -m axolotl.cli.inference examples/your_config.yml --base_model=\"./completed-model\"\nFull weights finetune w/ a prompt from a text file:\ncat /tmp/prompt.txt | python -m axolotl.cli.inference examples/your_config.yml \\\n  --base_model=\"./completed-model\" --prompter=None --load_in_8bit=True\n– With gradio hosting\npython -m axolotl.cli.inference examples/your_config.yml --gradio\n\nPlease use --sample_packing False if you have it on and receive the error similar to below:\n\nRuntimeError: stack expects each tensor to be equal size, but got [1, 32, 1, 128] at entry 0 and [1, 32, 8, 128] at entry 1\n\n\n\n\nThe following command will merge your LORA adapater with your base model. You can optionally pass the argument --lora_model_dir to specify the directory where your LORA adapter was saved, otherwhise, this will be inferred from output_dir in your axolotl config file. The merged model is saved in the sub-directory {lora_model_dir}/merged.\npython3 -m axolotl.cli.merge_lora your_config.yml --lora_model_dir=\"./completed-model\"\nYou may need to use the gpu_memory_limit and/or lora_on_cpu config options to avoid running out of memory. If you still run out of CUDA memory, you can try to merge in system RAM with\nCUDA_VISIBLE_DEVICES=\"\" python3 -m axolotl.cli.merge_lora ...\nalthough this will be very slow, and using the config options above are recommended instead.\n\n\n\n\nSee also the FAQ’s and debugging guide.\n\nIf you encounter a ‘Cuda out of memory’ error, it means your GPU ran out of memory during the training process. 
Here’s how to resolve it:\n\nPlease reduce any below - micro_batch_size - eval_batch_size - gradient_accumulation_steps - sequence_len\nIf it does not help, try running without deepspeed and without accelerate (replace “accelerate launch” with “python”) in the command.\nUsing adamw_bnb_8bit might also save you some memory.\n\nfailed (exitcode: -9)\n\nUsually means your system has run out of system memory. Similarly, you should consider reducing the same settings as when you run out of VRAM. Additionally, look into upgrading your system RAM which should be simpler than GPU upgrades.\n\nRuntimeError: expected scalar type Float but found Half\n\nTry set fp16: true\n\nNotImplementedError: No operator found for memory_efficient_attention_forward …\n\nTry to turn off xformers.\n\naccelerate config missing\n\nIt’s safe to ignore it.\n\nNCCL Timeouts during training\n\nSee the NCCL guide.\n\n\nFor many formats, Axolotl constructs prompts by concatenating token ids after tokenizing strings. The reason for concatenating token ids rather than operating on strings is to maintain precise accounting for attention masks.\nIf you decode a prompt constructed by axolotl, you might see spaces between tokens (or lack thereof) that you do not expect, especially around delimiters and special tokens. When you are starting out with a new format, you should always do the following:\n\nMaterialize some data using python -m axolotl.cli.preprocess your_config.yml --debug, and then decode the first few rows with your model’s tokenizer.\nDuring inference, right before you pass a tensor of token ids to your model, decode these tokens back into a string.\nMake sure the inference string from #2 looks exactly like the data you fine tuned on from #1, including spaces and new lines. If they aren’t the same, adjust your inference server accordingly.\nAs an additional troubleshooting step, you can look at the token ids between 1 and 2 to make sure they are identical.\n\nHaving misalignment between your prompts during training and inference can cause models to perform very poorly, so it is worth checking this. See this blog post for a concrete example.\n\n\n\n\nSee this debugging guide for tips on debugging Axolotl, along with an example configuration for debugging with VSCode.\n\n\n\nJoin our Discord server where we our community members can help you.\nNeed dedicated support? Please contact us at ✉️wing@openaccessaicollective.org for dedicated support options.\n\n\n\nBuilding something cool with Axolotl? Consider adding a badge to your model card.\n[<img src=\"https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png\" alt=\"Built with Axolotl\" width=\"200\" height=\"32\"/>](https://github.com/OpenAccess-AI-Collective/axolotl)\n\n\n\n\nCheck out some of the projects and models that have been built using Axolotl! Have a model you’d like to add to our Community Showcase? Open a PR with your model.\nOpen Access AI Collective - Minotaur 13b - Manticore 13b - Hippogriff 30b\nPocketDoc Labs - Dan’s PersonalityEngine 13b LoRA\n\n\n\nPlease read the contributing guide\nBugs? 
Please check the open issues else create a new Issue.\nPRs are greatly welcome!\nPlease run below to setup env\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging ninja\npip3 install -e '.[flash-attn,deepspeed]'\n\npip3 install -r requirements-dev.txt -r requirements-tests.txt\npre-commit install\n\n# test\npytest tests/\n\n# optional: run against all files\npre-commit run --all-files\nThanks to all of our contributors to date. Help drive open source AI progress forward by contributing to Axolotl.\n  \n\n\n\nOpenAccess AI Collective is run by volunteer contributors such as winglian, NanoCode012, tmm1, mhenrichsen, casper-hansen, hamelsmu and many more who help us accelerate forward by fixing bugs, answering community questions and implementing new features. Axolotl needs donations from sponsors for the compute needed to run our unit & integration tests, troubleshooting community issues, and providing bounties. If you love axolotl, consider sponsoring the project via GitHub Sponsors, Ko-fi or reach out directly to wing@openaccessaicollective.org.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJarvisLabs.ai",
     "crumbs": [
       "Home"
     ]
@@ -661,7 +661,7 @@
     "href": "index.html#quickstart",
     "title": "Axolotl",
     "section": "",
-    "text": "Get started with Axolotl in just a few steps! This quickstart guide will walk you through setting up and running a basic fine-tuning task.\nRequirements: Python >=3.10 and Pytorch >=2.1.1.\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\n\n\n# preprocess datasets - optional but recommended\nCUDA_VISIBLE_DEVICES=\"\" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml\n\n# finetune lora\naccelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml\n\n# inference\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\"\n\n# gradio\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\" --gradio\n\n# remote yaml files - the yaml config can be hosted on a public URL\n# Note: the yaml config must directly link to the **raw** yaml\naccelerate launch -m axolotl.cli.train https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/examples/openllama-3b/lora.yml",
+    "text": "Get started with Axolotl in just a few steps! This quickstart guide will walk you through setting up and running a basic fine-tuning task.\nRequirements: Python >=3.10 and Pytorch >=2.1.1.\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging ninja\npip3 install -e '.[flash-attn,deepspeed]'\n\n\n# preprocess datasets - optional but recommended\nCUDA_VISIBLE_DEVICES=\"\" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml\n\n# finetune lora\naccelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml\n\n# inference\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\"\n\n# gradio\naccelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \\\n    --lora_model_dir=\"./lora-out\" --gradio\n\n# remote yaml files - the yaml config can be hosted on a public URL\n# Note: the yaml config must directly link to the **raw** yaml\naccelerate launch -m axolotl.cli.train https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/examples/openllama-3b/lora.yml",
     "crumbs": [
       "Home"
     ]
@@ -731,7 +731,7 @@
     "href": "index.html#contributing",
     "title": "Axolotl",
     "section": "",
-    "text": "Please read the contributing guide\nBugs? Please check the open issues else create a new Issue.\nPRs are greatly welcome!\nPlease run below to setup env\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging\npip3 install -e '.[flash-attn,deepspeed]'\n\npip3 install -r requirements-dev.txt -r requirements-tests.txt\npre-commit install\n\n# test\npytest tests/\n\n# optional: run against all files\npre-commit run --all-files\nThanks to all of our contributors to date. Help drive open source AI progress forward by contributing to Axolotl.",
+    "text": "Please read the contributing guide\nBugs? Please check the open issues else create a new Issue.\nPRs are greatly welcome!\nPlease run below to setup env\ngit clone https://github.com/OpenAccess-AI-Collective/axolotl\ncd axolotl\n\npip3 install packaging ninja\npip3 install -e '.[flash-attn,deepspeed]'\n\npip3 install -r requirements-dev.txt -r requirements-tests.txt\npre-commit install\n\n# test\npytest tests/\n\n# optional: run against all files\npre-commit run --all-files\nThanks to all of our contributors to date. Help drive open source AI progress forward by contributing to Axolotl.",
     "crumbs": [
       "Home"
     ]
diff --git a/sitemap.xml b/sitemap.xml
index 45e161276..3e20ff3ce 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,82 +2,82 @@
 
   
     https://OpenAccess-AI-Collective.github.io/axolotl/TODO.html
-    2024-04-02T03:49:13.586Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/examples/colab-notebooks/colab-axolotl-example.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.249Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/input_output.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/debugging.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/nccl.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/template_free.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/inst_tune.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/conversation.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/rlhf.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/faq.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/FAQS.html
-    2024-04-02T03:49:13.586Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/fsdp_qlora.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/pretraining.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/tokenized.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/dataset-formats/index.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/mac.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/multipack.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/config.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/docs/multi-node.html
-    2024-04-02T03:49:13.590Z
+    2024-04-02T08:36:52.245Z
   
   
     https://OpenAccess-AI-Collective.github.io/axolotl/index.html
-    2024-04-02T03:49:13.602Z
+    2024-04-02T08:36:52.257Z