Compare commits


1 commit

Author: Wing Lian
Commit: c578c8f256 "Validation for Muon optimizer with DS/FSDP"
Date: 2025-04-01 09:29:54 -04:00
223 changed files with 1767 additions and 4627 deletions

View File

@@ -1,14 +0,0 @@
[run]
source = axolotl
omit =
*/tests/*
setup.py
[report]
exclude_lines =
pragma: no cover
def __repr__
raise NotImplementedError
if __name__ == .__main__.:
pass
raise ImportError

View File

@@ -52,12 +52,6 @@ jobs:
  python_version: "3.11"
  pytorch: nightly
  torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
- - cuda: "128"
-   cuda_version: 12.8.1
-   cudnn_version: ""
-   python_version: "3.11"
-   pytorch: next
-   torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
  steps:
  - name: Checkout
    uses: actions/checkout@v4
@@ -79,7 +73,7 @@ jobs:
  uses: docker/build-push-action@v4
  with:
    context: .
-   file: ${{ matrix.pytorch == 'nightly' && './docker/Dockerfile-base-nightly' || matrix.pytorch == 'next' && './docker/Dockerfile-base-next' || './docker/Dockerfile-base' }}
+   file: ${{ matrix.pytorch == 'nightly' && './docker/Dockerfile-base-nightly' || './docker/Dockerfile-base' }}
    push: ${{ github.event_name != 'pull_request' }}
    tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
    labels: ${{ steps.metadata.outputs.labels }}

View File

@@ -29,7 +29,7 @@ jobs:
  cuda_version: 12.4.1
  python_version: "3.11"
  pytorch: 2.6.0
- axolotl_extras: vllm
+ axolotl_extras:
  is_latest: true
  runs-on: axolotl-gpu-runner
  steps:

View File

@@ -24,13 +24,6 @@ jobs:
  fail-fast: false
  matrix:
    include:
-     - cuda: 124
-       cuda_version: 12.4.1
-       python_version: "3.11"
-       pytorch: 2.6.0
-       axolotl_extras: vllm
-       num_gpus: 2
-       nightly_build: "true"
      - cuda: 124
        cuda_version: 12.4.1
        python_version: "3.11"
@@ -45,6 +38,13 @@
        axolotl_extras: vllm
        num_gpus: 2
        nightly_build: "true"
+     - cuda: 124
+       cuda_version: 12.4.1
+       python_version: "3.11"
+       pytorch: 2.6.0
+       axolotl_extras: vllm
+       num_gpus: 2
+       nightly_build: "true"
  runs-on: [self-hosted, modal]
  timeout-minutes: 120
  steps:

View File

@@ -102,16 +102,9 @@ jobs:
  - name: Run tests
    run: |
-     pytest -v -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ tests/ --cov=axolotl --cov-report=xml
-     pytest -v tests/patched/ --cov=axolotl --cov-append --cov-report=xml
-     pytest -v tests/cli/ --cov=axolotl --cov-append --cov-report=xml
+     pytest -v -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ tests/
+     pytest -v tests/patched/
+     pytest -v tests/cli/
- - name: Upload coverage to Codecov
-   uses: codecov/codecov-action@v5
-   with:
-     files: ./coverage.xml
-     flags: unittests,pytorch-${{ matrix.pytorch_version }}
-     fail_ci_if_error: false
  - name: cleanup pip cache
    run: |
@@ -218,7 +211,7 @@
  - cuda: 124
    cuda_version: 12.4.1
    python_version: "3.11"
-   pytorch: 2.6.0
+   pytorch: 2.5.1
    num_gpus: 1
    axolotl_extras: vllm
  steps:
@@ -265,7 +258,7 @@
  - cuda: 124
    cuda_version: 12.4.1
    python_version: "3.11"
-   pytorch: 2.5.1
+   pytorch: 2.6.0
    num_gpus: 1
    axolotl_extras: vllm
  steps:

CNAME
View File

@@ -1 +0,0 @@
docs.axolotl.ai

View File

@@ -9,7 +9,6 @@
  <p align="center">
  <img src="https://img.shields.io/github/license/axolotl-ai-cloud/axolotl.svg?color=blue" alt="GitHub License">
  <img src="https://github.com/axolotl-ai-cloud/axolotl/actions/workflows/tests.yml/badge.svg" alt="tests">
- <a href="https://codecov.io/gh/axolotl-ai-cloud/axolotl"><img src="https://codecov.io/gh/axolotl-ai-cloud/axolotl/branch/main/graph/badge.svg" alt="codecov"></a>
  <a href="https://github.com/axolotl-ai-cloud/axolotl/releases"><img src="https://img.shields.io/github/release/axolotl-ai-cloud/axolotl.svg" alt="Releases"></a>
  <br/>
  <a href="https://github.com/axolotl-ai-cloud/axolotl/graphs/contributors"><img src="https://img.shields.io/github/contributors-anon/axolotl-ai-cloud/axolotl?color=yellow&style=flat-square" alt="contributors" style="height: 20px;"></a>
@@ -64,7 +63,7 @@ axolotl fetch examples
  axolotl fetch deepspeed_configs # OPTIONAL
  ```

- Other installation approaches are described [here](https://docs.axolotl.ai/docs/installation.html).
+ Other installation approaches are described [here](https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html).

  ### Your First Fine-tune
@@ -79,7 +78,7 @@ axolotl fetch examples --dest path/to/folder
  axolotl train examples/llama-3/lora-1b.yml
  ```

- That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/getting-started.html) for a more detailed walkthrough.
+ That's it! Check out our [Getting Started Guide](https://axolotl-ai-cloud.github.io/axolotl/docs/getting-started.html) for a more detailed walkthrough.

  ## ✨ Key Features
@@ -92,20 +91,20 @@ That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/ge
  ## 📚 Documentation

- - [Installation Options](https://docs.axolotl.ai/docs/installation.html) - Detailed setup instructions for different environments
- - [Configuration Guide](https://docs.axolotl.ai/docs/config.html) - Full configuration options and examples
- - [Dataset Guide](https://docs.axolotl.ai/docs/dataset-formats/) - Supported formats and how to use them
- - [Multi-GPU Training](https://docs.axolotl.ai/docs/multi-gpu.html)
- - [Multi-Node Training](https://docs.axolotl.ai/docs/multi-node.html)
- - [Multipacking](https://docs.axolotl.ai/docs/multipack.html)
- - [API Reference](https://docs.axolotl.ai/docs/api/) - Auto-generated code documentation
- - [FAQ](https://docs.axolotl.ai/docs/faq.html) - Frequently asked questions
+ - [Installation Options](https://axolotl-ai-cloud.github.io/axolotl/docs/installation.html) - Detailed setup instructions for different environments
+ - [Configuration Guide](https://axolotl-ai-cloud.github.io/axolotl/docs/config.html) - Full configuration options and examples
+ - [Dataset Guide](https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/) - Supported formats and how to use them
+ - [Multi-GPU Training](https://axolotl-ai-cloud.github.io/axolotl/docs/multi-gpu.html)
+ - [Multi-Node Training](https://axolotl-ai-cloud.github.io/axolotl/docs/multi-node.html)
+ - [Multipacking](https://axolotl-ai-cloud.github.io/axolotl/docs/multipack.html)
+ - [API Reference](https://axolotl-ai-cloud.github.io/axolotl/docs/api/) - Auto-generated code documentation
+ - [FAQ](https://axolotl-ai-cloud.github.io/axolotl/docs/faq.html) - Frequently asked questions

  ## 🤝 Getting Help

  - Join our [Discord community](https://discord.gg/HhrNrHJPRb) for support
  - Check out our [Examples](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/) directory
- - Read our [Debugging Guide](https://docs.axolotl.ai/docs/debugging.html)
+ - Read our [Debugging Guide](https://axolotl-ai-cloud.github.io/axolotl/docs/debugging.html)
  - Need dedicated support? Please contact [wing@axolotl.ai](mailto:wing@axolotl.ai) for options

  ## 🌟 Contributing

View File

@@ -231,7 +231,6 @@ website:
  - docs/reward_modelling.qmd
  - docs/lr_groups.qmd
  - docs/lora_optims.qmd
- - docs/dataset_loading.qmd
  - section: "Core Concepts"
    contents:

View File

@@ -3,59 +3,10 @@ set -e
  python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__"

- # Run unit tests with initial coverage report
- pytest -v --durations=10 -n8 \
-     --ignore=tests/e2e/ \
-     --ignore=tests/patched/ \
-     --ignore=tests/cli \
-     /workspace/axolotl/tests/ \
-     --cov=axolotl \
-     --cov-report=xml:coverage.xml
- # Run lora kernels tests with coverage append
- pytest -v --durations=10 \
-     /workspace/axolotl/tests/e2e/patched/lora_kernels \
-     --cov=axolotl \
-     --cov-append
- # Run patched tests excluding lora kernels with coverage append
- pytest -v --durations=10 \
-     --ignore=tests/e2e/patched/lora_kernels \
-     /workspace/axolotl/tests/e2e/patched \
-     --cov=axolotl \
-     --cov-append
- # Run solo tests with coverage append
- pytest -v --durations=10 -n1 \
-     /workspace/axolotl/tests/e2e/solo/ \
-     --cov=axolotl \
-     --cov-append
- # Run integration tests with coverage append
- pytest -v --durations=10 \
-     /workspace/axolotl/tests/e2e/integrations/ \
-     --cov=axolotl \
-     --cov-append
- pytest -v --durations=10 /workspace/axolotl/tests/cli \
-     --cov=axolotl \
-     --cov-append
- # Run remaining e2e tests with coverage append and final report
- pytest -v --durations=10 \
-     --ignore=tests/e2e/solo/ \
-     --ignore=tests/e2e/patched/ \
-     --ignore=tests/e2e/multigpu/ \
-     --ignore=tests/e2e/integrations/ \
-     --ignore=tests/cli \
-     /workspace/axolotl/tests/e2e/ \
-     --cov=axolotl \
-     --cov-append \
-     --cov-report=xml:coverage.xml
- # Upload coverage to Codecov
- if [ -f e2e-coverage.xml ]; then
-     codecov -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION}
- else
-     echo "Coverage file not found. Coverage report may have failed."
- fi
+ pytest -v --durations=10 -n8 --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli /workspace/axolotl/tests/
+ pytest -v --durations=10 /workspace/axolotl/tests/e2e/patched/lora_kernels  # running these with the other patches causes a failure
+ pytest -v --durations=10 --ignore=tests/e2e/patched/lora_kernels /workspace/axolotl/tests/e2e/patched
+ pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/solo/
+ pytest -v --durations=10 /workspace/axolotl/tests/e2e/integrations/
+ pytest -v --durations=10 /workspace/axolotl/tests/cli
+ pytest -v --durations=10 --ignore=tests/e2e/solo/ --ignore=tests/e2e/patched/ --ignore=tests/e2e/multigpu/ --ignore=tests/e2e/integrations/ --ignore=tests/cli /workspace/axolotl/tests/e2e/

View File

@@ -68,7 +68,7 @@ def run_cmd(cmd: str, run_folder: str):
  @app.function(
      image=cicd_image,
      gpu=GPU_CONFIG,
-     timeout=90 * 60,
+     timeout=60 * 60,
      cpu=8.0,
      memory=131072 * N_GPUS,
      volumes=VOLUME_CONFIG,

View File

@@ -2,24 +2,5 @@
  set -e

  # only run one test at a time so as not to OOM the GPU
- pytest -v --durations=10 -n2 /workspace/axolotl/tests/e2e/multigpu/ --ignore=/workspace/axolotl/tests/e2e/multigpu/solo/
- pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/multigpu/solo/
-
- # Only run two tests at a time to avoid OOM on GPU (with coverage collection)
- pytest -v -n2 \
-     --ignore=/workspace/axolotl/tests/e2e/multigpu/solo/ \
-     /workspace/axolotl/tests/e2e/multigpu/ \
-     --cov=axolotl \
-     --cov-report=xml:multigpu-coverage.xml
- pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/multigpu/solo/ \
-     --cov=axolotl \
-     --cov-append \
-     --cov-report=xml:multigpu-coverage.xml
-
- # Upload coverage to Codecov
- if [ -f multigpu-coverage.xml ]; then
-     codecov -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION}
- else
-     echo "Coverage file not found. Coverage report may have failed."
- fi
+ pytest -v -n2 /workspace/axolotl/tests/e2e/multigpu/ --ignore=/workspace/axolotl/tests/e2e/multigpu/solo/
+ pytest -v -n1 /workspace/axolotl/tests/e2e/multigpu/solo/

View File

@@ -1,51 +0,0 @@
codecov:
require_ci_to_pass: yes
coverage:
precision: 2
round: down
range: "70...100"
status:
project:
default:
# basic
target: auto
threshold: 0%
base: auto
# advanced
branches: null
if_no_uploads: error
if_not_found: success
if_ci_failed: error
only_pulls: false
flags: null
paths: null
patch:
default:
# basic
target: auto
threshold: 0%
base: auto
# advanced
branches: null
if_no_uploads: error
if_not_found: success
if_ci_failed: error
only_pulls: false
flags: null
paths: null
parsers:
gcov:
branch_detection:
conditional: yes
loop: yes
method: no
macro: no
comment:
layout: "reach,diff,flags,files,footer"
behavior: default
require_changes: no
require_base: no
require_head: yes

View File

@@ -20,9 +20,9 @@ WORKDIR /workspace/axolotl
  # If AXOLOTL_EXTRAS is set, append it in brackets
  RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-     pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+     pip install --no-build-isolation -e .[deepspeed,flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
      else \
-     pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
+     pip install --no-build-isolation -e .[deepspeed,flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
      fi

  RUN python scripts/unsloth_install.py | sh

View File

@@ -29,7 +29,7 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
  WORKDIR /workspace

  RUN python3 -m pip install --upgrade pip && pip3 install -U packaging==23.2 setuptools==75.8.0 wheel && \
-     python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} torchvision --extra-index-url https://download.pytorch.org/whl/cu$CUDA && \
+     python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} --extra-index-url https://download.pytorch.org/whl/cu$CUDA && \
      python3 -m pip install --no-cache-dir "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" && \
      python3 -m pip install --no-cache-dir "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main"

View File

@@ -1,38 +0,0 @@
ARG CUDA_VERSION="12.8.1"
ARG CUDNN_VERSION="8"
ARG UBUNTU_VERSION="22.04"
ARG MAX_JOBS=4
FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION AS base-builder
ENV PATH="/root/miniconda3/bin:${PATH}"
ARG PYTHON_VERSION="3.11"
ARG PYTORCH_VERSION="next"
ARG CUDA="128"
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
ENV PYTHON_VERSION=$PYTHON_VERSION
ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
RUN apt-get update \
&& apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev pkg-config && rm -rf /var/lib/apt/lists/* \
&& wget \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& mkdir /root/.conda \
&& bash Miniconda3-latest-Linux-x86_64.sh -b \
&& rm -f Miniconda3-latest-Linux-x86_64.sh \
&& conda create -n "py${PYTHON_VERSION}" python="${PYTHON_VERSION}"
ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
WORKDIR /workspace
RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
python3 -m pip install --no-cache-dir -U torch==2.7.0 --extra-index-url https://download.pytorch.org/whl/test/cu$CUDA && \
python3 -m pip install --no-cache-dir "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" && \
python3 -m pip install --no-cache-dir "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main"
RUN git lfs install --skip-repo && \
pip3 install awscli && \
pip3 install -U --no-cache-dir pydantic==2.10.6

View File

@@ -90,7 +90,7 @@ lora_on_cpu: true
  # List[str]. Add plugins to extend the pipeline.
  # See `src/axolotl/integrations` for the available plugins or doc below for more details.
- # https://docs.axolotl.ai/docs/custom_integrations.html
+ # https://axolotl-ai-cloud.github.io/axolotl/docs/custom_integrations.html
  plugins:
    # - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
@@ -109,7 +109,7 @@ datasets:
    preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)
    name: # Optional[str] name of dataset configuration to load
-   split: train # Optional[str] name of dataset split to load from
+   train_on_split: train # Optional[str] name of dataset split to load from
    revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.
    trust_remote_code: # Optional[bool] Trust remote code for untrusted source
@@ -165,9 +165,7 @@ datasets:
        content: value
      # ...
- # Optional[Dict[str, List]]. Roles mapping in the messages.
- # The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.
- # The default is:
+ # Optional[Dict[str, List]]. Roles mapping in the messages. The default is:
  roles:
    user: ["human", "user"]
    assistant: ["gpt", "assistant"]
@@ -394,7 +392,7 @@ lora_fan_in_fan_out: false
  # Apply custom LoRA autograd functions and activation function Triton kernels for
  # speed and memory savings
- # See: https://docs.axolotl.ai/docs/lora_optims.html
+ # See: https://axolotl-ai-cloud.github.io/axolotl/docs/lora_optims.html
  lora_mlp_kernel: true
  lora_qkv_kernel: true
  lora_o_kernel: true
@@ -512,8 +510,7 @@ train_on_inputs: false
  # Note that training loss may have an oscillating pattern with this enabled.
  group_by_length: false
- # Whether to use gradient checkpointing. Available options are: true, false, "offload".
- # https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
+ # Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
  gradient_checkpointing: false
  # additional kwargs to pass to the trainer for gradient checkpointing
  # gradient_checkpointing_kwargs:
@@ -688,14 +685,11 @@ ddp_broadcast_buffers:
  # Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.
  # E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized
  # subsequences, or set to 4 to split into four equal-sized subsequences.
- # See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details.
+ # See https://axolotl-ai-cloud.github.io/axolotl/docs/sequence_parallelism.html for more details.
  sequence_parallel_degree:
  # Optional; strides across the key dimension. Larger values use more memory but should make training faster.
  # Must evenly divide the number of KV heads in your model.
  heads_k_stride: 1
- # One of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to "varlen_llama3"
- # in the sample packing case, and "batch_ring" in the non-sample packing case.
- ring_attn_func:
  # Path to torch distx for optim 'adamw_anyprecision'
  torchdistx_path:

View File

@@ -13,13 +13,6 @@ As there are a lot of available options in Axolotl, this guide aims to provide a
  Axolotl supports 3 kinds of training methods: pre-training, supervised fine-tuning, and preference-based post-training (e.g. DPO, ORPO, PRMs). Each method has its own dataset format, which is described below.

- ::: {.callout-tip}
- This guide will mainly use JSONL as an introduction. Please refer to the [dataset loading docs](../dataset_loading.qmd) to understand how to load datasets from other sources.
-
- For `pretraining_dataset:` specifically, please refer to the [Pre-training section](#pre-training).
- :::

  ## Pre-training

  When aiming to train on large corpora of text datasets, pre-training is your go-to choice. Due to the size of these datasets, downloading the entire dataset before beginning training would be prohibitively time-consuming. Axolotl supports [streaming](https://huggingface.co/docs/datasets/en/stream) to load only a batch at a time into memory.
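The streaming behavior described above is driven by the `pretraining_dataset:` key. As a minimal, hedged sketch (the corpus path and the `type: pretrain` value are illustrative placeholders, not taken from this diff), such a config might look like:

```yaml
pretraining_dataset:
  - path: allenai/c4   # placeholder: any large text corpus on the HF Hub
    name: en
    type: pretrain
max_steps: 1000        # streamed datasets have no known length, so bound training by steps
```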
@@ -457,7 +450,10 @@ datasets:
    type: alpaca
  ```

- Axolotl supports many kinds of instruction dataset. All of them can be found in the [Instruction Dataset Documentation](inst_tune.qmd) with their respective type and sample row format.
+ Axolotl supports many kinds of instruction dataset. All of them can be found here (https://axolotl-ai-cloud.github.io/axolotl/docs/dataset-formats/inst_tune.html) with their respective type and sample row format.
+
+ Reference: [Instruction Dataset Documentation](inst_tune.qmd).

  #### Custom Instruct Prompt Format

View File

@@ -1,276 +0,0 @@
---
title: Dataset Loading
description: Understanding how to load datasets from different sources
back-to-top-navigation: true
toc: true
toc-depth: 5
---
## Overview
Datasets can be loaded in a number of different ways, depending on how the dataset is saved (the file extension) and where it is stored.
## Loading Datasets
We use the `datasets` library to load datasets, via a mix of `load_dataset` and `load_from_disk`.
You may recognize the similarly named configs shared between `load_dataset` and the `datasets` section of the config file.
```yaml
datasets:
- path:
name:
data_files:
split:
revision:
trust_remote_code:
```
::: {.callout-tip}
Do not feel overwhelmed by the number of options here. A lot of them are optional. In fact, the most common config to use would be `path` and sometimes `data_files`.
:::
This matches the API of [`datasets.load_dataset`](https://github.com/huggingface/datasets/blob/0b5998ac62f08e358f8dcc17ec6e2f2a5e9450b6/src/datasets/load.py#L1838-L1858), so if you're familiar with that, you will feel right at home.
For HuggingFace's guide to load different dataset types, see [here](https://huggingface.co/docs/datasets/loading).
For full details on the config, see [config.qmd](config.qmd).
::: {.callout-note}
You can set multiple datasets in the config file by adding more than one entry under `datasets`.
```yaml
datasets:
- path: /path/to/your/dataset
- path: /path/to/your/other/dataset
```
:::
### Local dataset
#### Files
Usually, to load a JSON file, you would do something like this:
```python
from datasets import load_dataset
dataset = load_dataset("json", data_files="data.json")
```
Which translates to the following config:
```yaml
datasets:
- path: json
data_files: /path/to/your/file.jsonl
```
However, to make things easier, we have added a few shortcuts for loading local dataset files.
You can just point the `path` to the file or directory along with the `ds_type` to load the dataset. The below example shows for a JSON file:
```yaml
datasets:
- path: /path/to/your/file.jsonl
ds_type: json
```
This works for CSV, JSON, Parquet, and Arrow files.
::: {.callout-tip}
If `path` points to a file and `ds_type` is not specified, we will automatically infer the dataset type from the file extension, so you could omit `ds_type` if you'd like.
:::
#### Directory
If you're loading a directory, you can point the `path` to the directory.
Then, you have two options:
##### Loading entire directory
You do not need any additional configs.
We will attempt to load in the following order:
- datasets saved with `datasets.save_to_disk`
- loading entire directory of files (such as with parquet/arrow files)
```yaml
datasets:
- path: /path/to/your/directory
```
##### Loading specific files in directory
Provide `data_files` with a list of files to load.
```yaml
datasets:
# single file
- path: /path/to/your/directory
ds_type: csv
data_files: file1.csv
# multiple files
- path: /path/to/your/directory
ds_type: json
data_files:
- file1.jsonl
- file2.jsonl
# multiple files for parquet
- path: /path/to/your/directory
ds_type: parquet
data_files:
- file1.parquet
- file2.parquet
```
### HuggingFace Hub
The method you use to load the dataset depends on how it was created: whether a folder of files was uploaded directly, or a HuggingFace Dataset was pushed.
::: {.callout-note}
If you're using a private dataset, you will need to enable the `hf_use_auth_token` flag in the root-level of the config file.
:::
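As a small illustration of the note above (the dataset path is a placeholder), the flag lives at the root level of the config:

```yaml
hf_use_auth_token: true
datasets:
  - path: your-org/your-private-dataset
```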
#### Folder uploaded
This would mean that the dataset is a single file or file(s) uploaded to the Hub.
```yaml
datasets:
- path: org/dataset-name
data_files:
- file1.jsonl
- file2.jsonl
```
#### HuggingFace Dataset
This means that the dataset is created as a HuggingFace Dataset and pushed to the Hub via `datasets.push_to_hub`.
```yaml
datasets:
- path: org/dataset-name
```
::: {.callout-note}
There are some other configs which may be required, such as `name`, `split`, `revision`, or `trust_remote_code`, depending on the dataset.
:::
### Remote Filesystems
Via the `storage_options` config under `load_dataset`, you can load datasets from remote filesystems like S3, GCS, Azure, and OCI.
::: {.callout-warning}
This is currently experimental. Please let us know if you run into any issues!
:::
The only difference between the providers is that you need to prepend the path with the respective protocols.
```yaml
datasets:
# Single file
- path: s3://bucket-name/path/to/your/file.jsonl
# Directory
- path: s3://bucket-name/path/to/your/directory
```
For directory, we load via `load_from_disk`.
#### S3
Prepend the path with `s3://`.
The credentials are pulled in the following order:
- `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` environment variables
- from the `~/.aws/credentials` file
- for nodes on EC2, the IAM metadata provider
::: {.callout-note}
We assume you have credentials set up and are not using anonymous access. If you want to use anonymous access, let us know! We may have to open a config option for this.
:::
Other environment variables that can be set can be found in [boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables)
#### GCS
Prepend the path with `gs://` or `gcs://`.
The credentials are loaded in the following order:
- gcloud credentials
- for nodes on GCP, the google metadata service
- anonymous access
#### Azure
##### Gen 1
Prepend the path with `adl://`.
Ensure you have the following environment variables set:
- `AZURE_STORAGE_TENANT_ID`
- `AZURE_STORAGE_CLIENT_ID`
- `AZURE_STORAGE_CLIENT_SECRET`
##### Gen 2
Prepend the path with `abfs://` or `az://`.
Ensure you have the following environment variables set:
- `AZURE_STORAGE_ACCOUNT_NAME`
- `AZURE_STORAGE_ACCOUNT_KEY`
Other environment variables that can be set can be found in [adlfs docs](https://github.com/fsspec/adlfs?tab=readme-ov-file#setting-credentials)
#### OCI
Prepend the path with `oci://`.
Credentials are read in the following order:
- `OCIFS_IAM_TYPE`, `OCIFS_CONFIG_LOCATION`, and `OCIFS_CONFIG_PROFILE` environment variables
- when on OCI resource, resource principal
Other environment variables:
- `OCI_REGION_METADATA`
Please see the [ocifs docs](https://ocifs.readthedocs.io/en/latest/getting-connected.html#Using-Environment-Variables).
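Following the same pattern as the S3 example above, the other providers differ only in the protocol prefix; the bucket and container names below are placeholders:

```yaml
datasets:
  # GCS
  - path: gs://bucket-name/path/to/your/file.jsonl
  # Azure (Gen 2)
  - path: abfs://container-name/path/to/your/file.jsonl
  # OCI
  - path: oci://bucket-name/path/to/your/file.jsonl
```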
### HTTPS
The path should start with `https://`.
```yaml
datasets:
- path: https://path/to/your/dataset/file.jsonl
```
This must be publicly accessible.
## Next steps
Now that you know how to load datasets, see the [dataset formats docs](dataset-formats) to learn how to map your specific dataset format to your target output format.

View File

@@ -17,7 +17,6 @@ We currently support several common model architectures, including (but not limi
  - `qwen2`
  - `gemma`
  - `gemma2`
- - `gemma3`

  <details>

View File

@@ -36,9 +36,6 @@ deepspeed: deepspeed_configs/zero1.json
  ### Usage {#sec-deepspeed-usage}

  ```{.bash}
- # Fetch deepspeed configs (if not already present)
- axolotl fetch deepspeed_configs
-
  # Passing arg via config
  axolotl train config.yml
@@ -51,20 +48,10 @@ axolotl train config.yml --deepspeed deepspeed_configs/zero1.json
  We provide default configurations for:

  - ZeRO Stage 1 (`zero1.json`)
- - ZeRO Stage 1 with torch compile (`zero1_torch_compile.json`)
  - ZeRO Stage 2 (`zero2.json`)
  - ZeRO Stage 3 (`zero3.json`)
- - ZeRO Stage 3 with bf16 (`zero3_bf16.json`)
- - ZeRO Stage 3 with bf16 and CPU offload params(`zero3_bf16_cpuoffload_params.json`)
- - ZeRO Stage 3 with bf16 and CPU offload params and optimizer (`zero3_bf16_cpuoffload_all.json`)

- ::: {.callout-tip}
- Choose the configuration that offloads the least amount to memory while still being able to fit on VRAM for best performance.
- Start from Stage 1 -> Stage 2 -> Stage 3.
- :::
+ Choose based on your memory requirements and performance needs.
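For reference, pointing a run at one of these bundled defaults is a single top-level config entry; the stage chosen here is only an example:

```yaml
deepspeed: deepspeed_configs/zero2.json
```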
  ## FSDP {#sec-fsdp}

View File

@@ -9,7 +9,6 @@ format:
  ## Supported Models

  - [Mllama](#sec-mllama)
- - [Llama4](#sec-llama4)
  - [Pixtral](#sec-pixtral)
  - [Llava-1.5](#sec-llava-15)
  - [Mistral-Small-3.1](#sec-mistral-small-31)
@@ -64,14 +63,6 @@ base_model: meta-llama/Llama-3.2-11B-Vision-Instruct
  chat_template: llama3_2_vision
  ```

- ### Llama4 {#sec-llama4}
-
- ```yaml
- base_model: meta-llama/Llama-4-Scout-17B-16E-Instruct
- chat_template: llama4
- ```

  ### Pixtral {#sec-pixtral}

  ```yaml

View File

@@ -530,7 +530,7 @@ trl:
  ```

  ```bash
- CUDA_VISIBLE_DEVICES=2,3 axolotl vllm-serve grpo.yaml
+ CUDA_VISIBLE_DEVICES=2,3 axolotl vllm_serve grpo.yaml
  ```

  Your `vLLM` instance will now attempt to spin up, and it's time to kick off training utilizing our remaining two GPUs. In another terminal, execute:

View File

@@ -27,9 +27,6 @@ To enable sequence parallelism, add the following to your configuration file:
  sequence_parallel_degree: 4  # Split sequences across 4 GPUs
  # Optional; strides across the key dimension. Larger values use more memory but should make training faster.
  heads_k_stride: 1
- # Optional; one of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to
- # "varlen_llama3" when `sample_packing: true`, and "batch_ring" otherwise.
- ring_attn_func:
  ```

  The `sequence_parallel_degree` should be a divisor of the total number of GPUs. For example:

View File

@@ -8,6 +8,10 @@ tokenizer_type: GPT2Tokenizer
trust_remote_code: true trust_remote_code: true
tokenizer_use_fast: true tokenizer_use_fast: true
tokenizer_legacy: true tokenizer_legacy: true
load_in_8bit: false
load_in_4bit: false
strict: false
push_dataset_to_hub: push_dataset_to_hub:
hf_use_auth_token: true hf_use_auth_token: true
datasets: datasets:
@@ -30,6 +34,7 @@ lora_alpha:
lora_dropout: lora_dropout:
lora_target_modules: lora_target_modules:
lora_target_linear: lora_target_linear:
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -53,12 +58,16 @@ learning_rate: 0.000085
train_on_inputs: true train_on_inputs: true
group_by_length: false group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: false gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
sdp_attention: sdp_attention:
flash_optimum: flash_optimum:
@@ -71,6 +80,8 @@ evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
save_total_limit: save_total_limit:
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"

View File

@@ -4,6 +4,7 @@ base_model: cerebras/Cerebras-GPT-1.3B
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
push_dataset_to_hub: push_dataset_to_hub:
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -21,6 +22,7 @@ lora_target_modules:
- c_attn - c_attn
- c_proj - c_proj
lora_target_linear: lora_target_linear:
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
wandb_watch: wandb_watch:
@@ -34,10 +36,15 @@ optimizer: paged_adamw_8bit
torchdistx_path: torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -46,6 +53,10 @@ gptq_model_v1:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,6 +26,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -39,18 +41,29 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,18 +43,28 @@ optimizer: paged_adamw_32bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,6 +26,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -39,18 +41,29 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,18 +43,28 @@ optimizer: paged_adamw_32bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,6 +26,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -39,18 +41,29 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: CodeLlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,18 +43,28 @@ optimizer: paged_adamw_32bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -4,6 +4,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
# huggingface repo # huggingface repo
chat_template: cohere chat_template: cohere
@@ -43,16 +44,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -4,6 +4,10 @@ base_model: LnL-AI/dbrx-base-converted-v2
trust_remote_code: true trust_remote_code: true
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
type: alpaca type: alpaca
@@ -44,20 +48,26 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: false # don't use with fsdp_activation_checkpointing gradient_checkpointing: false # don't use with fsdp_activation_checkpointing
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: evals_per_epoch:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
weight_decay: 0.0 weight_decay: 0.0
fsdp: fsdp:
- full_shard - full_shard

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
@@ -47,20 +48,26 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: false # don't use with fsdp_activation_checkpointing gradient_checkpointing: false # don't use with fsdp_activation_checkpointing
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: evals_per_epoch:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
weight_decay: 0.0 weight_decay: 0.0
fsdp: fsdp:
- full_shard - full_shard

View File

@@ -4,6 +4,10 @@ base_model: LnL-AI/dbrx-base-converted-v2
trust_remote_code: true trust_remote_code: true
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
type: alpaca type: alpaca
@@ -31,19 +35,25 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: evals_per_epoch:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
weight_decay: 0.0 weight_decay: 0.0
deepspeed: deepspeed_configs/zero3_bf16.json deepspeed: deepspeed_configs/zero3_bf16.json

View File

@@ -1,58 +0,0 @@
base_model: agentica-org/DeepCoder-14B-Preview
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
load_in_8bit: true
load_in_4bit: false
strict: false
datasets:
- path: fozziethebeat/alpaca_messages_2k_test
type: chat_template
field_messages: messages
message_property_mappings:
role: role
content: content
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/lora-out
sequence_len: 4096
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 2
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
bf16: auto
tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
flash_attention: true
warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:

View File

@@ -1,58 +0,0 @@
base_model: deepcogito/cogito-v1-preview-llama-3B
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
load_in_8bit: true
load_in_4bit: false
strict: false
datasets:
- path: fozziethebeat/alpaca_messages_2k_test
type: chat_template
field_messages: messages
message_property_mappings:
role: role
content: content
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/lora-out
sequence_len: 4096
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 2
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
bf16: auto
tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
flash_attention: true
warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:

View File

@@ -1,58 +0,0 @@
base_model: deepcogito/cogito-v1-preview-qwen-14B
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
load_in_8bit: true
load_in_4bit: false
strict: false
datasets:
- path: fozziethebeat/alpaca_messages_2k_test
type: chat_template
field_messages: messages
message_property_mappings:
role: role
content: content
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/lora-out
sequence_len: 4096
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 2
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
bf16: auto
tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
flash_attention: true
warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:

View File

@@ -3,6 +3,10 @@ base_model: deepseek-ai/DeepSeek-V2-Lite
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
trust_remote_code: true trust_remote_code: true
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
type: alpaca type: alpaca
@@ -27,19 +31,27 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 2e-5 learning_rate: 2e-5
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 2 evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
special_tokens: special_tokens:
fsdp: fsdp:

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
plugins: plugins:
@@ -51,19 +52,27 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 2e-5 learning_rate: 2e-5
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 2 evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
special_tokens: special_tokens:
fsdp: fsdp:

View File

@@ -11,6 +11,7 @@ trust_remote_code: true
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
gptq: false gptq: false
strict: false
push_dataset_to_hub: push_dataset_to_hub:
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -24,7 +25,9 @@ max_packed_sequence_len:
lora_r: 16 lora_r: 16
lora_alpha: 32 lora_alpha: 32
lora_dropout: 0.0 lora_dropout: 0.0
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
wandb_watch: wandb_watch:
@@ -38,10 +41,15 @@ optimizer: adamw_bnb_8bit
torchdistx_path: torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00003 learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -50,7 +58,11 @@ gptq_model_v1:
warmup_steps: 40 warmup_steps: 40
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"
bos_token: "<|endoftext|>" bos_token: "<|endoftext|>"

View File

@@ -15,6 +15,7 @@ load_in_8bit: false
# enable 4bit for QLoRA # enable 4bit for QLoRA
load_in_4bit: true load_in_4bit: true
gptq: false gptq: false
strict: false
push_dataset_to_hub: push_dataset_to_hub:
datasets: datasets:
- path: QingyiSi/Alpaca-CoT - path: QingyiSi/Alpaca-CoT
@@ -37,7 +38,9 @@ lora_alpha: 16
# 0.05 for 33B and 65B models # 0.05 for 33B and 65B models
lora_dropout: 0.05 lora_dropout: 0.05
# add LoRA modules on all linear layers of the base model # add LoRA modules on all linear layers of the base model
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -64,7 +67,10 @@ lr_scheduler: cosine
# - 2e-4 for 7b & 13b # - 2e-4 for 7b & 13b
# - 1e-4 for 33b & 64b # - 1e-4 for 33b & 64b
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
# stop training after this many evaluation losses have increased in a row # stop training after this many evaluation losses have increased in a row
@@ -72,6 +78,7 @@ gradient_checkpointing: true
early_stopping_patience: 3 early_stopping_patience: 3
resume_from_checkpoint: resume_from_checkpoint:
auto_resume_from_checkpoints: true auto_resume_from_checkpoints: true
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -80,7 +87,11 @@ gptq_model_v1:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.000001 weight_decay: 0.000001
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"
bos_token: "<|endoftext|>" bos_token: "<|endoftext|>"

View File

@@ -7,7 +7,11 @@ tokenizer_type: AutoTokenizer
# required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main # required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main
trust_remote_code: true trust_remote_code: true
load_in_8bit: false
load_in_4bit: false
gptq: false gptq: false
strict: false
push_dataset_to_hub: push_dataset_to_hub:
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -21,7 +25,9 @@ max_packed_sequence_len:
lora_r: 64 lora_r: 64
lora_alpha: 32 lora_alpha: 32
lora_dropout: 0.0 lora_dropout: 0.0
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
wandb_watch: wandb_watch:
@@ -35,10 +41,15 @@ optimizer: adamw_bnb_8bit
torchdistx_path: torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00003 learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -47,7 +58,11 @@ gptq_model_v1:
warmup_steps: 40 warmup_steps: 40
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"
bos_token: "<|endoftext|>" bos_token: "<|endoftext|>"

View File

@@ -8,6 +8,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
# huggingface repo # huggingface repo
datasets: datasets:
@@ -41,16 +42,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
# huggingface repo # huggingface repo
chat_template: gemma chat_template: gemma
@@ -47,16 +48,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -6,6 +6,10 @@ tokenizer_type: AutoTokenizer
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
reward_model: true reward_model: true
chat_template: gemma chat_template: gemma
datasets: datasets:
@@ -34,6 +38,8 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: true bf16: true
fp16: fp16:
tf32: true tf32: true
@@ -41,12 +47,21 @@ tf32: true
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -10,6 +10,7 @@ ddp_find_unused_parameters: true
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
# huggingface repo # huggingface repo
chat_template: gemma3 chat_template: gemma3
@@ -49,18 +50,30 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -1,7 +1,6 @@
base_model: google/gemma-3-4b-it base_model: google/gemma-3-4b-it
processor_type: AutoProcessor processor_type: AutoProcessor
strict: false
load_in_4bit: true
# these 3 lines are needed for now to handle vision chat templates w images # these 3 lines are needed for now to handle vision chat templates w images
skip_prepare_dataset: true skip_prepare_dataset: true
@@ -21,7 +20,7 @@ dataset_prepared_path: last_run_prepared
val_set_size: 0.01 val_set_size: 0.01
output_dir: ./outputs/out output_dir: ./outputs/out
adapter: qlora adapter: lora
lora_model_dir: lora_model_dir:
sequence_len: 2048 sequence_len: 2048
@@ -45,6 +44,8 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: true bf16: true
fp16: fp16:
tf32: true tf32: true
@@ -52,6 +53,7 @@ tf32: true
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
local_rank:
logging_steps: 1 logging_steps: 1
flash_attention: true flash_attention: true
eager_attention: eager_attention:
@@ -59,4 +61,8 @@ eager_attention:
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: 1 evals_per_epoch: 1
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:

View File

@@ -1,60 +0,0 @@
base_model: google/gemma-3-4b-it
load_in_4bit: true
# gemma3 doesn't seem to play nice with ddp
ddp_find_unused_parameters: true
chat_template: gemma3
datasets:
- path: cgato/SlimOrcaDedupCleaned
type: chat_template
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
output_dir: ./outputs/out
adapter: qlora
lora_model_dir:
sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
bf16: true
fp16:
tf32: true
gradient_checkpointing: true
gradient_checkpointing_kwargs:
use_reentrant: false
logging_steps: 1
flash_attention: true
eager_attention:
warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0

View File

@@ -4,6 +4,7 @@ base_model: EleutherAI/gpt-j-6b
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
push_dataset_to_hub: push_dataset_to_hub:
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -17,7 +18,9 @@ max_packed_sequence_len:
lora_r: 8 lora_r: 8
lora_alpha: 32 lora_alpha: 32
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
wandb_watch: wandb_watch:
@@ -31,10 +34,15 @@ optimizer: paged_adamw_8bit
torchdistx_path: torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0001 learning_rate: 0.0001
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -43,6 +51,10 @@ gptq_model_v1:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|endoftext|>" pad_token: "<|endoftext|>"

View File

@@ -6,6 +6,7 @@ trust_remote_code: true
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -39,18 +40,26 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: evals_per_epoch:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
special_tokens: special_tokens:

View File

@@ -5,6 +5,7 @@ trust_remote_code: true
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -38,20 +39,26 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: evals_per_epoch:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed: deepspeed_configs/zero2.json deepspeed: deepspeed_configs/zero2.json
weight_decay: 0.0 weight_decay: 0.0
special_tokens: special_tokens:

View File

@@ -5,6 +5,7 @@ tokenizer_type: AutoTokenizer
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_4bit: true load_in_4bit: true
strict: false
use_tensorboard: true use_tensorboard: true
chat_template: jamba chat_template: jamba
datasets: datasets:
@@ -38,6 +39,8 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: true bf16: true
tf32: true tf32: true

View File

@@ -33,9 +33,13 @@ optimizer: adamw_bnb_8bit
torchdistx_path: torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00003 learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
tf32: true tf32: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 5 logging_steps: 5
xformers_attention: true xformers_attention: true
flash_attention: flash_attention:
@@ -44,7 +48,11 @@ gptq_model_v1:
warmup_steps: 20 warmup_steps: 20
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
fsdp:
fsdp_config:
tokens: tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -5,6 +5,10 @@ tokenizer_type: LlamaTokenizer
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
type: alpaca type: alpaca
@@ -22,6 +26,7 @@ lora_r:
lora_alpha: lora_alpha:
lora_dropout: lora_dropout:
lora_target_linear: lora_target_linear:
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -36,12 +41,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
flash_attn_cross_entropy: false flash_attn_cross_entropy: false
flash_attn_rms_norm: true flash_attn_rms_norm: true
@@ -50,8 +61,11 @@ flash_attn_fuse_mlp: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed: #deepspeed_configs/zero2.json # multi-gpu only deepspeed: #deepspeed_configs/zero2.json # multi-gpu only
weight_decay: 0.1 weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -10,6 +10,9 @@ gptq_disable_exllama: true
tokenizer_use_fast: true tokenizer_use_fast: true
tokenizer_legacy: true tokenizer_legacy: true
load_in_8bit: false
load_in_4bit: false
strict: false
push_dataset_to_hub: push_dataset_to_hub:
hf_use_auth_token: true hf_use_auth_token: true
datasets: datasets:
@@ -30,6 +33,7 @@ lora_target_modules:
- q_proj - q_proj
- v_proj - v_proj
lora_target_linear: lora_target_linear:
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_watch: wandb_watch:
wandb_name: wandb_name:
@@ -46,19 +50,26 @@ torchdistx_path:
lr_scheduler: cosine lr_scheduler: cosine
lr_quadratic_warmup: true lr_quadratic_warmup: true
learning_rate: 0.000017 learning_rate: 0.000017
train_on_inputs: false
group_by_length: false
bf16: false bf16: false
fp16: false fp16: false
float16: true float16: true
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: flash_attention:
sdp_attention: sdp_attention:
flash_optimum: flash_optimum:
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"

View File

@@ -5,6 +5,10 @@ tokenizer_type: LlamaTokenizer
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
type: alpaca type: alpaca
@@ -22,6 +26,7 @@ lora_r:
lora_alpha: lora_alpha:
lora_dropout: lora_dropout:
lora_target_linear: lora_target_linear:
lora_fan_in_fan_out:
lisa_n_layers: 4 lisa_n_layers: 4
lisa_step_interval: 20 lisa_step_interval: 20
@@ -40,12 +45,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 5e-5 # recommendation from lisa paper for 7b learning_rate: 5e-5 # recommendation from lisa paper for 7b
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
flash_attn_cross_entropy: false flash_attn_cross_entropy: false
flash_attn_rms_norm: true flash_attn_rms_norm: true
@@ -54,8 +65,13 @@ flash_attn_fuse_mlp: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.1 weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -5,6 +5,10 @@ tokenizer_type: LlamaTokenizer
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
type: alpaca type: alpaca
@@ -22,6 +26,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
peft: peft:
loftq_config: loftq_config:
loftq_bits: 4 loftq_bits: 4
@@ -39,16 +44,29 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,6 +26,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -39,16 +41,29 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: yahma/alpaca-cleaned - path: yahma/alpaca-cleaned
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,19 +43,28 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: true use_reentrant: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp: fsdp:
- full_shard - full_shard

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,16 +43,27 @@ optimizer: paged_adamw_32bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:

View File

@@ -5,6 +5,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -23,7 +24,9 @@ pad_to_sequence_len: true
lora_r: 8 lora_r: 8
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
relora_steps: 150 relora_steps: 150
relora_warmup_steps: 10 relora_warmup_steps: 10
@@ -42,18 +45,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
bos_token: "<s>" bos_token: "<s>"
eos_token: "</s>" eos_token: "</s>"

View File

@@ -4,6 +4,7 @@ processor_type: AutoProcessor
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
strict: false
# these 3 lines are needed for now to handle vision chat templates w images # these 3 lines are needed for now to handle vision chat templates w images
skip_prepare_dataset: true skip_prepare_dataset: true
@@ -44,11 +45,14 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: true bf16: true
fp16: fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
local_rank:
logging_steps: 1 logging_steps: 1
flash_attention: true flash_attention: true
eager_attention: eager_attention:
@@ -56,4 +60,8 @@ eager_attention:
warmup_ratio: 0.1 warmup_ratio: 0.1
evals_per_epoch: 1 evals_per_epoch: 1
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:

View File

@@ -9,6 +9,7 @@ liger_rms_norm: true
liger_glu_activation: true liger_glu_activation: true
liger_fused_linear_cross_entropy: true liger_fused_linear_cross_entropy: true
strict: false
chat_template: llama3 chat_template: llama3
datasets: datasets:
@@ -41,19 +42,27 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 2e-5 learning_rate: 2e-5
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 2 evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp: fsdp:
- full_shard - full_shard

View File

@@ -2,6 +2,10 @@ base_model: NousResearch/Meta-Llama-3.1-8B
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
type: alpaca type: alpaca
@@ -26,19 +30,29 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 2e-5 learning_rate: 2e-5
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 100 warmup_steps: 100
evals_per_epoch: 2 evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: <|end_of_text|> pad_token: <|end_of_text|>

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
chat_template: llama3 chat_template: llama3
rl: dpo rl: dpo
@@ -41,6 +42,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -55,15 +57,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
chat_template: llama3 chat_template: llama3
datasets: datasets:
@@ -36,6 +37,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -50,17 +52,30 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: <|end_of_text|> pad_token: <|end_of_text|>

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
chat_template: llama3 chat_template: llama3
rl: dpo rl: dpo
@@ -57,6 +58,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -71,15 +73,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -30,6 +31,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save: lora_modules_to_save:
- embed_tokens - embed_tokens
- lm_head - lm_head
@@ -47,17 +49,30 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: <|end_of_text|> pad_token: <|end_of_text|>

View File

@@ -2,6 +2,10 @@ base_model: NousResearch/Llama-3.2-1B
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
type: alpaca type: alpaca
@@ -20,6 +24,7 @@ lora_r: 16
lora_alpha: 32 lora_alpha: 32
# Currently, we don't support dropout with our custom Triton kernels # Currently, we don't support dropout with our custom Triton kernels
# lora_dropout: 0.05 # lora_dropout: 0.05
lora_fan_in_fan_out:
lora_target_modules: lora_target_modules:
- gate_proj - gate_proj
- down_proj - down_proj
@@ -48,12 +53,18 @@ optimizer: adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
loss_watchdog_threshold: 5.0 loss_watchdog_threshold: 5.0
@@ -62,6 +73,10 @@ loss_watchdog_patience: 3
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -2,6 +2,10 @@ base_model: NousResearch/Llama-3.2-1B
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
type: alpaca type: alpaca
@@ -20,6 +24,7 @@ pad_to_sequence_len: true
lora_r: 16 lora_r: 16
lora_alpha: 32 lora_alpha: 32
lora_dropout: 0.05 lora_dropout: 0.05
lora_fan_in_fan_out:
lora_target_modules: lora_target_modules:
- gate_proj - gate_proj
- down_proj - down_proj
@@ -42,12 +47,18 @@ optimizer: adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
loss_watchdog_threshold: 5.0 loss_watchdog_threshold: 5.0
@@ -56,9 +67,11 @@ loss_watchdog_patience: 3
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed: deepspeed_configs/zero3.json deepspeed: deepspeed_configs/zero3.json
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -32,6 +33,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save: lora_modules_to_save:
- embed_tokens - embed_tokens
- lm_head - lm_head
@@ -49,17 +51,30 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: <|end_of_text|> pad_token: <|end_of_text|>

View File

@@ -2,6 +2,10 @@ base_model: NousResearch/Llama-3.2-1B
# Automatically upload checkpoint and final model to HF # Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_8bit: false
load_in_4bit: false
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
type: alpaca type: alpaca
@@ -20,6 +24,7 @@ pad_to_sequence_len: true
lora_r: 16 lora_r: 16
lora_alpha: 32 lora_alpha: 32
lora_dropout: 0.05 lora_dropout: 0.05
lora_fan_in_fan_out:
lora_target_modules: lora_target_modules:
- gate_proj - gate_proj
- down_proj - down_proj
@@ -42,12 +47,18 @@ optimizer: adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
loss_watchdog_threshold: 5.0 loss_watchdog_threshold: 5.0
@@ -56,6 +67,10 @@ loss_watchdog_patience: 3
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: true load_in_8bit: true
load_in_4bit: false load_in_4bit: false
strict: false
datasets: datasets:
- path: mhenrichsen/alpaca_2k_test - path: mhenrichsen/alpaca_2k_test
@@ -26,6 +27,7 @@ lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save: lora_modules_to_save:
- embed_tokens - embed_tokens
- lm_head - lm_head
@@ -43,17 +45,30 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
s2_attention:
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: <|end_of_text|> pad_token: <|end_of_text|>

View File

@@ -4,6 +4,7 @@ base_model: meta-llama/Llama-3.2-1B
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
rl: kto rl: kto
rl_beta: 0.5 rl_beta: 0.5
@@ -31,6 +32,7 @@ lora_r: 32
lora_alpha: 64 lora_alpha: 64
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -45,19 +47,31 @@ optimizer: adamw_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: true tf32: true
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: false use_reentrant: false
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 20 warmup_steps: 20
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -4,6 +4,7 @@ base_model: NousResearch/Llama-3.2-1B
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: teknium/GPT4-LLM-Cleaned - path: teknium/GPT4-LLM-Cleaned
@@ -23,6 +24,7 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_fan_in_fan_out:
lora_target_modules: lora_target_modules:
- gate_proj - gate_proj
- down_proj - down_proj
@@ -45,12 +47,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
loss_watchdog_threshold: 5.0 loss_watchdog_threshold: 5.0
@@ -58,7 +66,13 @@ loss_watchdog_patience: 3
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -5,6 +5,7 @@ tokenizer_type: AutoTokenizer
# hub_model_id: username/custom_model_name # hub_model_id: username/custom_model_name
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
@@ -23,6 +24,7 @@ pad_to_sequence_len: true
lora_r: 16 lora_r: 16
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
gradient_accumulation_steps: 4 gradient_accumulation_steps: 4
@@ -32,6 +34,8 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: true bf16: true
tf32: true tf32: true

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: tatsu-lab/alpaca - path: tatsu-lab/alpaca
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 8 lora_r: 8
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,19 +43,28 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.00001 learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
gradient_checkpointing_kwargs: gradient_checkpointing_kwargs:
use_reentrant: true use_reentrant: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp: fsdp:
- full_shard - full_shard

View File

@@ -7,6 +7,7 @@ tokenizer_type: AutoTokenizer
load_in_8bit: false load_in_8bit: false
load_in_4bit: true load_in_4bit: true
strict: false
datasets: datasets:
- path: aaditya/alpaca_subset_1 - path: aaditya/alpaca_subset_1
@@ -25,7 +26,9 @@ pad_to_sequence_len: true
lora_r: 32 lora_r: 32
lora_alpha: 16 lora_alpha: 16
lora_dropout: 0.05 lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: wandb_project:
wandb_entity: wandb_entity:
@@ -40,17 +43,28 @@ optimizer: paged_adamw_32bit
lr_scheduler: cosine lr_scheduler: cosine
learning_rate: 0.0002 learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto bf16: auto
fp16:
tf32: false tf32: false
gradient_checkpointing: true gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint: resume_from_checkpoint:
local_rank:
logging_steps: 1 logging_steps: 1
xformers_attention:
flash_attention: true flash_attention: true
warmup_steps: 10 warmup_steps: 10
evals_per_epoch: 4 evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1 saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0 weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: special_tokens:
pad_token: "<|end_of_text|>" pad_token: "<|end_of_text|>"

View File

@@ -1,28 +0,0 @@
# Llama 4 by Meta AI
## Flash Attention vs Flex Attention
While Flash Attention support is "enabled" for Llama-4, the upstream implementation is not correct, so using Flex Attention is recommended instead.
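For reference, the Flex Attention keys these examples rely on look like the following minimal sketch (taken from the QLoRA example configs elsewhere in this diff; all other training settings live in the linked YAMLs):

```yaml
# enable Flex Attention instead of Flash Attention for Llama 4
flex_attention: true
# compile options used with the flex attention kernels in the example configs
flex_attn_compile_kwargs:
  dynamic: false
  mode: max-autotune-no-cudagraphs
```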
## Available Examples
### Llama 4 Scout 17Bx16Experts (109B)
Flex Attention
- [Text Single GPU (H100) QLoRA](./scout-qlora-single-h100-flex.yaml)
- [Text Multi GPU QLoRA w/ FSDP2](./scout-qlora-flexattn-fsdp2.yaml)
[//]: # (Flash Attention &#40;Do not use&#41;)
[//]: # (- [Multi-Modal/Vision QLoRA w/ FSDP1]&#40;./scout-vision-qlora-fsdp.yaml&#41;)
[//]: # (- [Text Single GPU &#40;H100&#41; QLoRA]&#40;./scout-qlora-single-h100.yaml&#41;)
[//]: # (- [Text Multi GPU QLoRA w/ FSDP1]&#40;./scout-qlora-fsdp1.yaml&#41;)
Our Single H100 implementation for Llama 4 Scout uses only 64.5GB VRAM for post-training with 4k context length @ 519 tokens/second. [WandB logs here](https://wandb.ai/axolotl-ai/llama4-flexattn-qlora/runs/wpie7dkj)
Multi-GPU (4xH100) for Llama 4 Scout uses 62.8GB VRAM/GPU with 4k context length @ 280 tokens/second per GPU. [WandB logs here](https://wandb.ai/axolotl-ai/llama4-flexattn-qlora/runs/2lkezdj8)
### Llama 4 Maverick 17Bx128Experts (400B)
Coming Soon

View File

@@ -1,88 +0,0 @@
base_model: axolotl-quants/Llama-4-Maverick-17B-128E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
# - lm_head
# - embed_tokens
chat_template: llama4
datasets:
- path: mlabonne/FineTome-100k
type: chat_template
split: train[:20%]
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 1e-4
bf16: true
tf32: true
logging_steps: 1
flash_attention: true
gradient_checkpointing: offload
gradient_checkpointing_kwargs:
use_reentrant: false
warmup_steps: 20
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
- auto_wrap
- full_shard
fsdp_config:
fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
fsdp_limit_all_gathers: true
fsdp_sync_module_states: true
fsdp_offload_params: true
fsdp_use_orig_params: false
fsdp_cpu_ram_efficient_loading: true
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>

View File

@@ -1,92 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
# torch_compile: true
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
- lm_head
- embed_tokens
chat_template: llama4
datasets:
- path: mlabonne/FineTome-100k
type: chat_template
split: train[:20%]
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5
bf16: true
tf32: true
logging_steps: 1
flash_attention: true
warmup_steps: 100
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
- auto_wrap
- full_shard
fsdp_config:
fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
fsdp_limit_all_gathers: true
fsdp_sync_module_states: true
fsdp_offload_params: true
fsdp_use_orig_params: false
fsdp_cpu_ram_efficient_loading: true
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
fsdp_activation_checkpointing: true
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>

View File

@@ -1,85 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
# - lm_head
# - embed_tokens
lora_mlp_kernel: true
lora_qkv_kernel: true
lora_o_kernel: true
chat_template: llama4
datasets:
- path: mlabonne/FineTome-100k
type: chat_template
split: train[:20%]
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
sequence_len: 4096 # up to 8k will work on a single H100
sample_packing: true
pad_to_sequence_len: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 1e-4
bf16: true
tf32: true
logging_steps: 1
flash_attention: true
gradient_checkpointing: offload
gradient_checkpointing_kwargs:
use_reentrant: false
warmup_steps: 20
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>

View File

@@ -1,88 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
processor_type: Llama4Processor
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
# these 3 lines are needed for now to handle vision chat templates w images
skip_prepare_dataset: true
remove_unused_columns: false
sample_packing: false
sequence_len: 4096
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true # use Axolotl's customized model
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
- vision_adapter.mlp.fc1
- vision_adapter.mlp.fc2
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
- lm_head
- embed_tokens
chat_template: llama4
datasets:
- path: HuggingFaceH4/llava-instruct-mix-vsft
type: chat_template
split: train[:1%]
field_messages: messages
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 2e-5
bf16: true
tf32: true
logging_steps: 1
flash_attention: true
warmup_steps: 100
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
- auto_wrap
- full_shard
fsdp_config:
fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
fsdp_limit_all_gathers: true
fsdp_sync_module_states: true
fsdp_offload_params: true
fsdp_use_orig_params: false
fsdp_cpu_ram_efficient_loading: true
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
fsdp_activation_checkpointing: true
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>

View File

@@ -1,86 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
# - lm_head
# - embed_tokens
chat_template: llama4
datasets:
- path: mlabonne/FineTome-100k
type: chat_template
split: train[:20%]
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 1e-4
bf16: true
tf32: true
logging_steps: 1
flex_attention: true
flex_attn_compile_kwargs:
dynamic: false
mode: max-autotune-no-cudagraphs
warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
- auto_wrap
- full_shard
fsdp_config:
fsdp_version: 2
fsdp_offload_params: false
fsdp_cpu_ram_efficient_loading: true
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
fsdp_reshard_after_forward: true
fsdp_activation_checkpointing: true
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>

View File

@@ -1,85 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
plugins:
- axolotl.integrations.liger.LigerPlugin
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
cut_cross_entropy: true
llama4_linearized_experts: true # needed with custom linearized experts model
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
# - experts.gate_projs.[0-9]+$ # optionally train the moe experts
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
# - lm_head # needed if modifying vocabulary
# - embed_tokens
lora_mlp_kernel: true
lora_qkv_kernel: true
lora_o_kernel: true
chat_template: llama4
datasets:
- path: mlabonne/FineTome-100k
type: chat_template
split: train[:20%]
field_messages: conversations
message_property_mappings:
role: from
content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
sequence_len: 4096 # up to 8k will work on a single H100
sample_packing: true
pad_to_sequence_len: true
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 1e-4
bf16: true
tf32: true
torch_compile: true
flex_attention: true
flex_attn_compile_kwargs:
dynamic: false
mode: max-autotune-no-cudagraphs
gradient_checkpointing: offload
gradient_checkpointing_kwargs:
use_reentrant: false
logging_steps: 1
warmup_steps: 20
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>
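A config like this one trains on a single device (it has no fsdp section), so a typical flow is to tokenize the dataset once and then train; scout-qlora-single-gpu.yaml is an assumed filename used only for illustration:

    # Tokenize and pack the dataset ahead of time, then start training.
    axolotl preprocess scout-qlora-single-gpu.yaml
    axolotl train scout-qlora-single-gpu.yaml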

View File

@@ -1,89 +0,0 @@
base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
processor_type: Llama4Processor
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
# these 3 lines are needed for now to handle vision chat templates w images
skip_prepare_dataset: true
remove_unused_columns: false
sample_packing: false
sequence_len: 4096
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
llama4_linearized_experts: true # use Axolotl's customized model
load_in_4bit: true
adapter: qlora
lora_r: 32
lora_alpha: 64
lora_target_modules:
- self_attn.q_proj
- self_attn.k_proj
- self_attn.v_proj
- self_attn.o_proj
- shared_expert.gate_proj
- shared_expert.up_proj
- shared_expert.down_proj
- vision_adapter.mlp.fc1
- vision_adapter.mlp.fc2
# - experts.gate_projs.[0-9]+$
# - experts.up_projs.[0-9]+$
# - experts.down_projs.[0-9]+$
lora_modules_to_save:
- lm_head
- embed_tokens
chat_template: llama4
datasets:
- path: HuggingFaceH4/llava-instruct-mix-vsft
type: chat_template
split: train[:1%]
field_messages: messages
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 1e-4
bf16: true
tf32: true
logging_steps: 1
flex_attention: true
flex_attn_compile_kwargs:
dynamic: false
mode: max-autotune-no-cudagraphs
warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
- auto_wrap
- full_shard
fsdp_config:
fsdp_version: 2
fsdp_offload_params: false
fsdp_cpu_ram_efficient_loading: true
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_transformer_layer_cls_to_wrap: Llama4TextDecoderLayer
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
fsdp_reshard_after_forward: true
fsdp_activation_checkpointing: true
special_tokens:
pad_token: <|finetune_right_pad_id|>
eos_token: <|eot|>
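Since this variant shards the model with FSDP (fsdp_version: 2), it assumes a multi-GPU launch; a minimal sketch with the filename and GPU count as placeholders:

    # Multi-GPU FSDP run; --num_processes should match the number of local GPUs.
    accelerate launch --num_processes 8 -m axolotl.cli.train scout-vision-qlora-fsdp2.yaml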

View File

@@ -1,5 +1,6 @@
base_model: llava-hf/llava-1.5-7b-hf
processor_type: AutoProcessor
+strict: false
# these 3 lines are needed for now to handle vision chat templates w images
skip_prepare_dataset: true
@@ -40,11 +41,14 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: true
fp16:
tf32: true
gradient_checkpointing: true
+local_rank:
logging_steps: 1
flash_attention: true
eager_attention:
@@ -52,4 +56,8 @@ eager_attention:
warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:

View File

@@ -6,6 +6,10 @@ tokenizer_config: EleutherAI/gpt-neox-20b
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
+load_in_8bit: false
+load_in_4bit: false
+strict: false
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
@@ -34,17 +38,27 @@ train_on_inputs: false
group_by_length: true
bf16: auto
+fp16:
tf32: true
gradient_checkpointing: false
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention:
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:
tokens:
save_safetensors: False

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
trust_remote_code: true
+load_in_8bit: false
+load_in_4bit: false
+strict: false
unfrozen_parameters:
  - ^lm_head.weight$
  - ^model.embed_tokens.weight$
@@ -36,19 +40,27 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0001
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
save_total_limit: 1
save_steps:
+debug:
deepspeed: deepspeed_configs/zero3_bf16_cpuoffload_params.json
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:
  eos_token: "<|im_end|>"
tokens:

View File

@@ -5,6 +5,10 @@ tokenizer_type: LlamaTokenizer
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
+load_in_8bit: false
+load_in_4bit: false
+strict: false
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
@@ -30,16 +34,28 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

View File

@@ -5,6 +5,10 @@ tokenizer_type: LlamaTokenizer
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
+load_in_8bit: false
+load_in_4bit: false
+strict: false
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
@@ -24,6 +28,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
@@ -46,13 +51,18 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
fp16: false
tf32: true
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: false
sdp_attention: true
@@ -61,6 +71,12 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_table_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: true
load_in_4bit: false
+strict: false
datasets:
  - path: mhenrichsen/alpaca_2k_test
@@ -26,6 +27,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
@@ -48,12 +50,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -61,6 +69,12 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

View File

@@ -12,6 +12,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: true
+strict: false
chat_template: chatml
rl: dpo
@@ -39,6 +40,7 @@ lora_r: 8
lora_alpha: 16
lora_dropout: 0.2
lora_target_linear: true
+lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
@@ -65,18 +67,31 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0001
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: false
+s2_attention:
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:
  bos_token: "<|im_start|>"
  eos_token: "<|im_end|>"

View File

@@ -9,6 +9,7 @@ trust_remote_code: true
load_in_8bit: false
load_in_4bit: true
+strict: false
datasets:
  - path: tatsu-lab/alpaca
@@ -31,6 +32,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
wandb_project:
wandb_entity:
@@ -45,12 +47,18 @@ optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -58,8 +66,10 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
weight_decay: 0.0
fsdp:
  - full_shard

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: true
+strict: false
rl: orpo
orpo_alpha: 0.1
@@ -31,6 +32,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
lora_target_modules:
  - gate_proj
  - down_proj
@@ -53,12 +55,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -66,6 +74,12 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

View File

@@ -1,5 +1,6 @@
base_model: mistralai/Mistral-Small-3.1-24B-Instruct-2503
processor_type: AutoProcessor
+strict: false
load_in_8bit: true
@@ -42,11 +43,14 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: true
fp16:
tf32: true
gradient_checkpointing: true
+local_rank:
logging_steps: 1
flash_attention: false # PixtralVisionModel does not support Flash Attention 2.0 yet.
eager_attention:
@@ -54,5 +58,9 @@ eager_attention:
warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1
+debug:
+deepspeed:
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

View File

@@ -7,6 +7,7 @@ tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: true
+strict: false
datasets:
  - path: tatsu-lab/alpaca
@@ -29,6 +30,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
wandb_project:
wandb_entity:
@@ -43,12 +45,18 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: true
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -56,8 +64,10 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
weight_decay: 0.0
fsdp:
  - full_shard

View File

@@ -9,6 +9,7 @@ trust_remote_code: true
load_in_8bit: false
load_in_4bit: true
+strict: false
datasets:
  - path: tatsu-lab/alpaca
@@ -31,6 +32,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
wandb_project:
wandb_entity:
@@ -45,12 +47,18 @@ optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: true
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -58,8 +66,10 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
weight_decay: 0.0
fsdp:
  - full_shard

View File

@@ -9,6 +9,7 @@ trust_remote_code: true
load_in_8bit: false
load_in_4bit: true
+strict: false
datasets:
  - path: tatsu-lab/alpaca
@@ -40,6 +41,7 @@ lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
+lora_fan_in_fan_out:
#lora_target_modules:
#  - gate
#  - q_proj
@@ -63,12 +65,18 @@ optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
bf16: auto
+fp16:
tf32: false
gradient_checkpointing: true
+early_stopping_patience:
resume_from_checkpoint:
+local_rank:
logging_steps: 1
+xformers_attention:
flash_attention: true
loss_watchdog_threshold: 5.0
@@ -76,8 +84,12 @@ loss_watchdog_patience: 3
warmup_steps: 10
evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
saves_per_epoch: 1
+debug:
deepspeed: deepspeed_configs/zero2.json
weight_decay: 0.0
+fsdp:
+fsdp_config:
special_tokens:

Some files were not shown because too many files have changed in this diff.