Compare commits
2 Commits
feat/torch...accelerato

| Author | SHA1 | Date |
|---|---|---|
|  | d260eeb57d |  |
|  | 5a7f007d20 |  |
@@ -210,8 +210,6 @@ axolotl lm-eval config.yml

Configuration options:

```yaml
lm_eval_model: # model to evaluate (local or hf path)

# List of tasks to evaluate
lm_eval_tasks:
  - arc_challenge
@@ -220,7 +218,7 @@ lm_eval_batch_size: # Batch size for evaluation
output_dir: # Directory to save evaluation results
```

See [LM Eval Harness integration docs](https://docs.axolotl.ai/docs/custom_integrations.html#language-model-evaluation-harness-lm-eval) for full configuration details.
See [LM Eval Harness](https://github.com/EleutherAI/lm-evaluation-harness) for more details.

### delinearize-llama4

@@ -258,6 +258,11 @@ class TrainerBuilderBase(abc.ABC):
        bf16 = bf16 if bf16 is not None else False
        training_args_kwargs["bf16"] = bf16

        if self.cfg.fp8:
            training_args_kwargs["fp8"] = True
            if self.cfg.fp8_enable_fsdp_float8_all_gather:
training_args_kwargs["enable_fsdp_float8_all_gather:"] = True

    def _configure_scheduler(self, training_args_kwargs: dict):
        if self.cfg.lr_scheduler in ["one_cycle", "rex"]:
            training_args_kwargs["lr_scheduler_type"] = "cosine"

@@ -584,11 +584,9 @@ class AxolotlTrainer(

        super().create_accelerator_and_postprocess()

    def additional_accelerator_args(
        self, fp8: bool = False, enable_fsdp_float8_all_gather: bool = False, **kwargs
    ) -> dict[str, Any]:
        ret_kwargs = {}
        if fp8:
    def build_fp8_accelerator_args(self) -> dict[str, Any]:
        args = {}
        if self.args.fp8:
            from accelerate.utils import AORecipeKwargs
            from torchao.float8 import Float8LinearConfig

@@ -596,15 +594,22 @@ class AxolotlTrainer(
            # scaling strategy. See more details here:
            # https://github.com/pytorch/ao/tree/main/torchao/float8.
            config = Float8LinearConfig(
                enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather,
                force_recompute_fp8_weight_in_bwd=enable_fsdp_float8_all_gather is True,
                enable_fsdp_float8_all_gather=self.args.enable_fsdp_float8_all_gather,
                force_recompute_fp8_weight_in_bwd=self.args.enable_fsdp_float8_all_gather
                is True,
            )

            ret_kwargs["mixed_precision"] = "fp8"
            ret_kwargs["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
            args["mixed_precision"] = "fp8"
            args["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
            os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"

        return ret_kwargs
        return args

    def _build_accelerator_args(self, **kwargs) -> dict[str, Any]:
        args = super().build_accelerator_args(**kwargs)
        fp8_args = self.build_fp8_accelerator_args()
        args.update(fp8_args)
        return args

    def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
        """

@@ -263,3 +263,13 @@ class AxolotlTrainingMixins:
    dion_rank_multiple_of: int | None = field(
        default=None,
    )

    fp8: bool | None = field(
        default=None,
        metadata={"help": "Whether to use FP8 precision for training"},
    )

    enable_fsdp_float8_all_gather: bool | None = field(
        default=None,
        metadata={"help": "Whether to use FSDP with FP8 precision for all_gather"},
    )

@@ -1,44 +0,0 @@
# Kernels Integration

MoE (Mixture of Experts) kernels speed up training for MoE layers and reduce VRAM costs. In transformers v5, `batched_mm` and `grouped_mm` were integrated as built-in options via the `experts_implementation` config kwarg:

```python
class ExpertsInterface(GeneralInterface):
    _global_mapping = {
        "batched_mm": batched_mm_experts_forward,
        "grouped_mm": grouped_mm_experts_forward,
    }
```

In our custom integration, we add support for **ScatterMoE**, which is even more efficient and faster than `grouped_mm`.

## Usage

Add the following to your axolotl YAML config:

```yaml
plugins:
  - axolotl.integrations.kernels.KernelsPlugin

use_kernels: true
use_scattermoe: true
```

**Important:** Setting `experts_implementation` is incompatible with `use_scattermoe`.

## How It Works

The `KernelsPlugin` runs before model loading and:

1. Registers the ScatterMoE kernel from the [`axolotl-ai-co/scattermoe`](https://huggingface.co/axolotl-ai-co/scattermoe) Hub repo.
2. Patches the model's `SparseMoeBlock` forward method with the optimized ScatterMoE implementation.

This works for any MoE model in transformers that uses a `SparseMoeBlock` class (Mixtral, Qwen2-MoE, OLMoE, etc.).
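
For orientation, here is a minimal, hypothetical sketch of the patching step (not the plugin's actual code): it walks the loaded model, finds every module whose class name ends in `SparseMoeBlock`, and rebinds its `forward`. The stand-in forward below just delegates to the original class-level implementation so the sketch stays runnable; the real plugin swaps in the ScatterMoE kernel fetched from the Hub.

```python
import types

from transformers import AutoModelForCausalLM


def patch_sparse_moe_blocks(model, new_forward):
    """Bind new_forward onto every *SparseMoeBlock module in the model."""
    patched = 0
    for module in model.modules():
        if module.__class__.__name__.endswith("SparseMoeBlock"):
            module.forward = types.MethodType(new_forward, module)
            patched += 1
    return patched


def passthrough_forward(self, hidden_states):
    # Stand-in for the optimized ScatterMoE forward; it simply calls the
    # unpatched class-level forward, so behavior is unchanged in this sketch.
    return type(self).forward(self, hidden_states)


model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
print(patch_sparse_moe_blocks(model, passthrough_forward), "MoE blocks patched")
```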

## Limitations

ScatterMoE uses softmax -> top-k routing, so results may differ from the baseline for some model architectures (GPT-OSS, GLM_MOE_DSA).
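
To make the routing difference concrete, the toy example below (illustrative only, not kernel code) compares softmax-then-top-k with top-k-then-softmax for one token's router logits: the selected experts match, but the gate weights differ, which is why outputs can drift from the baseline on architectures that normalize after the top-k.

```python
import torch

router_logits = torch.tensor([2.0, 1.0, 0.5, -1.0])  # one token, four experts
k = 2

# ScatterMoE-style: softmax over all experts, then keep the top-k weights
probs = torch.softmax(router_logits, dim=-1)
softmax_first, experts_a = probs.topk(k)

# Alternative used by some architectures: top-k first, then softmax over those logits
top_logits, experts_b = router_logits.topk(k)
topk_first = torch.softmax(top_logits, dim=-1)

assert torch.equal(experts_a, experts_b)  # same experts are selected
print(softmax_first)  # ~[0.61, 0.22] -- weights sum to less than 1
print(topk_first)     # ~[0.73, 0.27] -- renormalized over the top-k only
```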

## Note on MegaBlocks

We tested [MegaBlocks](https://huggingface.co/kernels-community/megablocks) but were unable to ensure numerical accuracy, so we did not integrate it. It was also incompatible with many newer model architectures in transformers.

@@ -6,12 +6,6 @@ See https://github.com/EleutherAI/lm-evaluation-harness

## Usage

There are two ways to use the LM Eval integration:

### 1. Post-Training Evaluation

When training with the plugin enabled, evaluation runs automatically after training completes:

```yaml
plugins:
  - axolotl.integrations.lm_eval.LMEvalPlugin
@@ -22,50 +16,9 @@ lm_eval_tasks:
  - arc_easy

lm_eval_batch_size: # Batch size for evaluation

# Directory to save evaluation results.
# The final model is loaded from this directory
# unless specified otherwise (see below)
output_dir:
output_dir: # Directory to save evaluation results
```

Run training as usual:
```bash
axolotl train config.yml
```

### 2. Standalone CLI Evaluation

Evaluate any model directly without training:

```yaml
lm_eval_model: meta-llama/Llama-2-7b-hf

plugins:
  - axolotl.integrations.lm_eval.LMEvalPlugin

lm_eval_tasks:
  - gsm8k
  - hellaswag
  - arc_easy

lm_eval_batch_size: 8
output_dir: ./outputs
```

Run evaluation:
```bash
axolotl lm-eval config.yml
```

## Model Selection Priority

The model to evaluate is selected in the following priority order:

1. **`lm_eval_model`** - Explicit model path or HuggingFace repo (highest priority)
2. **`hub_model_id`** - Trained model pushed to HuggingFace Hub
3. **`output_dir`** - Local checkpoint directory containing trained model weights
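
As a quick illustration of that fallback chain, here is a small sketch mirroring the behavior described above (the helper name is hypothetical, not the integration's API; the diff's `get_model_path` shown further below implements the same order, deferring `output_dir` to `build_lm_eval_command`):

```python
def select_eval_model(lm_eval_model=None, hub_model_id=None, output_dir=None):
    """Return the first configured source: explicit model, Hub repo, then output_dir."""
    return lm_eval_model or hub_model_id or output_dir


assert select_eval_model(lm_eval_model="org/custom") == "org/custom"
assert select_eval_model(hub_model_id="org/trained") == "org/trained"
assert select_eval_model(output_dir="./outputs") == "./outputs"
```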

## Citation

```bib

@@ -5,7 +5,7 @@ Module for the Plugin for LM Eval Harness
import subprocess  # nosec

from axolotl.integrations.base import BasePlugin
from axolotl.integrations.lm_eval.cli import build_lm_eval_command, get_model_path
from axolotl.integrations.lm_eval.cli import build_lm_eval_command

from .args import LMEvalArgs as LMEvalArgs

@@ -29,7 +29,7 @@ class LMEvalPlugin(BasePlugin):
            wandb_project=cfg.wandb_project,
            wandb_entity=cfg.wandb_entity,
            wandb_name=cfg.wandb_name,
            model=get_model_path(cfg),
            model=cfg.lm_eval_model or cfg.hub_model_id,
        ):
            subprocess.run(  # nosec
                lm_eval_args,

@@ -13,21 +13,6 @@ import yaml
from axolotl.utils.dict import DictDefault


def get_model_path(cfg: DictDefault) -> str | None:
    """
    Determine which model path to use for evaluation.

    Priority order (highest to lowest):
    1. lm_eval_model - Explicit model path override
    2. hub_model_id - Model pushed to HuggingFace Hub
    3. None - Falls back to output_dir in build_lm_eval_command

    Returns:
        Model path string or None to use output_dir fallback
    """
    return cfg.lm_eval_model or cfg.hub_model_id or None


def build_lm_eval_command(
    tasks: list[str],
    bfloat16=True,
@@ -123,7 +108,7 @@ def lm_eval(config: str, cloud: Optional[str] = None):
        wandb_project=cfg.wandb_project,
        wandb_entity=cfg.wandb_entity,
        wandb_name=cfg.wandb_name,
        model=get_model_path(cfg),
        model=cfg.lm_eval_model or cfg.hub_model_id,
        revision=cfg.revision,
        apply_chat_template=cfg.apply_chat_template,
        fewshot_as_multiturn=cfg.fewshot_as_multiturn,

@@ -15,7 +15,7 @@ from torch import nn
from torch.distributed.tensor import DTensor

from .geglu import geglu_backward, geglu_forward
from .quantize import dequantize_weight
from .quantize import dequantize
from .swiglu import swiglu_backward, swiglu_forward
from .utils import torch_amp_custom_bwd, torch_amp_custom_fwd

@@ -46,12 +46,6 @@ def get_lora_parameters(
    W = base_layer.weight
    b = base_layer.bias

    # Unwrap DTensor if FSDP2 left the weight wrapped -- DTensor does not proxy
    # attribute access to the underlying tensor subclass, so torchao methods like
    # .dequantize() or .get_original_weight() would not be visible.
    if isinstance(W, DTensor):
        W = W.full_tensor()

    if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged:
        quant_state = getattr(W, "quant_state", None)
        return W, b, quant_state, None, None, None
@@ -92,7 +86,6 @@ def matmul_lora(
    B: torch.Tensor | None,
    s: float | None,
    out: torch.Tensor | None = None,
    transpose: bool = True,
) -> torch.Tensor:
    """
    Efficient fused matmul + LoRA computation.

@@ -105,15 +98,12 @@
        B: LoRA B matrix [out_features, rank]
        s: LoRA scaling factor
        out: Optional output tensor for inplace operations
        transpose: If True (default), transpose W before matmul (forward path).
            Set to False for backward paths where W is already in the correct layout.

    Returns:
        Result of X @ W + X @ A @ B
    """
    dtype = X.dtype
    is_quantized = W_quant is not None or type(W) is not torch.Tensor
    W = dequantize_weight(W, W_quant, transpose=transpose)
    W = dequantize(W.t(), W_quant)

    reshape = False
    if X.dim() == 3:
@@ -122,7 +112,7 @@
        reshape = True

    out = torch.matmul(X, W, out=out)
    if is_quantized:
    if W_quant is not None:
        del W

    if A is not None:
@@ -302,16 +292,15 @@ class LoRA_MLP(torch.autograd.Function):
        up = up.view(-1, up.shape[-1])
        dtype = X.dtype

        # Down projection (backward: no transpose needed, W is already [out, in])
        # Down projection
        grad_down = matmul_lora(
            grad_output,
            down_weight,
            down_weight.t(),
            None,
            down_quant,
            down_B,
            down_A,
            down_scale,
            transpose=False,
        )

        # Activation backward
@@ -343,7 +332,7 @@ class LoRA_MLP(torch.autograd.Function):

        if dX is not None:
            # Up projection gradients
            up_weight = dequantize_weight(up_weight, up_quant, transpose=True)
            up_weight = dequantize(up_weight.t(), up_quant)
            if ctx.inplace:
                dX = torch.matmul(grad_up, up_weight.t(), out=X)
            else:
@@ -355,7 +344,7 @@ class LoRA_MLP(torch.autograd.Function):
                dX += grad_up @ up_B.to(dtype).t() @ (up_scale * up_A.to(dtype).t())

            # Gate projection gradients
            gate_weight = dequantize_weight(gate_weight, gate_quant)
            gate_weight = dequantize(gate_weight, gate_quant)
            dX += grad_gate @ gate_weight
            del gate_weight

@@ -642,7 +631,7 @@ class LoRA_QKV(torch.autograd.Function):
        out_buffer = X if ctx.inplace else None

        # Q path
        q_weight_t = dequantize_weight(q_weight, q_quant)
        q_weight_t = dequantize(q_weight, q_quant)
        grad_X = torch.mm(q_grad, q_weight_t, out=out_buffer)
        del q_weight
        del q_weight_t
@@ -650,7 +639,7 @@ class LoRA_QKV(torch.autograd.Function):
            grad_X.addmm_(q_grad, torch.mm(B_q_scaled, A_q_scaled))

        # K path
        k_weight_t = dequantize_weight(k_weight, k_quant)
        k_weight_t = dequantize(k_weight, k_quant)
        grad_X.addmm_(k_grad, k_weight_t)
        del k_weight
        del k_weight_t
@@ -658,7 +647,7 @@ class LoRA_QKV(torch.autograd.Function):
            grad_X.addmm_(k_grad, torch.mm(B_k_scaled, A_k_scaled))

        # V path
        v_weight_t = dequantize_weight(v_weight, v_quant)
        v_weight_t = dequantize(v_weight, v_quant)
        grad_X.addmm_(v_grad, v_weight_t)
        del v_weight
        del v_weight_t
@@ -821,7 +810,7 @@ class LoRA_O(torch.autograd.Function):
        d_B = s * A @ dY_X

        # Get derivative for dX
        W = dequantize_weight(W, W_quant, transpose=True)
        W = dequantize(W.t(), W_quant)
        dX = dY @ W.t()
        del W


@@ -146,43 +146,3 @@ def dequantize(
    # Handle transposed data
    is_transposed: bool = W.shape[0] == 1
    return out.t() if is_transposed else out


def dequantize_weight(
    W: torch.Tensor,
    quant_state: QuantState | list | None = None,
    transpose: bool = False,
) -> torch.Tensor:
    """Unified dequantization for both torchao and bnb quantized weights.

    For torchao tensor subclasses (AffineQuantizedTensor, NF4Tensor), dequantizes
    using the appropriate instance method. For bnb Params4bit, delegates to the
    optimized CUDA kernel in ``dequantize``.

    Args:
        W: Quantized weight tensor ``[out_features, in_features]``.
        quant_state: bnb ``QuantState`` (None for torchao / unquantized).
        transpose: If True, return ``[in_features, out_features]``.

    Returns:
        Dequantized float tensor, optionally transposed.
    """
    # torchao path: tensor subclass with embedded quantization state
    if quant_state is None and type(W) is not torch.Tensor:
        result = None
        # NF4Tensor (check first — NF4Tensor.dequantize is a static method)
        if hasattr(W, "get_original_weight"):
            result = W.get_original_weight()
        else:
            # AffineQuantizedTensor (INT4, etc.)
            try:
                result = W.dequantize()
            except (TypeError, RuntimeError):
                pass
        if result is not None:
            return result.t() if transpose else result

    # bnb path: transpose input before the CUDA kernel (existing convention)
    if transpose:
        return dequantize(W.t(), quant_state)
    return dequantize(W, quant_state)

@@ -23,7 +23,6 @@ from axolotl.loaders.utils import get_linear_embedding_layers
from axolotl.telemetry.errors import send_errors
from axolotl.utils.dict import DictDefault
from axolotl.utils.logging import get_logger
from axolotl.utils.schemas.enums import TorchAOQuantDType

LOG = get_logger(__name__)

@@ -135,13 +134,11 @@ def load_lora(

    rank = int(os.environ.get("LOCAL_RANK", 0))

    is_torchao = cfg.peft and cfg.peft.backend == "torchao"
    if (
        cfg.fsdp_config
        and cfg.adapter
        and cfg.fsdp_config.cpu_ram_efficient_loading
        and rank != 0
        and not is_torchao
    ):
        setup_quantized_meta_for_peft(model)

@@ -149,15 +146,6 @@ def load_lora(
    if cfg.peft_autocast_adapter_dtype is not None:
        model_kwargs["autocast_adapter_dtype"] = cfg.peft_autocast_adapter_dtype

    # Patch PEFT's torchao dispatch before any model creation/loading.
    # Must happen before both get_peft_model and PeftModel.from_pretrained,
    # as both trigger LoRA layer dispatch that would fail for INT4/NF4 weights.
    # INT8 is natively supported by PEFT's TorchaoLoraLinear, so skip the patch.
    if is_torchao and cfg.peft.weight_dtype != TorchAOQuantDType.int8:
        from axolotl.monkeypatch.peft.utils import patch_peft_torchao_dispatch

        patch_peft_torchao_dispatch()

    if cfg.lora_model_dir:
        LOG.debug("Loading pretrained PEFT - LoRA")
        if cfg.lora_on_cpu:
@@ -184,7 +172,6 @@ def load_lora(
        and cfg.adapter
        and cfg.fsdp_config.cpu_ram_efficient_loading
        and rank != 0
        and not is_torchao
    ):
        setup_quantized_peft_meta_for_training(model)

@@ -158,15 +158,6 @@ class ModelLoader:
        """Property that determines if FSDP with QLoRA is enabled."""
        return self.is_fsdp_enabled and self.cfg.adapter == "qlora"

    @property
    def is_torchao_qlora(self):
        """Property that determines if torchao backend is used for QLoRA."""
        return (
            self.cfg.adapter == "qlora"
            and self.cfg.peft
            and self.cfg.peft.backend == "torchao"
        )

    @send_errors
    def load(self) -> tuple[PreTrainedModel | PeftModelForCausalLM, PeftConfig | None]:
        """Load and prepare the model with all configurations and patches.
@@ -500,9 +491,8 @@ class ModelLoader:

        # FSDP requires control over device placement, so don't set device_map when FSDP is enabled
        if self.is_fsdp_enabled:
            # For QLoRA + FSDP with bnb, we still need to set device_map for proper initialization
            # torchao tensors work natively with FSDP2, no device_map override needed
            if self.is_qlora_and_fsdp_enabled and not self.is_torchao_qlora:
            # For QLoRA + FSDP, we still need to set device_map to "auto" for proper initialization
            if self.is_qlora_and_fsdp_enabled:
                self.model_kwargs["device_map"] = {
                    "": int(os.environ.get("LOCAL_RANK", 0))
                }
@@ -571,44 +561,6 @@ class ModelLoader:
            self.model_kwargs["quantization_config"] = BitsAndBytesConfig(
                **self.model_config.quantization_config
            )
        elif (
            self.cfg.adapter == "qlora"
            and self.cfg.peft
            and self.cfg.peft.backend == "torchao"
            and not self.cfg.merge_lora
        ):
            from transformers import TorchAoConfig

            from axolotl.utils.schemas.enums import TorchAOQuantDType

            weight_dtype = self.cfg.peft.weight_dtype
            if weight_dtype == TorchAOQuantDType.int4:
                group_size = self.cfg.peft.group_size or 128
                self.model_kwargs["quantization_config"] = TorchAoConfig(
                    quant_type="int4_weight_only",
                    group_size=group_size,
                )
            elif weight_dtype == TorchAOQuantDType.int8:
                group_size = self.cfg.peft.group_size or 128
                self.model_kwargs["quantization_config"] = TorchAoConfig(
                    quant_type="int8_weight_only",
                    group_size=group_size,
                )
            elif weight_dtype == TorchAOQuantDType.nf4:
                from torchao.dtypes._nf4tensor_api import NF4WeightOnlyConfig

                block_size = self.cfg.peft.group_size or 64
                self.model_kwargs["quantization_config"] = TorchAoConfig(
                    quant_type=NF4WeightOnlyConfig(
                        block_size=block_size,
                        scaler_block_size=256,
                    ),
                )
            else:
                raise ValueError(
                    f"Unsupported torchao weight_dtype for QLoRA: {weight_dtype}. "
                    "Supported: int4, int8, nf4"
                )
        elif self.cfg.adapter == "qlora" and self.cfg.load_in_4bit:
            bnb_config = {
                "load_in_4bit": True,
@@ -908,10 +860,6 @@ class ModelLoader:
            # Make sure everything is in the same dtype
            skip_prepare_model_for_kbit_training = True

        # torchao quantized models don't use Params4bit and don't need kbit preparation
        if self.is_torchao_qlora:
            skip_prepare_model_for_kbit_training = True

        if (
            not skip_prepare_model_for_kbit_training
            and self.cfg.adapter in ["lora", "qlora"]

@@ -100,7 +100,6 @@ class PatchManager:
        self._apply_fsdp_patches()
        self._apply_adapter_patches()
        self._apply_model_specific_patches()
        self._apply_fp8_patches()
        self._apply_flash_attention_peft_patches()
        self._apply_gradient_checkpointing_patches()
        self._patch_attention()
@@ -235,17 +234,6 @@ class PatchManager:

            patch_kimi_model()

    def _apply_fp8_patches(self):
        """Apply patches for FP8 support."""
        if self.cfg.fp8:
            from axolotl.monkeypatch.trainer_accelerator_args import (
                patch_create_accelerate_code_for_fp8,
            )

            patch_create_accelerate_code_for_fp8(
                self.cfg.fp8_enable_fsdp_float8_all_gather
            )

    def _apply_flash_attention_peft_patches(self):
        """Apply patches for Flash Attention with PEFT."""
        if self.cfg.adapter:
@@ -348,12 +336,10 @@ class PatchManager:

    def _apply_fsdp2_bnb_patches(self):
        """Apply FSDP2 BNB patches."""
        is_torchao = self.cfg.peft and self.cfg.peft.backend == "torchao"
        if (
            self.cfg.fsdp_config
            and str(self.cfg.fsdp_version) == "2"
            and self.cfg.adapter == "qlora"
            and not is_torchao
        ):
            from axolotl.monkeypatch.fsdp2_qlora import (
                apply_init_sharded_param_patch,

@@ -78,30 +78,3 @@ def patch_peft_prep_code():
    axolotl.loaders.model.prepare_model_for_kbit_training = (
        fixed_prepare_model_for_kbit_training
    )


def patch_peft_torchao_dispatch():
    """Skip PEFT's TorchaoLoraLinear for non-INT8 torchao weights.

    PEFT's dispatch_torchao() matches AffineQuantizedTensor but then errors in
    _check_dtype_supported() because it only allows INT8. Our LoRA kernels handle
    dequantization explicitly, so we bypass PEFT's torchao dispatch entirely and
    let it fall back to standard Linear LoRA layers.
    """
    try:
        from peft.tuners.lora import torchao as peft_torchao
    except ImportError:
        LOG.warning("Could not import peft.tuners.lora.torchao for patching")
        return

    if getattr(peft_torchao, "_axolotl_patched", False):
        return

    def patched_dispatch(target, adapter_name, lora_config, **kwargs):
        # Return None so PEFT falls back to standard Linear LoRA layers.
        # Our LoRA kernels handle torchao dequantization explicitly.
        return None

    peft_torchao.dispatch_torchao = patched_dispatch
    peft_torchao._axolotl_patched = True
    LOG.info("Patched PEFT dispatch_torchao to skip TorchaoLoraLinear")

@@ -1,83 +0,0 @@
"""
allow adding additional kwargs to Accelerator init
"""

import inspect

from transformers import Trainer

from axolotl.monkeypatch.utils import detab_code
from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)

ORIGINAL_TRAINER_CODE = """
# create accelerator object
self.accelerator = Accelerator(**args)
"""

PATCHED_TRAINER_CODE = """
if hasattr(self, "additional_accelerator_args"):
    additional_args = self.additional_accelerator_args(fp8=True, enable_fsdp_float8_all_gather={enable_fsdp_float8_all_gather}, **args)
    if additional_args:
        args.update(additional_args)

# create accelerator object
self.accelerator = Accelerator(**args)
"""


def get_create_accelerate_code() -> str:
    training_loop = inspect.getsource(Trainer.create_accelerator_and_postprocess)
    return training_loop


def check_create_accelerate_code_is_patchable() -> bool:
    create_code = get_create_accelerate_code()
    create_code, _ = detab_code(create_code)
    return ORIGINAL_TRAINER_CODE in create_code


def patch_create_accelerate_code_for_fp8(enable_fsdp_float8_all_gather: bool):
    """
    Monkeypatch create_accelerator_and_postprocess so it checks for additional kwargs.
    """

    try:
        create_code = get_create_accelerate_code()
    except OSError:
        return
    Trainer._original_create_accelerator_and_postprocess = create_code
    create_code, _ = detab_code(create_code)
    if ORIGINAL_TRAINER_CODE not in create_code:
        return

    patched_trainer_code = PATCHED_TRAINER_CODE.format(
        enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather
    )
    create_code = create_code.replace(ORIGINAL_TRAINER_CODE, patched_trainer_code)
    create_code = create_code.replace(
        "def create_accelerator_and_postprocess(",
        "def fixed_create_accelerator_and_postprocess(",
        1,
    )

    # load imports necessary
    import transformers.trainer

    items_to_import = []
    for item in dir(transformers.trainer):
        if item in create_code:
            items_to_import.append(item)

    exec(
        "from transformers.trainer import ("
        + ", ".join(x for x in items_to_import)
        + ")",
        globals(),
    )
    exec(create_code, globals())
    LOG.info("patching create_accelerator_and_postprocess to allow for overrides")
    Trainer.create_accelerator_and_postprocess = (
        fixed_create_accelerator_and_postprocess
    )
@@ -8,7 +8,6 @@ import torch
class TorchAOQuantDType(Enum):
    int4 = torch.int4
    int8 = torch.int8
    nf4 = "nf4"
    float8_e4m3fn = torch.float8_e4m3fn
    nvfp4 = "nvfp4"

@@ -17,8 +16,6 @@ class TorchAOQuantDType(Enum):
            return TorchAOQuantDType.int4
        if str == "int8":
            return TorchAOQuantDType.int8
        if str == "nf4":
            return TorchAOQuantDType.nf4
        if str in ["float8_e4m3fn", "fp8", "float8"]:
            return TorchAOQuantDType.float8_e4m3fn
        if str == "nvfp4":

@@ -1,12 +1,9 @@
"""Pydantic models for PEFT-related configuration"""

from typing import Any, Literal
from typing import Any

from pydantic import BaseModel, Field, field_validator, model_validator

from axolotl.utils.schemas.enums import TorchAOQuantDType
from axolotl.utils.schemas.quantization import validate_ao_dtype


class LoftQConfig(BaseModel):
    """LoftQ configuration subset"""
@@ -18,7 +15,7 @@ class LoftQConfig(BaseModel):


class PeftConfig(BaseModel):
    """PEFT configuration subset"""
    """peftq configuration subset"""

    loftq_config: LoftQConfig | None = Field(
        default=None,
@@ -26,29 +23,6 @@ class PeftConfig(BaseModel):
            "description": "Configuration options for loftq initialization for LoRA"
        },
    )
    backend: Literal["bnb", "torchao"] | None = Field(
        default=None,
        json_schema_extra={
            "description": "Quantization backend for QLoRA. 'bnb' for bitsandbytes (default), 'torchao' for torchao."
        },
    )
    weight_dtype: TorchAOQuantDType | None = Field(
        default=None,
        json_schema_extra={
            "description": "Weight quantization dtype (int4, int8, or nf4). Also used with bnb backend to auto-configure quantization."
        },
    )
    group_size: int | None = Field(
        default=None,
        json_schema_extra={
            "description": "Group size for quantization. Defaults to 128 for int4, 64 for nf4."
        },
    )

    @field_validator("weight_dtype", mode="before")
    @classmethod
    def validate_weight_dtype(cls, v):
        return validate_ao_dtype(v)


class LoraConfig(BaseModel):
@@ -182,56 +156,6 @@ class LoraConfig(BaseModel):

    merge_lora: bool | None = None

    @model_validator(mode="before")
    @classmethod
    def auto_detect_qlora(cls, data):
        """Auto-set adapter type and quantization flags from peft config.

        When peft.backend and peft.weight_dtype are set, this infers the correct
        adapter type and internal flags (load_in_4bit, load_in_8bit) so users
        don't need to set them manually.
        """
        peft = data.get("peft")
        if not isinstance(peft, dict):
            return data

        backend = peft.get("backend")
        weight_dtype = peft.get("weight_dtype")

        # Validate: weight_dtype requires backend
        if weight_dtype and not backend:
            raise ValueError(
                "peft.backend is required when peft.weight_dtype is set. "
                "Use 'torchao' or 'bnb'."
            )

        if not weight_dtype:
            return data

        adapter = data.get("adapter")

        if backend == "torchao":
            # torchao: any quantized weight_dtype means qlora
            if adapter == "lora":
                data["adapter"] = "qlora"

        elif backend == "bnb":
            if weight_dtype == "nf4":
                # bnb nf4 = qlora with load_in_4bit
                if adapter == "lora":
                    data["adapter"] = "qlora"
                data.setdefault("load_in_4bit", True)
            elif weight_dtype == "int8":
                # bnb int8 = lora with load_in_8bit
                data.setdefault("load_in_8bit", True)
            else:
                raise ValueError(
                    f"peft.weight_dtype '{weight_dtype}' is not supported with bnb backend. "
                    "Supported: nf4, int8."
                )

        return data

    @model_validator(mode="before")
    @classmethod
    def validate_adapter(cls, data):
@@ -249,8 +173,6 @@ class LoraConfig(BaseModel):
    @model_validator(mode="after")
    def validate_qlora(self):
        if self.adapter == "qlora":
            is_torchao = self.peft and self.peft.backend == "torchao"

            if self.merge_lora:
                # can't merge qlora if loaded in 8bit or 4bit
                if self.load_in_8bit:
@@ -262,20 +184,7 @@ class LoraConfig(BaseModel):
                if self.load_in_4bit:
                    raise ValueError("Can't merge qlora if loaded in 4bit")

            elif is_torchao:
                # torchao backend: validate torchao-specific requirements
                if self.load_in_4bit or self.load_in_8bit:
                    raise ValueError(
                        "load_in_4bit/load_in_8bit are for bitsandbytes. "
                        "With peft.backend: torchao, quantization is handled by torchao."
                    )
                if not self.peft.weight_dtype:
                    raise ValueError(
                        "peft.weight_dtype is required when peft.backend is 'torchao'"
                    )

            else:
                # Default bnb path
                if self.load_in_8bit:
                    raise ValueError("Can't load qlora in 8bit")

@@ -16,8 +16,6 @@ def validate_ao_dtype(v: Any) -> TorchAOQuantDType | None:
        return TorchAOQuantDType.int4
    if v == "int8":
        return TorchAOQuantDType.int8
    if v == "nf4":
        return TorchAOQuantDType.nf4
    if v in ["float8_e4m3fn", "fp8", "float8"]:
        return TorchAOQuantDType.float8_e4m3fn
    if v == "nvfp4":

@@ -3,7 +3,7 @@
import torch
from bitsandbytes.functional import QuantState

from axolotl.kernels.quantize import dequantize, dequantize_weight
from axolotl.kernels.quantize import dequantize


def test_dequantize_null_state():
@@ -100,18 +100,3 @@ def test_dequantize_output_tensor():

    result = dequantize(W, quant_state, out=out)
    assert result is out


def test_dequantize_weight_plain_tensor():
    """Test that dequantize_weight passes through unquantized tensors unchanged"""
    W = torch.randn(32, 64)
    result = dequantize_weight(W, quant_state=None, transpose=False)
    assert torch.equal(result, W)


def test_dequantize_weight_plain_tensor_transpose():
    """Test that dequantize_weight transposes unquantized tensors"""
    W = torch.randn(32, 64)
    result = dequantize_weight(W, quant_state=None, transpose=True)
    assert result.shape == (64, 32)
    assert torch.equal(result, W.t())

@@ -3,14 +3,6 @@ import pytest
from axolotl.utils.config import validate_config
from axolotl.utils.dict import DictDefault

BASE_CFG = {
    "datasets": [{"path": "dummy_dataset", "type": "alpaca"}],
    "micro_batch_size": 1,
    "gradient_accumulation_steps": 1,
    "learning_rate": 1e-5,
    "base_model": "dummy_model",
}


class TestLoRAConfigValidation:
    """Test suite for LoRA/QLoRA configuration validation"""
@@ -157,195 +149,3 @@ class TestLoRAConfigValidation:
        result = validate_config(valid_config)
        assert result["lora_qkv_kernel"] is True
        assert result["trust_remote_code"] is None


class TestTorchaoQLoRAConfigValidation:
    """Test suite for torchao QLoRA auto-detection and validation"""

    # --- Auto-detection: torchao ---

    @pytest.mark.parametrize("weight_dtype", ["int4", "int8", "nf4"])
    def test_torchao_auto_detect_from_lora(self, weight_dtype):
        """adapter: lora + peft.backend: torchao auto-upgrades to qlora"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "peft": {"backend": "torchao", "weight_dtype": weight_dtype},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"
        assert result["peft"]["backend"] == "torchao"

    def test_torchao_explicit_qlora(self):
        """adapter: qlora + peft.backend: torchao works directly"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "peft": {"backend": "torchao", "weight_dtype": "int4"},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"

    # --- Auto-detection: bnb ---

    def test_bnb_nf4_auto_detect_from_lora(self):
        """adapter: lora + peft.backend: bnb + weight_dtype: nf4 → qlora + load_in_4bit"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "peft": {"backend": "bnb", "weight_dtype": "nf4"},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"
        assert result["load_in_4bit"] is True

    def test_bnb_int8_auto_detect_from_lora(self):
        """adapter: lora + peft.backend: bnb + weight_dtype: int8 → lora + load_in_8bit"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "peft": {"backend": "bnb", "weight_dtype": "int8"},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "lora"
        assert result["load_in_8bit"] is True

    def test_bnb_nf4_explicit_qlora_auto_sets_load_in_4bit(self):
        """adapter: qlora + peft.backend: bnb + weight_dtype: nf4 auto-sets load_in_4bit"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "peft": {"backend": "bnb", "weight_dtype": "nf4"},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"
        assert result["load_in_4bit"] is True

    # --- Backward compat ---

    def test_old_style_qlora_unchanged(self):
        """Old-style adapter: qlora + load_in_4bit: true still works"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "load_in_4bit": True,
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"
        assert result["load_in_4bit"] is True

    def test_old_style_lora_8bit_unchanged(self):
        """Old-style adapter: lora + load_in_8bit: true still works"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "load_in_8bit": True,
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "lora"
        assert result["load_in_8bit"] is True

    def test_plain_lora_unchanged(self):
        """adapter: lora without peft block stays as lora"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "lora"

    # --- Validation errors ---

    def test_torchao_with_load_in_4bit_errors(self):
        """peft.backend: torchao + load_in_4bit is a conflict"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "load_in_4bit": True,
                "peft": {"backend": "torchao", "weight_dtype": "int4"},
                **BASE_CFG,
            }
        )
        with pytest.raises(ValueError, match="load_in_4bit.*bitsandbytes"):
            validate_config(cfg)

    def test_torchao_with_load_in_8bit_errors(self):
        """peft.backend: torchao + load_in_8bit is a conflict"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "load_in_8bit": True,
                "peft": {"backend": "torchao", "weight_dtype": "int4"},
                **BASE_CFG,
            }
        )
        with pytest.raises(ValueError, match="load_in_4bit.*bitsandbytes"):
            validate_config(cfg)

    def test_torchao_without_weight_dtype_errors(self):
        """peft.backend: torchao without weight_dtype errors"""
        cfg = DictDefault(
            {
                "adapter": "qlora",
                "peft": {"backend": "torchao"},
                **BASE_CFG,
            }
        )
        with pytest.raises(ValueError, match="peft.weight_dtype is required"):
            validate_config(cfg)

    def test_weight_dtype_without_backend_errors(self):
        """peft.weight_dtype without peft.backend errors"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "peft": {"weight_dtype": "int4"},
                **BASE_CFG,
            }
        )
        with pytest.raises(ValueError, match="peft.backend is required"):
            validate_config(cfg)

    def test_bnb_unsupported_weight_dtype_errors(self):
        """peft.backend: bnb + unsupported weight_dtype errors"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "peft": {"backend": "bnb", "weight_dtype": "int4"},
                **BASE_CFG,
            }
        )
        with pytest.raises(ValueError, match="not supported with bnb"):
            validate_config(cfg)

    # --- Redundant flags don't conflict ---

    def test_bnb_nf4_with_explicit_load_in_4bit(self):
        """peft.backend: bnb + weight_dtype: nf4 + load_in_4bit: true is fine (redundant)"""
        cfg = DictDefault(
            {
                "adapter": "lora",
                "load_in_4bit": True,
                "peft": {"backend": "bnb", "weight_dtype": "nf4"},
                **BASE_CFG,
            }
        )
        result = validate_config(cfg)
        assert result["adapter"] == "qlora"
        assert result["load_in_4bit"] is True