fix(validation): add validation for lora target linear with quantize experts (#3461)

* fix: add validation for lora target linear with quantize experts

* chore: fix lint

* chore: comment

* fix: missing link on readme
This commit is contained in:
NanoCode012
2026-03-06 21:19:05 +07:00
committed by GitHub
parent a260d330ed
commit 6c8c73e5a4
4 changed files with 21 additions and 1 deletion

View File

@@ -79,6 +79,20 @@ class TestQuantizeMoeExpertsValidation:
result = validate_config(cfg, capabilities=gpu_caps, env_capabilities=env_caps)
assert result["quantize_moe_experts"] is False
def test_rejects_lora_target_linear(self, min_base_cfg, gpu_caps, env_caps):
    """quantize_moe_experts with lora_target_linear should fail."""
    # Build the override settings first, then layer the shared base config
    # underneath (DictDefault merge: left-hand values take precedence).
    overrides = DictDefault(
        quantize_moe_experts=True,
        adapter="qlora",
        load_in_4bit=True,
        lora_target_linear=True,
    )
    cfg = overrides | min_base_cfg
    # Validation must reject the combination with a descriptive error.
    with pytest.raises(ValueError, match="lora_target_linear is not compatible"):
        validate_config(cfg, capabilities=gpu_caps, env_capabilities=env_caps)
def test_default_is_false(self, min_base_cfg, gpu_caps, env_caps):
"""quantize_moe_experts should default to false."""
cfg = DictDefault({}) | min_base_cfg