upgrade bnb 0.45.0 and peft 0.14.0 (#2126)

* upgrade bnb to latest release

* update peft to a working, supported commit

* bump to latest release of peft==0.14.0
This commit is contained in:
Wing Lian
2024-12-06 09:08:55 -05:00
committed by GitHub
parent 5726141c4e
commit 6b3058b2dc
3 changed files with 2 additions and 18 deletions

View File

@@ -1,9 +1,9 @@
--extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
packaging==23.2
peft==0.13.2
peft==0.14.0
transformers==4.46.3
tokenizers>=0.20.1
bitsandbytes==0.44.1
bitsandbytes==0.45.0
accelerate==1.1.0
datasets==3.1.0
deepspeed==0.15.4

View File

@@ -1521,19 +1521,6 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig):
return data
@model_validator(mode="before")
@classmethod
def check_hopper_8bit_lora(cls, data):
    """Reject configs that combine a LoRA adapter with 8-bit loading on Hopper.

    bitsandbytes 8-bit quantization is broken on sm_90 (Hopper) GPUs; see
    https://github.com/bitsandbytes-foundation/bitsandbytes/issues/538#issuecomment-2262945464

    Args:
        data: raw (pre-validation) config dict.

    Returns:
        The unchanged config dict when the combination is allowed.

    Raises:
        ValueError: if ``adapter`` and ``load_in_8bit`` are both set on an
            sm_90 GPU.
    """
    # Use .get() so a missing "capabilities" key cannot raise KeyError here,
    # consistent with the .get() lookups for the other keys below.
    capabilities = data.get("capabilities")
    # bool() ensures is_sm_90 really is a bool; a bare `x and y` expression
    # would bind a falsy operand (e.g. None) instead.
    is_sm_90: bool = bool(
        capabilities and capabilities.get("compute_capability") == "sm_90"
    )
    if data.get("adapter") and data.get("load_in_8bit") and is_sm_90:
        # see https://github.com/bitsandbytes-foundation/bitsandbytes/issues/538#issuecomment-2262945464
        raise ValueError("8-bit LoRA is not supported on Hopper GPUs")
    return data
@model_validator(mode="before")
@classmethod
def check_fsdp_deepspeed(cls, data):

View File

@@ -14,8 +14,6 @@ from transformers.testing_utils import get_torch_dist_unique_port
from axolotl.utils.dict import DictDefault
from ..utils import is_hopper
LOG = logging.getLogger("axolotl.tests.e2e.multigpu")
os.environ["WANDB_DISABLED"] = "true"
@@ -144,7 +142,6 @@ class TestMultiGPULlama:
]
)
@pytest.mark.skipif(is_hopper(), reason="h100 doesn't support 8-bit lora")
def test_dpo_lora_ddp(self, temp_dir):
# pylint: disable=duplicate-code
cfg = DictDefault(