reduce test concurrency to avoid HF rate limiting, test suite parity (#2128)
* reduce test concurrency to avoid HF rate limiting, test suite parity
* make val_set_size smaller to speed up e2e tests
* more retries for pytest fixture downloads
* val_set_size was too small
* move retry_on_request_exceptions to data utils and add retry strategy
* pre-download ultrafeedback as a test fixture
* refactor download retry into its own fn
* don't import from data utils
* use retry mechanism now for fixtures
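The retry work described above ("move retry_on_request_exceptions to data utils and add retry strategy") amounts to wrapping download calls in a retry decorator. A minimal sketch, assuming a fixed-delay strategy; the max_retries/delay parameters and the exception list are illustrative assumptions, not the commit's exact implementation:

import functools
import time

import requests


def retry_on_request_exceptions(max_retries=3, delay=5):
    """Retry a callable on transient HTTP errors (e.g. HF Hub 429s)."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except (
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ConnectionError,
                    requests.exceptions.HTTPError,
                ):
                    # re-raise on the final attempt, otherwise back off and retry
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(delay)

        return wrapper

    return decorator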
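Likewise, "pre-download ultrafeedback as a test fixture" points at a session-scoped pytest fixture that warms the local HF cache before tests run concurrently. A hedged sketch; the fixture name and repo id below are placeholders, not the exact ones used in the commit:

import pytest
from huggingface_hub import snapshot_download


@pytest.fixture(scope="session", autouse=True)
def pre_download_ultrafeedback():
    # Warm the local HF cache once per session so concurrently running
    # tests read from disk instead of each re-hitting the Hub.
    snapshot_download(
        repo_id="argilla/ultrafeedback-binarized-preferences-cleaned",  # placeholder id
        repo_type="dataset",
    )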
@@ -42,7 +42,7 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
                 "sequence_len": 1024,
-                "val_set_size": 0.1,
+                "val_set_size": 0.02,
                 "datasets": [
                     {
                         "path": "mhenrichsen/alpaca_2k_test",
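val_set_size is the fraction of the dataset held out for evaluation, so dropping it from 0.1 to 0.02 on the 2k-row alpaca_2k_test set cuts the eval split from roughly 200 rows to roughly 40, which is what speeds up the e2e runs. A sketch of the split arithmetic, assuming the usual datasets.train_test_split semantics rather than axolotl's exact code path:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello"] * 2000})
split = ds.train_test_split(test_size=0.02)  # val_set_size acts as the held-out fraction
print(len(split["train"]), len(split["test"]))  # 1960 40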
@@ -86,7 +86,7 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "lora_alpha": 16,
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
-                "val_set_size": 0.1,
+                "val_set_size": 0.02,
                 "datasets": [
                     {
                         "path": "mhenrichsen/alpaca_2k_test",
@@ -40,7 +40,7 @@ class TestFalconPatched(unittest.TestCase):
                 "lora_dropout": 0.1,
                 "lora_target_linear": True,
                 "lora_modules_to_save": ["word_embeddings", "lm_head"],
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {
                     "bos_token": "<|endoftext|>",
                     "pad_token": "<|endoftext|>",
@@ -80,7 +80,7 @@ class TestFalconPatched(unittest.TestCase):
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 2048,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {
                     "bos_token": "<|endoftext|>",
                     "pad_token": "<|endoftext|>",
@@ -38,7 +38,7 @@ class TestFusedLlama(unittest.TestCase):
                 "flash_attn_fuse_mlp": True,
                 "sample_packing": True,
                 "sequence_len": 1024,
-                "val_set_size": 0.1,
+                "val_set_size": 0.02,
                 "special_tokens": {
                     "unk_token": "<unk>",
                     "bos_token": "<s>",
@@ -98,7 +98,7 @@ class TestLoraLlama(unittest.TestCase):
                 "lora_alpha": 64,
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
-                "val_set_size": 0.1,
+                "val_set_size": 0.02,
                 "special_tokens": {
                     "unk_token": "<unk>",
                     "bos_token": "<s>",
@@ -39,7 +39,7 @@ class TestMistral(unittest.TestCase):
                 "lora_alpha": 64,
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {
                     "unk_token": "<unk>",
                     "bos_token": "<s>",
@@ -80,7 +80,7 @@ class TestMistral(unittest.TestCase):
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 1024,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {
                     "unk_token": "<unk>",
                     "bos_token": "<s>",
@@ -40,7 +40,7 @@ class TestMixtral(unittest.TestCase):
                 "lora_alpha": 32,
                 "lora_dropout": 0.1,
                 "lora_target_linear": True,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {},
                 "datasets": [
                     {
@@ -78,7 +78,7 @@ class TestMixtral(unittest.TestCase):
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 2048,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {},
                 "datasets": [
                     {
@@ -38,7 +38,7 @@ class TestPhiMultipack(unittest.TestCase):
                 "pad_to_sequence_len": True,
                 "load_in_8bit": False,
                 "adapter": None,
-                "val_set_size": 0.1,
+                "val_set_size": 0.05,
                 "special_tokens": {
                     "pad_token": "<|endoftext|>",
                 },