remove eval streaming (not HF supported)

This commit is contained in:
Dan Saunders
2025-08-22 00:12:17 +00:00
parent 4121bcbc33
commit 42b38a718a
8 changed files with 32 additions and 148 deletions

View File

@@ -94,8 +94,6 @@ def wrap_dataset_for_tokenized_prompt(
if prompt_tokenizer.supports_batched:
map_kwargs["batched"] = True
# For IterableDataset, we need to get original columns to remove them.
# We'll peek at the first example using a separate iterator to avoid consuming the main one.
def peek_and_get_columns():
# Create a fresh iterator just for peeking
temp_iter = iter(dataset)

View File

@@ -44,46 +44,17 @@ from axolotl.utils.trainer import (
LOG = get_logger(__name__)
def _is_streaming_enabled_for_split(
cfg: DictDefault, split: Literal["train", "test"]
) -> bool:
def _is_streaming_enabled(cfg: DictDefault) -> bool:
"""Check if streaming is enabled for a specific split."""
if split == "test":
# For eval datasets, check eval_streaming first, then fall back to streaming
eval_streaming = cfg.get("eval_streaming")
if eval_streaming is not None:
return eval_streaming
# Fall back to main streaming setting
streaming = cfg.get("streaming")
if streaming is True:
return True
# Check if pretraining dataset exists (defaults to streaming)
has_pretraining = cfg.get("pretraining_dataset") is not None
streaming_default_for_pretraining = has_pretraining and streaming is None
streaming = has_pretraining and streaming is None
return streaming_default_for_pretraining
def _get_streaming_config_for_split(
cfg: DictDefault, split: Literal["train", "test"]
) -> DictDefault:
"""Get a modified config object with split-specific streaming settings."""
if split != "test":
return cfg
# Override with eval-specific configs if they exist
streaming_cfg = DictDefault(cfg)
eval_strategy = cfg.get("eval_dataset_mixing_strategy")
eval_weights = cfg.get("eval_mixing_weights")
if eval_strategy is not None:
streaming_cfg["dataset_mixing_strategy"] = eval_strategy
if eval_weights is not None:
streaming_cfg["mixing_weights"] = eval_weights
return streaming_cfg
return streaming
@retry_on_request_exceptions(max_retries=3, delay=5)
@@ -145,7 +116,6 @@ def _prepare_standard_dataset(
return train_dataset, eval_dataset, -1, prompters
# Validate sample packing configuration for evaluation
# Skip validation for streaming eval datasets since they don't have a calculable length
if (
eval_dataset
and cfg.sample_packing
@@ -315,14 +285,14 @@ def _load_tokenized_prepared_datasets(
datasets_configs = cfg.datasets if split == "train" else cfg.test_datasets
prompters: list[Prompter | None] = []
# Check if streaming is enabled for this split
use_streaming = _is_streaming_enabled_for_split(cfg, split)
use_streaming = False
if split == "train":
use_streaming = _is_streaming_enabled(cfg)
if use_streaming:
# For streaming datasets, skip caching and load raw datasets directly
streaming_cfg = _get_streaming_config_for_split(cfg, split)
dataset, prompters = _load_raw_datasets(
streaming_cfg,
cfg,
datasets_configs,
tokenizer,
split,
@@ -417,9 +387,12 @@ def _load_and_process_single_dataset(
processor: ProcessorMixin | None = None,
) -> tuple[Dataset | IterableDataset, Prompter | None]:
"""Load and process a single dataset based on the passed config."""
use_streaming_for_split = _is_streaming_enabled_for_split(cfg, split)
use_streaming = False
if split == "train":
use_streaming = _is_streaming_enabled(cfg)
dataset = load_dataset_with_config(
dataset_config, cfg.hf_use_auth_token, use_streaming_for_split
dataset_config, cfg.hf_use_auth_token, use_streaming
)
d_base_type, d_prompt_style = _parse_dataset_type(dataset_config.type)

View File

@@ -100,6 +100,10 @@ def get_dataset_wrapper(
dataset_config, tokenizer, cfg, dataset, dataset_kwargs
)
# Skip preparation if configured
if cfg.skip_prepare_dataset:
return dataset, None
# Bradley-Terry dataset
if dataset_config.type.startswith("bradley_terry"):
return _handle_bradley_terry_dataset(

View File

@@ -938,12 +938,6 @@ class AxolotlInputConfig(
"description": "Whether to use streaming datasets (IterableDataset) for training datasets. When True, data is loaded on-demand during training without upfront preprocessing. Requires max_steps to be set. Pre-training datasets default to streaming unless explicitly set to False."
},
)
eval_streaming: bool | None = Field(
default=None,
json_schema_extra={
"description": "Whether to use streaming datasets for evaluation datasets. If not set, falls back to the 'streaming' setting. Useful for streaming large training data while keeping smaller eval datasets in memory."
},
)
dataset_mixing_strategy: str | None = Field(
default="round_robin",
json_schema_extra={
@@ -956,18 +950,6 @@ class AxolotlInputConfig(
"description": "Weights for weighted mixing strategy when using multiple datasets. Must sum to 1.0 and have same length as datasets list. Only used when dataset_mixing_strategy='weighted'."
},
)
eval_dataset_mixing_strategy: str | None = Field(
default=None,
json_schema_extra={
"description": "Strategy for mixing multiple evaluation datasets. If not set, falls back to dataset_mixing_strategy. Options: 'concatenate', 'round_robin', 'weighted', 'random'."
},
)
eval_mixing_weights: list[float] | None = Field(
default=None,
json_schema_extra={
"description": "Weights for weighted mixing strategy for evaluation datasets. Must sum to 1.0 and have same length as evaluation datasets list."
},
)
# INTERNALS - document for now, generally not set externally
is_preprocess: bool | None = None

View File

@@ -1130,14 +1130,11 @@ class PretrainingValidationMixin:
@model_validator(mode="before")
@classmethod
def check_streaming_split_batches_accelerate(cls, data):
# Check if either training or eval uses streaming
# Check if streaming is enabled for training
streaming = data.get("streaming", False)
eval_streaming = data.get("eval_streaming")
if eval_streaming is None:
eval_streaming = streaming
# If either training or eval uses streaming, configure accelerator
if streaming or eval_streaming:
# If streaming is enabled, configure accelerator
if streaming:
accelerator_config = data.get("accelerator_config", {})
if not accelerator_config:
data["accelerator_config"] = {
@@ -1412,13 +1409,8 @@ class GRPOVllmValidationMixin:
class StreamingValidationMixin:
"""Validation methods related to streaming datasets."""
def _is_streaming_enabled(self, context: str = "train") -> bool:
"""Check if streaming is enabled for a given context (train or eval)."""
if context == "eval":
eval_streaming = getattr(self, "eval_streaming", None)
if eval_streaming is not None:
return eval_streaming
def _is_streaming_enabled(self) -> bool:
"""Check if streaming is enabled."""
# Fall back to main streaming setting
streaming = getattr(self, "streaming", None)
if streaming is True:
@@ -1426,15 +1418,15 @@ class StreamingValidationMixin:
# Check if pretraining dataset exists (defaults to streaming)
has_pretraining = getattr(self, "pretraining_dataset", None) is not None
streaming_default_for_pretraining = has_pretraining and streaming is None
streaming = has_pretraining and streaming is None
return streaming_default_for_pretraining
return streaming
@model_validator(mode="after")
def check_streaming_requires_max_steps(self):
"""Ensure max_steps is set when using streaming datasets."""
# Check if streaming is enabled for training datasets
if self._is_streaming_enabled("train"):
if self._is_streaming_enabled():
max_steps = getattr(self, "max_steps", None)
if not max_steps:
raise ValueError("max_steps must be set when using streaming datasets")
@@ -1445,7 +1437,7 @@ class StreamingValidationMixin:
def check_streaming_validation_splits_conflict(self):
"""Ensure validation splits are not used with streaming datasets."""
# Check if streaming is enabled for training datasets
if self._is_streaming_enabled("train"):
if self._is_streaming_enabled():
val_set_size = getattr(self, "val_set_size", 0.0)
if val_set_size and val_set_size > 0:
raise ValueError(
@@ -1457,8 +1449,8 @@ class StreamingValidationMixin:
@model_validator(mode="after")
def check_streaming_preprocessing_conflict(self):
"""Ensure preprocessing is not enabled with streaming datasets."""
# Check if streaming is enabled for training or eval datasets
if self._is_streaming_enabled("train") or self._is_streaming_enabled("eval"):
# Check if streaming is enabled for training datasets
if self._is_streaming_enabled():
if os.environ.get("AXOLOTL_IS_PREPROCESS") == "1":
raise ValueError("preprocess is not supported for streaming datasets")
@@ -1467,8 +1459,8 @@ class StreamingValidationMixin:
@model_validator(mode="after")
def check_streaming_skip_prepare_dataset(self):
"""Ensure skip_prepare_dataset is set for streaming datasets."""
# Check if streaming is enabled for training or eval datasets
if self._is_streaming_enabled("train") or self._is_streaming_enabled("eval"):
# Check if streaming is enabled for training datasets
if self._is_streaming_enabled():
skip_prepare = getattr(self, "skip_prepare_dataset", None)
if skip_prepare is False:
LOG.warning(
@@ -1486,7 +1478,6 @@ class StreamingValidationMixin:
# Get datasets to validate length against
datasets = getattr(self, "datasets", None)
test_datasets = getattr(self, "test_datasets", None)
# Check main strategy and weights
strategy = getattr(self, "dataset_mixing_strategy", "concatenate")
@@ -1502,26 +1493,6 @@ class StreamingValidationMixin:
dataset_count,
)
# Check eval-specific strategy and weights
eval_strategy = getattr(self, "eval_dataset_mixing_strategy", None)
eval_weights = getattr(self, "eval_mixing_weights", None)
if eval_strategy is not None:
eval_dataset_count = len(test_datasets) if test_datasets else dataset_count
self._validate_dataset_strategy_and_weights(
eval_strategy,
eval_weights,
"eval_dataset_mixing_strategy",
"eval_mixing_weights",
valid_strategies,
eval_dataset_count,
)
elif eval_weights is not None:
LOG.warning(
"eval_mixing_weights provided but eval_dataset_mixing_strategy is not set. "
"Weights will be ignored unless eval_dataset_mixing_strategy='weighted'."
)
return self
def _validate_dataset_strategy_and_weights(

View File

@@ -471,13 +471,8 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
mp_start_method=cfg.sample_packing_mp_start_method or "fork",
)
# Remove length column only if it exists
dataset_for_loader = train_dataset
if "length" in train_dataset.column_names:
dataset_for_loader = train_dataset.remove_columns(["length"])
data_loader = DataLoader(
dataset_for_loader,
train_dataset,
batch_sampler=sampler,
)
data_loader_len = len(data_loader) * cfg.micro_batch_size // cfg.batch_size

View File

@@ -112,12 +112,12 @@ class TestStreamingDatasets:
{
"path": "mhenrichsen/alpaca_2k_test",
"type": "alpaca",
"split": "train", # Specify train split for eval dataset
"split": "train",
},
{
"path": "tatsu-lab/alpaca",
"type": "alpaca",
"split": "train", # Specify train split for eval dataset
"split": "train",
},
],
# Streaming config

View File

@@ -664,42 +664,3 @@ class TestDatasetPreparation:
# Should have samples from both datasets
sources = [sample["source"] for sample in samples]
assert len(set(sources)) >= 1 # At least one unique source
def test_eval_streaming_config(self):
"""Test eval_streaming separate from streaming config."""
from axolotl.utils.data.sft import _is_streaming_enabled_for_split
# Test train streaming enabled, eval streaming disabled
cfg = DictDefault({"streaming": True, "eval_streaming": False})
assert _is_streaming_enabled_for_split(cfg, "train")
assert not _is_streaming_enabled_for_split(cfg, "test")
# Test train streaming disabled, eval streaming enabled
cfg2 = DictDefault({"streaming": False, "eval_streaming": True})
assert not _is_streaming_enabled_for_split(cfg2, "train")
assert _is_streaming_enabled_for_split(cfg2, "test")
def test_eval_specific_mixing_configs(self):
"""Test eval-specific mixing configs override main configs."""
from axolotl.utils.data.sft import _get_streaming_config_for_split
cfg = DictDefault(
{
"dataset_mixing_strategy": "round_robin",
"mixing_weights": [0.5, 0.5],
"eval_dataset_mixing_strategy": "weighted",
"eval_mixing_weights": [0.8, 0.2],
}
)
# Train split should use main config
train_cfg = _get_streaming_config_for_split(cfg, "train")
assert train_cfg["dataset_mixing_strategy"] == "round_robin"
assert train_cfg["mixing_weights"] == [0.5, 0.5]
# Test split should use eval-specific config
test_cfg = _get_streaming_config_for_split(cfg, "test")
assert test_cfg["dataset_mixing_strategy"] == "weighted"
assert test_cfg["mixing_weights"] == [0.8, 0.2]