From b37ddf97783da826454aec2662329ef0ea38dc4c Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Sun, 6 Jul 2025 21:55:09 -0400
Subject: [PATCH] don't use tokenizer parallelism when using packing (#2862)
 [skip ci]

---
 src/axolotl/utils/trainer.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/axolotl/utils/trainer.py b/src/axolotl/utils/trainer.py
index 278fbed5b..06853451c 100644
--- a/src/axolotl/utils/trainer.py
+++ b/src/axolotl/utils/trainer.py
@@ -609,6 +609,9 @@ def prepare_opinionated_env(cfg):
     if cfg.qlora_sharded_model_loading:
         # model loading is forked after the tokenizer
         os.environ["TOKENIZERS_PARALLELISM"] = "false"
+    if cfg.sample_packing:
+        # multipack parallel packing sampler defaults to using fork
+        os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 
 def setup_trainer(
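
Below is a minimal standalone sketch of the fork-after-tokenize hazard this patch guards against. It is illustrative only: the "gpt2" checkpoint and the use of multiprocessing.Pool are assumptions for the demo, not axolotl's actual multipack packing sampler.

    # Sketch of the hazard: the Hugging Face fast tokenizers are backed by a
    # Rust-side thread pool. If the process forks *after* that pool has been
    # used, the tokenizers library detects the fork and emits its
    # "process just got forked, after parallelism has already been used"
    # warning, disabling parallelism to avoid deadlocks. Setting the env var
    # up front, as the patch does in prepare_opinionated_env(), avoids this.
    import multiprocessing
    import os

    # Must be set before the tokenizer's thread pool is first used.
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    from transformers import AutoTokenizer  # assumed installed for this demo

    def count_tokens(text: str) -> int:
        # Each forked worker builds its own tokenizer; the hazard is the
        # parent's tokenizer having already spun up its pool before the fork.
        tok = AutoTokenizer.from_pretrained("gpt2")
        return len(tok(text)["input_ids"])

    if __name__ == "__main__":
        tok = AutoTokenizer.from_pretrained("gpt2")
        tok("warm up the tokenizer")  # exercises the Rust-side thread pool
        # "fork" is the start method the multipack sampler defaults to,
        # per the comment in the patch above.
        with multiprocessing.get_context("fork").Pool(2) as pool:
            print(pool.map(count_tokens, ["hello world", "sample packing"]))

Setting the variable early keeps the tokenizer single-threaded for the whole run, so the later fork in the packing sampler never inherits a live thread pool; that is presumably why the patch sets it unconditionally during environment preparation rather than at fork time.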