chore: add debug logs
@@ -126,7 +126,7 @@ def load_tokenizer(cfg: DictDefault) -> PreTrainedTokenizer:
     from axolotl.loaders.patch_manager import PatchManager

     PatchManager.apply_pre_tokenizer_load_patches(cfg)
-    LOG.debug("Kimi tokenizer patches applied, continuing with tokenizer loading...")
+    LOG.info("Kimi tokenizer patches applied, continuing with tokenizer loading...")

 def _load_mistral_common_tokenizer(cfg: DictDefault):
     """Load mistral-common tokenizer"""
@@ -140,9 +140,9 @@ def load_tokenizer(cfg: DictDefault) -> PreTrainedTokenizer:
     if cfg.tokenizer_use_mistral_common:
         return _load_mistral_common_tokenizer(cfg)

-    LOG.debug("Loading model config...")
+    LOG.info("Loading model config...")
     model_config = load_model_config(cfg)
-    LOG.debug("Model config loaded successfully")
+    LOG.info("Model config loaded successfully")

     tokenizer_kwargs = {}
     use_fast = True  # this is the default
@@ -167,14 +167,14 @@ def load_tokenizer(cfg: DictDefault) -> PreTrainedTokenizer:
             tokenizer_path, cfg.added_tokens_overrides, output_dir=cfg.output_dir
         )

-    LOG.debug(f"Loading tokenizer from {cfg.tokenizer_config}...")
+    LOG.info(f"Loading tokenizer from {cfg.tokenizer_config}...")
     tokenizer = tokenizer_cls.from_pretrained(
         tokenizer_path,
         trust_remote_code=cfg.trust_remote_code or False,
         use_fast=use_fast,
         **tokenizer_kwargs,
     )
-    LOG.debug("Tokenizer loaded successfully")
+    LOG.info("Tokenizer loaded successfully")

     if (
         tokenizer.__class__.__name__
@@ -311,4 +311,5 @@ def load_tokenizer(cfg: DictDefault) -> PreTrainedTokenizer:
     if hasattr(tokenizer, "deprecation_warnings"):
         tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True

+    LOG.info("load_tokenizer: About to return tokenizer")
     return tokenizer
@@ -62,8 +62,9 @@ def setup_model_and_tokenizer(
     `None`), and processor (if multimodal, else `None`).
     """
     # Load tokenizer
-    LOG.debug(f"Loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}")
+    LOG.info(f"Loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}")
     tokenizer = load_tokenizer(cfg)
+    LOG.info("Tokenizer loaded, creating ModelLoader...")

     # Load processor for multimodal models if needed
     processor = None
@@ -71,9 +72,10 @@ def setup_model_and_tokenizer(
         processor = load_processor(cfg, tokenizer)

     # Load the model
-    LOG.debug("Loading model")
-
+    LOG.info("Loading model")
+    LOG.info("About to create ModelLoader...")
     model_loader = ModelLoader(cfg, tokenizer, processor=processor)
+    LOG.info("ModelLoader created, about to load model...")
     model, peft_config = model_loader.load()
     if model.generation_config is not None:
         model.generation_config.do_sample = True
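Net effect: each checkpoint on the tokenizer/model loading path is promoted from LOG.debug to LOG.info. A minimal sketch of why that changes what reaches the console, using only the stdlib logging module (the logger name below is illustrative, and the assumption is that these modules define LOG via logging.getLogger, which the diff itself does not show):

import logging

# Typical default: handlers configured at INFO, so DEBUG records are dropped.
logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")

# Illustrative name only; the real modules define their own LOG objects.
LOG = logging.getLogger("axolotl.loaders.tokenizer")

LOG.debug("Loading model config...")  # suppressed: DEBUG (10) < INFO (20)
LOG.info("Loading model config...")   # emitted: INFO (20) >= INFO (20)

Under such a default configuration, the pre-change LOG.debug calls never appeared in the output, so bumping them to LOG.info makes each stage of tokenizer and model loading visible while tracing where startup stalls.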