torch_dtype -> dtype (#3177)

* torch_dtype -> dtype

* torch_dtype -> dtype
Author: VED
Committed by: GitHub
Date: 2025-10-01 13:32:51 +05:30
parent f4376748f3
commit a6bfbe3400
5 changed files with 6 additions and 8 deletions


@@ -85,9 +85,7 @@ def do_cli(model: Union[Path, str], output: Union[Path, str]) -> None:
     unpatch_llama4 = patch_llama4_linearized_modeling()
     from transformers import Llama4ForConditionalGeneration
-    model_ = Llama4ForConditionalGeneration.from_pretrained(
-        model, torch_dtype=torch.bfloat16
-    )
+    model_ = Llama4ForConditionalGeneration.from_pretrained(model, dtype=torch.bfloat16)
     processor = AutoProcessor.from_pretrained(model)
     processor.save_pretrained(output)
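
For context, a minimal sketch of the renamed keyword (not part of this commit; the checkpoint id is a placeholder). Recent transformers releases accept dtype= in from_pretrained and keep torch_dtype= only as a deprecated alias, so new code passes dtype=:

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "my-org/my-model",  # hypothetical checkpoint id
    dtype=torch.bfloat16,  # was: torch_dtype=torch.bfloat16
)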


@@ -69,7 +69,7 @@ def do_quantize(
     config = AutoConfig.from_pretrained(model_path)
     torch_dtype = config.torch_dtype if hasattr(config, "torch_dtype") else None
     model = AutoModelForCausalLM.from_pretrained(
-        model_path, device_map="auto", torch_dtype=torch_dtype
+        model_path, device_map="auto", dtype=torch_dtype
     )
     LOG.info(
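
A sketch of the pattern this hunk preserves (the checkpoint id is a placeholder): the dtype recorded in the checkpoint's config is still read under its old attribute name, torch_dtype; only the loading keyword is renamed:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("my-org/my-model")  # hypothetical id
saved_dtype = getattr(config, "torch_dtype", None)  # config attribute keeps the old name
model = AutoModelForCausalLM.from_pretrained(
    "my-org/my-model",
    device_map="auto",
    dtype=saved_dtype,  # None lets the library pick its default dtype
)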


@@ -148,7 +148,7 @@ def load_sharded_model(
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
         use_cache=False,
-        torch_dtype=torch.float32,
+        dtype=torch.float32,
         _attn_implementation=model_config._attn_implementation,
         trust_remote_code=cfg.trust_remote_code,
     )
@@ -158,7 +158,7 @@ def load_sharded_model(
     with init_empty_weights():
         model = AutoModelForCausalLM.from_config(
             model_config,
-            torch_dtype=torch_dtype,
+            dtype=torch_dtype,
             trust_remote_code=cfg.trust_remote_code,
         )
     return model
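
A sketch (checkpoint id and dtype are placeholders) showing that the rename also applies to from_config, including meta-device construction under accelerate's init_empty_weights as in the hunk above:

import torch
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

model_config = AutoConfig.from_pretrained("my-org/my-model")  # hypothetical id
with init_empty_weights():
    # Builds the module structure on the meta device; no real weights are allocated.
    model = AutoModelForCausalLM.from_config(model_config, dtype=torch.float32)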