This commit is contained in:
Sunny Liu
2025-02-21 10:59:03 -05:00
committed by Sung Ching Liu
parent 4caa59a087
commit 3dd5c6f8ec
2 changed files with 3 additions and 5 deletions

View File

@@ -78,8 +78,8 @@ tf32: true # require >=ampere
bfloat16: true # require >=ampere
float16: true
# Tensor parallel
tp_size: 1 # should be set to the number of CUDA devices available
# Use Tensor parallel
tensor_parallel: true # requires multi-GPU
# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset
gpu_memory_limit: 20GiB

View File

@@ -762,10 +762,8 @@ class ModelLoader:
return hf_ds_cfg
skip_move_to_device = False
if self.cfg.tp_size is not None:
# self.model_kwargs["tp_plan"] = "auto"
if self.cfg.tensor_parallel:
del self.model_kwargs["device_map"]
# skip_move_to_device = True
if (  # pylint: disable=condition-evals-to-constant
(self.cfg.fsdp and self.cfg.fsdp_config.fsdp_cpu_ram_efficient_loading)