Compare commits

8 commits: pre-commit ... 1991test

| Author | SHA1 | Date |
|---|---|---|
| | bfb80a3ef9 | |
| | 38773d661f | |
| | 271c2c2b82 | |
| | 32b6f30947 | |
| | fc1f275e6c | |
| | 46d2b4ce89 | |
| | 88c9a7aecc | |
| | d9a93990d1 | |
1991.yml (new file, 295 lines added)

@@ -0,0 +1,295 @@

```yaml
base_model: Qwen/Qwen2.5-14B-Instruct
model_type: AutoModelForCausalLM #nohup accelerate launch -m axolotl.cli.train /home/ubuntu/qwen2.5_14B.yml > training_output.log 2>&1 &
tokenizer_type: AutoTokenizer
trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca

chat_template: chatml
dataset_prepared_path:
val_set_size: 0
output_dir: ./outputs/out

sequence_len: 2048
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true

unfrozen_parameters:
- ^lm_head.weight$
- ^model.embed_tokens.weight$
# input_layernorm layers
- model.layers.0.input_layernorm
- model.layers.1.input_layernorm
- model.layers.2.input_layernorm
- model.layers.3.input_layernorm
- model.layers.4.input_layernorm
- model.layers.5.input_layernorm
- model.layers.6.input_layernorm
- model.layers.7.input_layernorm
- model.layers.8.input_layernorm
- model.layers.9.input_layernorm
- model.layers.10.input_layernorm
- model.layers.11.input_layernorm
- model.layers.12.input_layernorm
- model.layers.13.input_layernorm
- model.layers.14.input_layernorm
- model.layers.15.input_layernorm
- model.layers.16.input_layernorm
- model.layers.17.input_layernorm
- model.layers.18.input_layernorm
- model.layers.19.input_layernorm
- model.layers.20.input_layernorm
- model.layers.21.input_layernorm
- model.layers.22.input_layernorm
- model.layers.23.input_layernorm
# lm_head layers
# mlp.down_proj layers
- model.layers.1.mlp.down_proj
- model.layers.35.mlp.down_proj
- model.layers.38.mlp.down_proj
- model.layers.37.mlp.down_proj
- model.layers.36.mlp.down_proj
- model.layers.15.mlp.down_proj
- model.layers.11.mlp.down_proj
- model.layers.12.mlp.down_proj
- model.layers.34.mlp.down_proj
- model.layers.44.mlp.down_proj
- model.layers.45.mlp.down_proj
- model.layers.9.mlp.down_proj
- model.layers.41.mlp.down_proj
- model.layers.33.mlp.down_proj
- model.layers.43.mlp.down_proj
- model.layers.40.mlp.down_proj
- model.layers.13.mlp.down_proj
- model.layers.8.mlp.down_proj
- model.layers.39.mlp.down_proj
- model.layers.10.mlp.down_proj
- model.layers.14.mlp.down_proj
- model.layers.16.mlp.down_proj
- model.layers.31.mlp.down_proj
- model.layers.32.mlp.down_proj
# mlp.gate_proj layers
- model.layers.1.mlp.gate_proj
- model.layers.44.mlp.gate_proj
- model.layers.46.mlp.gate_proj
- model.layers.45.mlp.gate_proj
- model.layers.43.mlp.gate_proj
- model.layers.47.mlp.gate_proj
- model.layers.42.mlp.gate_proj
- model.layers.32.mlp.gate_proj
- model.layers.27.mlp.gate_proj
- model.layers.33.mlp.gate_proj
- model.layers.28.mlp.gate_proj
- model.layers.39.mlp.gate_proj
- model.layers.41.mlp.gate_proj
- model.layers.40.mlp.gate_proj
- model.layers.30.mlp.gate_proj
- model.layers.29.mlp.gate_proj
- model.layers.31.mlp.gate_proj
- model.layers.26.mlp.gate_proj
- model.layers.37.mlp.gate_proj
- model.layers.10.mlp.gate_proj
- model.layers.38.mlp.gate_proj
- model.layers.12.mlp.gate_proj
- model.layers.36.mlp.gate_proj
- model.layers.13.mlp.gate_proj
# mlp.up_proj layers
- model.layers.1.mlp.up_proj
- model.layers.13.mlp.up_proj
- model.layers.11.mlp.up_proj
- model.layers.14.mlp.up_proj
- model.layers.15.mlp.up_proj
- model.layers.12.mlp.up_proj
- model.layers.8.mlp.up_proj
- model.layers.16.mlp.up_proj
- model.layers.9.mlp.up_proj
- model.layers.19.mlp.up_proj
- model.layers.10.mlp.up_proj
- model.layers.7.mlp.up_proj
- model.layers.17.mlp.up_proj
- model.layers.20.mlp.up_proj
- model.layers.21.mlp.up_proj
- model.layers.18.mlp.up_proj
- model.layers.38.mlp.up_proj
- model.layers.37.mlp.up_proj
- model.layers.39.mlp.up_proj
- model.layers.42.mlp.up_proj
- model.layers.41.mlp.up_proj
- model.layers.27.mlp.up_proj
- model.layers.28.mlp.up_proj
- model.layers.34.mlp.up_proj
# model.norm layers
# post_attention_layernorm layers
- model.layers.0.post_attention_layernorm
- model.layers.1.post_attention_layernorm
- model.layers.2.post_attention_layernorm
- model.layers.3.post_attention_layernorm
- model.layers.4.post_attention_layernorm
- model.layers.5.post_attention_layernorm
- model.layers.6.post_attention_layernorm
- model.layers.7.post_attention_layernorm
- model.layers.8.post_attention_layernorm
- model.layers.9.post_attention_layernorm
- model.layers.10.post_attention_layernorm
- model.layers.11.post_attention_layernorm
- model.layers.12.post_attention_layernorm
- model.layers.13.post_attention_layernorm
- model.layers.14.post_attention_layernorm
- model.layers.15.post_attention_layernorm
- model.layers.16.post_attention_layernorm
- model.layers.17.post_attention_layernorm
- model.layers.18.post_attention_layernorm
- model.layers.19.post_attention_layernorm
- model.layers.20.post_attention_layernorm
- model.layers.21.post_attention_layernorm
- model.layers.22.post_attention_layernorm
- model.layers.23.post_attention_layernorm
# self_attn.k_proj layers
- model.layers.47.self_attn.k_proj
- model.layers.39.self_attn.k_proj
- model.layers.41.self_attn.k_proj
- model.layers.37.self_attn.k_proj
- model.layers.35.self_attn.k_proj
- model.layers.44.self_attn.k_proj
- model.layers.38.self_attn.k_proj
- model.layers.14.self_attn.k_proj
- model.layers.7.self_attn.k_proj
- model.layers.12.self_attn.k_proj
- model.layers.11.self_attn.k_proj
- model.layers.32.self_attn.k_proj
- model.layers.10.self_attn.k_proj
- model.layers.8.self_attn.k_proj
- model.layers.9.self_attn.k_proj
- model.layers.6.self_attn.k_proj
- model.layers.45.self_attn.k_proj
- model.layers.42.self_attn.k_proj
- model.layers.5.self_attn.k_proj
- model.layers.40.self_attn.k_proj
- model.layers.33.self_attn.k_proj
- model.layers.0.self_attn.k_proj
- model.layers.34.self_attn.k_proj
- model.layers.13.self_attn.k_proj
# self_attn.o_proj layers
- model.layers.12.self_attn.o_proj
- model.layers.5.self_attn.o_proj
- model.layers.14.self_attn.o_proj
- model.layers.16.self_attn.o_proj
- model.layers.20.self_attn.o_proj
- model.layers.13.self_attn.o_proj
- model.layers.11.self_attn.o_proj
- model.layers.4.self_attn.o_proj
- model.layers.6.self_attn.o_proj
- model.layers.19.self_attn.o_proj
- model.layers.7.self_attn.o_proj
- model.layers.18.self_attn.o_proj
- model.layers.8.self_attn.o_proj
- model.layers.38.self_attn.o_proj
- model.layers.15.self_attn.o_proj
- model.layers.17.self_attn.o_proj
- model.layers.9.self_attn.o_proj
- model.layers.10.self_attn.o_proj
- model.layers.21.self_attn.o_proj
- model.layers.28.self_attn.o_proj
- model.layers.32.self_attn.o_proj
- model.layers.35.self_attn.o_proj
- model.layers.39.self_attn.o_proj
- model.layers.3.self_attn.o_proj
# self_attn.q_proj layers
- model.layers.1.self_attn.q_proj
- model.layers.2.self_attn.q_proj
- model.layers.3.self_attn.q_proj
- model.layers.44.self_attn.q_proj
- model.layers.29.self_attn.q_proj
- model.layers.45.self_attn.q_proj
- model.layers.43.self_attn.q_proj
- model.layers.32.self_attn.q_proj
- model.layers.38.self_attn.q_proj
- model.layers.19.self_attn.q_proj
- model.layers.42.self_attn.q_proj
- model.layers.34.self_attn.q_proj
- model.layers.36.self_attn.q_proj
- model.layers.40.self_attn.q_proj
- model.layers.26.self_attn.q_proj
- model.layers.20.self_attn.q_proj
- model.layers.39.self_attn.q_proj
- model.layers.28.self_attn.q_proj
- model.layers.35.self_attn.q_proj
- model.layers.41.self_attn.q_proj
- model.layers.33.self_attn.q_proj
- model.layers.25.self_attn.q_proj
- model.layers.30.self_attn.q_proj
- model.layers.27.self_attn.q_proj
# self_attn.v_proj layers
- model.layers.0.self_attn.v_proj
- model.layers.7.self_attn.v_proj
- model.layers.39.self_attn.v_proj
- model.layers.31.self_attn.v_proj
- model.layers.15.self_attn.v_proj
- model.layers.10.self_attn.v_proj
- model.layers.32.self_attn.v_proj
- model.layers.41.self_attn.v_proj
- model.layers.6.self_attn.v_proj
- model.layers.33.self_attn.v_proj
- model.layers.42.self_attn.v_proj
- model.layers.29.self_attn.v_proj
- model.layers.14.self_attn.v_proj
- model.layers.9.self_attn.v_proj
- model.layers.35.self_attn.v_proj
- model.layers.38.self_attn.v_proj
- model.layers.13.self_attn.v_proj
- model.layers.30.self_attn.v_proj
- model.layers.5.self_attn.v_proj
- model.layers.34.self_attn.v_proj
- model.layers.28.self_attn.v_proj
- model.layers.37.self_attn.v_proj
- model.layers.27.self_attn.v_proj
- model.layers.11.self_attn.v_proj
# model.embed_tokens layers

gradient_accumulation_steps: 2
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_torch_fused
lr_scheduler: linear
learning_rate: 5e-6

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true

gradient_checkpointing: unsloth
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 2
saves_per_epoch: 1
save_total_limit: 4
debug:
deepspeed: deepspeed_configs/zero3_bf16.json
weight_decay: 0.05
special_tokens:
  eos_token: <|im_end|>
```
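The `unfrozen_parameters` block mixes anchored regexes (`^lm_head.weight$`) with plain prefixes (`model.layers.0.input_layernorm`); everything not matched stays frozen, so only the listed tensors are trained. Below is a minimal sketch of how such patterns can be resolved against parameter names, assuming each entry is applied as a regex search; it is an illustration only, not Axolotl's actual matching code, and the example parameter names are made up.

```python
import re

# A few entries from the config above (assumed to be applied as regex searches,
# which also covers plain prefixes).
unfrozen_patterns = [
    r"^lm_head.weight$",
    r"^model.embed_tokens.weight$",
    r"model.layers.0.input_layernorm",
    r"model.layers.1.mlp.down_proj",
]

# Hypothetical parameter names as they would appear in model.named_parameters().
example_params = [
    "lm_head.weight",
    "model.embed_tokens.weight",
    "model.layers.0.input_layernorm.weight",
    "model.layers.1.mlp.down_proj.weight",
    "model.layers.2.self_attn.q_proj.weight",  # not listed, stays frozen
]

for name in example_params:
    trainable = any(re.search(pattern, name) for pattern in unfrozen_patterns)
    print(f"{name}: requires_grad={trainable}")
```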
```diff
@@ -895,13 +895,13 @@ class AxolotlTrainer(SchedulerMixin, Trainer):
         for key, value in metrics.items():
             self._stored_metrics[train_eval][key].append(value)
 
-    def _save_checkpoint(self, model, trial, metrics=None):
+    def _save_checkpoint(self, model, trial):
         # make sure the checkpoint dir exists, since trainer is flakey
         checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
         run_dir = self._get_output_dir(trial=trial)
         output_dir = os.path.join(run_dir, checkpoint_folder)
         os.makedirs(output_dir, exist_ok=True)
-        return super()._save_checkpoint(model, trial, metrics=metrics)
+        return super()._save_checkpoint(model, trial)
 
 
 class AxolotlMambaTrainer(AxolotlTrainer):
```
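The signature change tracks newer `transformers` releases, where `Trainer._save_checkpoint` dropped its `metrics` argument. A quick verification sketch (not part of this change) for checking which signature the installed version exposes:

```python
import inspect

from transformers import Trainer

# Older transformers: _save_checkpoint(self, model, trial, metrics=None)
# Newer transformers:  _save_checkpoint(self, model, trial)
params = inspect.signature(Trainer._save_checkpoint).parameters
print("accepts metrics:", "metrics" in params)
```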
```diff
@@ -27,15 +27,18 @@ SUPPORTED_MULTIPACK_MODEL_TYPES = [
 ]
 
 
-def patch_for_multipack(model_type, model_name=None, is_remote_code=False):
+# def patch_for_multipack(model_type, model_name=None, is_remote_code=False):
+def patch_for_multipack(model_type, model_name=None, has_remote_code=False):
     if model_type == "gemmoe":
         patch_remote(model_name, ".configuration_gemmoe", ".modeling_gemmoe")
     elif model_type == "deepseek_v2":
         patch_remote(model_name, ".configuration_deepseek", ".modeling_deepseek")
-    elif hasattr(transformers, "modeling_flash_attention_utils") and not is_remote_code:
-        transformers.modeling_flash_attention_utils._get_unpad_data = (  # pylint: disable=protected-access
-            get_unpad_data
-        )
+    # elif hasattr(transformers, "modeling_flash_attention_utils") and not is_remote_code:
+    elif hasattr(transformers, "modeling_flash_attention_utils"):
+        if not has_remote_code:
+            transformers.modeling_flash_attention_utils._get_unpad_data = (  # pylint: disable=protected-access
+                get_unpad_data
+            )
     if model_type == "mixtral" and is_deepspeed_zero3_enabled():
         patch_mixtral_moe_forward_zero3()
     return
```
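The restructured guard is easy to misread in diff form; the idea is that the packing-aware unpad function is only monkey-patched into transformers when the model does not ship remote modeling code. A minimal, illustrative sketch of that guard follows; the stub stands in for Axolotl's real `get_unpad_data` helper, and `has_remote_code` is hard-coded here even though it is derived from the model config in this change.

```python
import transformers


def get_unpad_data(attention_mask):
    """Stand-in for Axolotl's packing-aware unpad helper (illustrative only)."""
    raise NotImplementedError


has_remote_code = False  # derived from the config's auto_map in the real code

if hasattr(transformers, "modeling_flash_attention_utils"):
    if not has_remote_code:
        # Swap transformers' internal helper for the packing-aware version.
        # pylint: disable=protected-access
        transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
```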
```diff
@@ -394,10 +394,15 @@ class ModelLoader:
             and self.cfg.flash_attention
             and self.cfg.sample_packing
         ):
+            has_remote_code = (
+                "auto_map" in self.model_config
+                and self.model_type in self.model_config["auto_map"]
+            )
+
             patch_for_multipack(
                 self.cfg.model_config_type,
                 model_name=self.cfg.base_model,
-                is_remote_code=self.cfg.trust_remote_code,
+                has_remote_code=has_remote_code,
             )
 
         if self.cfg.is_llama_derived_model:
```
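The caller now decides `has_remote_code` from the model config's `auto_map` entry rather than from `trust_remote_code`, since a user can set `trust_remote_code: true` (as the YAML above does) for a model whose code is fully native to transformers. A self-contained sketch of how that check behaves; the config dicts are made up for illustration.

```python
# Illustrative config dicts, not real model configs.
model_type = "AutoModelForCausalLM"

native_cfg = {"model_type": "qwen2"}  # no custom modeling code shipped
remote_cfg = {
    "model_type": "deepseek_v2",
    "auto_map": {"AutoModelForCausalLM": "modeling_deepseek.DeepseekV2ForCausalLM"},
}


def has_remote_code(model_config: dict) -> bool:
    # Mirrors the check added in this hunk.
    return "auto_map" in model_config and model_type in model_config["auto_map"]


print(has_remote_code(native_cfg))  # False: safe to monkey-patch _get_unpad_data
print(has_remote_code(remote_cfg))  # True: remote modeling code, skip the patch
```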