<header id="title-block-header" class="quarto-title-block default"><nav class="quarto-page-breadcrumbs quarto-title-breadcrumbs d-none d-lg-block" aria-label="breadcrumb"><ol class="breadcrumb"><li class="breadcrumb-item"><a href="../docs/getting-started.html">Getting Started</a></li><li class="breadcrumb-item"><a href="../docs/config-reference.html">Config Reference</a></li></ol></nav>
|
||
<div class="quarto-title">
|
||
<h1 class="title">Config Reference</h1>
|
||
</div>
|
||
|
||
<div>
|
||
<div class="description">
|
||
A complete list of all configuration options.
|
||
</div>
|
||
</div>
|
||
|
||
|
||
<div class="quarto-title-meta">
|
||
|
||
|
||
|
||
|
||
</div>
|
||
|
||
|
||
|
||
</header>
|
||
|
||
|
||
<div class="code-copy-outer-scaffold"><div class="sourceCode" id="cb1"><pre class="sourceCode yaml code-with-copy"><code class="sourceCode yaml"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="co"># Allow overwrite yml config using from cli</span></span>
|
||
<span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a><span class="fu">strict</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
|
||
<span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="co"># Resume from a specific checkpoint dir</span></span>
|
||
<span id="cb1-4"><a href="#cb1-4" aria-hidden="true" tabindex="-1"></a><span class="fu">resume_from_checkpoint</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-5"><a href="#cb1-5" aria-hidden="true" tabindex="-1"></a><span class="co"># If resume_from_checkpoint isn't set and you simply want it to start where it left off.</span></span>
|
||
<span id="cb1-6"><a href="#cb1-6" aria-hidden="true" tabindex="-1"></a><span class="co"># Be careful with this being turned on between different models.</span></span>
|
||
<span id="cb1-7"><a href="#cb1-7" aria-hidden="true" tabindex="-1"></a><span class="fu">auto_resume_from_checkpoints</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-8"><a href="#cb1-8" aria-hidden="true" tabindex="-1"></a><span class="co"># Resize the model embeddings when new tokens are added to multiples of 32. This is</span></span>
|
||
<span id="cb1-9"><a href="#cb1-9" aria-hidden="true" tabindex="-1"></a><span class="co"># reported to improve training speed on some models</span></span>
|
||
<span id="cb1-10"><a href="#cb1-10" aria-hidden="true" tabindex="-1"></a><span class="fu">resize_token_embeddings_to_32x</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-11"><a href="#cb1-11" aria-hidden="true" tabindex="-1"></a><span class="fu">mean_resizing_embeddings</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
|
||
<span id="cb1-12"><a href="#cb1-12" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-13"><a href="#cb1-13" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.</span></span>
|
||
<span id="cb1-14"><a href="#cb1-14" aria-hidden="true" tabindex="-1"></a><span class="fu">shrink_embeddings</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-15"><a href="#cb1-15" aria-hidden="true" tabindex="-1"></a><span class="co"># Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs</span></span>
|
||
<span id="cb1-16"><a href="#cb1-16" aria-hidden="true" tabindex="-1"></a><span class="fu">embeddings_skip_upcast</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-17"><a href="#cb1-17" aria-hidden="true" tabindex="-1"></a><span class="co"># Reinitialize model weights randomly instead of loading pretrained weights</span></span>
|
||
<span id="cb1-18"><a href="#cb1-18" aria-hidden="true" tabindex="-1"></a><span class="fu">reinit_weights</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-19"><a href="#cb1-19" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-20"><a href="#cb1-20" aria-hidden="true" tabindex="-1"></a><span class="co"># module to custom trainer class to use for training</span></span>
|
||
<span id="cb1-21"><a href="#cb1-21" aria-hidden="true" tabindex="-1"></a><span class="fu">trainer_cls</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-22"><a href="#cb1-22" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-23"><a href="#cb1-23" aria-hidden="true" tabindex="-1"></a><span class="co"># Use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'</span></span>
|
||
<span id="cb1-24"><a href="#cb1-24" aria-hidden="true" tabindex="-1"></a><span class="fu">rl</span><span class="kw">:</span><span class="at"> RLType | None</span></span>
|
||
<span id="cb1-25"><a href="#cb1-25" aria-hidden="true" tabindex="-1"></a></span>
|
||
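For example, resuming an interrupted run can be configured either way (the checkpoint path below is illustrative):

```yaml
# Resume from an explicit checkpoint directory (path is illustrative)
resume_from_checkpoint: ./outputs/my-run/checkpoint-500

# ...or leave resume_from_checkpoint unset and pick up the latest checkpoint
# in the output directory automatically:
# auto_resume_from_checkpoints: true
```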
<span id="cb1-26"><a href="#cb1-26" aria-hidden="true" tabindex="-1"></a><span class="fu">trl</span><span class="kw">:</span><span class="at"> TRLConfig | None</span></span>
|
||
<span id="cb1-27"><a href="#cb1-27" aria-hidden="true" tabindex="-1"></a><span class="co"> # For TRLConfig:</span></span>
|
||
<span id="cb1-28"><a href="#cb1-28" aria-hidden="true" tabindex="-1"></a><span class="co"> # Beta parameter for the RL training. Same as `rl_beta`. Use</span></span>
|
||
<span id="cb1-29"><a href="#cb1-29" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">beta</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-30"><a href="#cb1-30" aria-hidden="true" tabindex="-1"></a><span class="co"> # Maximum length of the completion for RL training.</span></span>
|
||
<span id="cb1-31"><a href="#cb1-31" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">max_completion_length</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-32"><a href="#cb1-32" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-33"><a href="#cb1-33" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to use VLLM for RL training.</span></span>
|
||
<span id="cb1-34"><a href="#cb1-34" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">use_vllm</span><span class="kw">:</span><span class="at"> bool = False</span></span>
|
||
<span id="cb1-35"><a href="#cb1-35" aria-hidden="true" tabindex="-1"></a><span class="co"> # VLLM mode to use, one of 'server' or 'colocate'</span></span>
|
||
<span id="cb1-36"><a href="#cb1-36" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">vllm_mode</span><span class="kw">:</span><span class="at"> Literal['server', 'colocate'] | None</span></span>
|
||
<span id="cb1-37"><a href="#cb1-37" aria-hidden="true" tabindex="-1"></a><span class="co"> # Host of the vLLM server to connect to.</span></span>
|
||
<span id="cb1-38"><a href="#cb1-38" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">vllm_server_host</span><span class="kw">:</span><span class="at"> str | None = 0.0.0.0</span></span>
|
||
<span id="cb1-39"><a href="#cb1-39" aria-hidden="true" tabindex="-1"></a><span class="co"> # Port of the vLLM server to connect to.</span></span>
|
||
<span id="cb1-40"><a href="#cb1-40" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">vllm_server_port</span><span class="kw">:</span><span class="at"> int | None = 8000</span></span>
|
||
<span id="cb1-41"><a href="#cb1-41" aria-hidden="true" tabindex="-1"></a><span class="co"> # Total timeout (in seconds) to wait for the vLLM server to respond.</span></span>
|
||
<span id="cb1-42"><a href="#cb1-42" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">vllm_server_timeout</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-43"><a href="#cb1-43" aria-hidden="true" tabindex="-1"></a><span class="co"> # Regex for vLLM guided decoding.</span></span>
|
||
<span id="cb1-44"><a href="#cb1-44" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">vllm_guided_decoding_regex</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-45"><a href="#cb1-45" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-46"><a href="#cb1-46" aria-hidden="true" tabindex="-1"></a><span class="co"> # List of reward functions to load. Paths must be importable from current dir.</span></span>
|
||
<span id="cb1-47"><a href="#cb1-47" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">reward_funcs</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
|
||
<span id="cb1-48"><a href="#cb1-48" aria-hidden="true" tabindex="-1"></a><span class="co"> # List of reward weights for the reward functions.</span></span>
|
||
<span id="cb1-49"><a href="#cb1-49" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">reward_weights</span><span class="kw">:</span><span class="at"> list[float] | None</span></span>
|
||
<span id="cb1-50"><a href="#cb1-50" aria-hidden="true" tabindex="-1"></a><span class="co"> # Number of generations to sample.</span></span>
|
||
<span id="cb1-51"><a href="#cb1-51" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">num_generations</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-52"><a href="#cb1-52" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to log completions.</span></span>
|
||
<span id="cb1-53"><a href="#cb1-53" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">log_completions</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
|
||
<span id="cb1-54"><a href="#cb1-54" aria-hidden="true" tabindex="-1"></a><span class="co"> # Number of completions to print when log_completions is True.</span></span>
|
||
<span id="cb1-55"><a href="#cb1-55" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">num_completions_to_print</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-56"><a href="#cb1-56" aria-hidden="true" tabindex="-1"></a><span class="co"> # Controls whether importance sampling ratios are computed at the `'token'` or</span></span>
|
||
<span id="cb1-57"><a href="#cb1-57" aria-hidden="true" tabindex="-1"></a><span class="co"> # `'sequence'` level. For GSPO, use `sequence`, default is None which corresponds to</span></span>
|
||
<span id="cb1-58"><a href="#cb1-58" aria-hidden="true" tabindex="-1"></a><span class="co"> # the original GRPO paper.</span></span>
|
||
<span id="cb1-59"><a href="#cb1-59" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">importance_sampling_level</span><span class="kw">:</span><span class="at"> Literal['sequence', 'token'] | None</span></span>
|
||
<span id="cb1-60"><a href="#cb1-60" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-61"><a href="#cb1-61" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to sync the reference model.</span></span>
|
||
<span id="cb1-62"><a href="#cb1-62" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">sync_ref_model</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
|
||
<span id="cb1-63"><a href="#cb1-63" aria-hidden="true" tabindex="-1"></a><span class="co"> # Mixup alpha for the reference model.</span></span>
|
||
<span id="cb1-64"><a href="#cb1-64" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">ref_model_mixup_alpha</span><span class="kw">:</span><span class="at"> float | None = 0.9</span></span>
|
||
<span id="cb1-65"><a href="#cb1-65" aria-hidden="true" tabindex="-1"></a><span class="co"> # Sync steps for the reference model.</span></span>
|
||
<span id="cb1-66"><a href="#cb1-66" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">ref_model_sync_steps</span><span class="kw">:</span><span class="at"> int | None = 64</span></span>
|
||
<span id="cb1-67"><a href="#cb1-67" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to scale rewards by their standard deviation.</span></span>
|
||
<span id="cb1-68"><a href="#cb1-68" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">scale_rewards</span><span class="kw">:</span><span class="at"> bool = True</span></span>
|
||
<span id="cb1-69"><a href="#cb1-69" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-70"><a href="#cb1-70" aria-hidden="true" tabindex="-1"></a><span class="co"> # Sampling temperature for the GRPO policy.</span></span>
|
||
<span id="cb1-71"><a href="#cb1-71" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">temperature</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-72"><a href="#cb1-72" aria-hidden="true" tabindex="-1"></a><span class="co"> # Top-p sampling probability for the generation policy.</span></span>
|
||
<span id="cb1-73"><a href="#cb1-73" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">top_p</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-74"><a href="#cb1-74" aria-hidden="true" tabindex="-1"></a><span class="co"> # Top-k sampling for the generation policy.</span></span>
|
||
<span id="cb1-75"><a href="#cb1-75" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">top_k</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-76"><a href="#cb1-76" aria-hidden="true" tabindex="-1"></a><span class="co"> # Minimum probability for the generation policy.</span></span>
|
||
<span id="cb1-77"><a href="#cb1-77" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">min_p</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-78"><a href="#cb1-78" aria-hidden="true" tabindex="-1"></a><span class="co"> # Penalty for tokens that appear in prompt and generated text.</span></span>
|
||
<span id="cb1-79"><a href="#cb1-79" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">repetition_penalty</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-80"><a href="#cb1-80" aria-hidden="true" tabindex="-1"></a><span class="co"> # Number of iterations per batch (μ) for GRPO.</span></span>
|
||
<span id="cb1-81"><a href="#cb1-81" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">num_iterations</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-82"><a href="#cb1-82" aria-hidden="true" tabindex="-1"></a><span class="co"> # Epsilon value for clipping in the GRPO algorithm.</span></span>
|
||
<span id="cb1-83"><a href="#cb1-83" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">epsilon</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-84"><a href="#cb1-84" aria-hidden="true" tabindex="-1"></a><span class="co"> # Upper-bound epsilon value for clipping in the GRPO algorithm.</span></span>
|
||
<span id="cb1-85"><a href="#cb1-85" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">epsilon_high</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-86"><a href="#cb1-86" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to use Liger loss for GRPO.</span></span>
|
||
<span id="cb1-87"><a href="#cb1-87" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">use_liger_loss</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-88"><a href="#cb1-88" aria-hidden="true" tabindex="-1"></a><span class="co"> # Loss formulation to use. Supported values: grpo, bnpo, dr_grpo.</span></span>
|
||
<span id="cb1-89"><a href="#cb1-89" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">loss_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-90"><a href="#cb1-90" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to exclude truncated completions from loss calculation.</span></span>
|
||
<span id="cb1-91"><a href="#cb1-91" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">mask_truncated_completions</span><span class="kw">:</span><span class="at"> bool = False</span></span>
|
||
<span id="cb1-92"><a href="#cb1-92" aria-hidden="true" tabindex="-1"></a></span>
|
||
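Putting several of these together, a minimal GRPO setup might look like the following sketch. The reward function path and hyperparameter values are illustrative, not defaults or recommendations:

```yaml
rl: grpo
trl:
  beta: 0.04                 # illustrative KL penalty weight
  max_completion_length: 256
  num_generations: 4         # completions sampled per prompt
  use_vllm: true
  vllm_mode: colocate        # run vLLM inside the training process
  # rewards.format_reward is a hypothetical function importable from the current dir
  reward_funcs:
    - rewards.format_reward
  reward_weights:
    - 1.0
  log_completions: true
  num_completions_to_print: 2
```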
<span id="cb1-93"><a href="#cb1-93" aria-hidden="true" tabindex="-1"></a><span class="fu">vllm</span><span class="kw">:</span><span class="at"> VllmConfig | None</span></span>
|
||
<span id="cb1-94"><a href="#cb1-94" aria-hidden="true" tabindex="-1"></a><span class="co"> # For VllmConfig:</span></span>
|
||
<span id="cb1-95"><a href="#cb1-95" aria-hidden="true" tabindex="-1"></a><span class="co"> # Device to use for VLLM</span></span>
|
||
<span id="cb1-96"><a href="#cb1-96" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">device</span><span class="kw">:</span><span class="at"> str | None = auto</span></span>
|
||
<span id="cb1-97"><a href="#cb1-97" aria-hidden="true" tabindex="-1"></a><span class="co"> # Tensor parallel size for VLLM</span></span>
|
||
<span id="cb1-98"><a href="#cb1-98" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">tensor_parallel_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-99"><a href="#cb1-99" aria-hidden="true" tabindex="-1"></a><span class="co"> # Data parallel size for VLLM</span></span>
|
||
<span id="cb1-100"><a href="#cb1-100" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_parallel_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-101"><a href="#cb1-101" aria-hidden="true" tabindex="-1"></a><span class="co"> # GPU memory utilization for VLLM</span></span>
|
||
<span id="cb1-102"><a href="#cb1-102" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">gpu_memory_utilization</span><span class="kw">:</span><span class="at"> float | None = 0.9</span></span>
|
||
<span id="cb1-103"><a href="#cb1-103" aria-hidden="true" tabindex="-1"></a><span class="co"> # Data type for VLLM</span></span>
|
||
<span id="cb1-104"><a href="#cb1-104" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">dtype</span><span class="kw">:</span><span class="at"> str | None = auto</span></span>
|
||
<span id="cb1-105"><a href="#cb1-105" aria-hidden="true" tabindex="-1"></a><span class="co"> # Maximum length of the model context for VLLM</span></span>
|
||
<span id="cb1-106"><a href="#cb1-106" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">max_model_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-107"><a href="#cb1-107" aria-hidden="true" tabindex="-1"></a><span class="co"> # Enable prefix caching for VLLM</span></span>
|
||
<span id="cb1-108"><a href="#cb1-108" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">enable_prefix_caching</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-109"><a href="#cb1-109" aria-hidden="true" tabindex="-1"></a><span class="co"> # Host for the vLLM server to start on</span></span>
|
||
<span id="cb1-110"><a href="#cb1-110" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">host</span><span class="kw">:</span><span class="at"> str | None = 0.0.0.0</span></span>
|
||
<span id="cb1-111"><a href="#cb1-111" aria-hidden="true" tabindex="-1"></a><span class="co"> # Port of the vLLM server to start on</span></span>
|
||
<span id="cb1-112"><a href="#cb1-112" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">port</span><span class="kw">:</span><span class="at"> int | None = 8000</span></span>
|
||
<span id="cb1-113"><a href="#cb1-113" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-114"><a href="#cb1-114" aria-hidden="true" tabindex="-1"></a><span class="co"> # Enable reasoning for VLLM</span></span>
|
||
<span id="cb1-115"><a href="#cb1-115" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">enable_reasoning</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-116"><a href="#cb1-116" aria-hidden="true" tabindex="-1"></a><span class="co"> # Reasoning parser for VLLM</span></span>
|
||
<span id="cb1-117"><a href="#cb1-117" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">reasoning_parser</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-118"><a href="#cb1-118" aria-hidden="true" tabindex="-1"></a></span>
|
||
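As an illustration, a server-mode pairing splits these settings between the vLLM server itself (`vllm:`) and the trainer that connects to it (`trl:`). The parallelism and memory values below are placeholders, not recommendations:

```yaml
# settings for the standalone vLLM generation server
vllm:
  tensor_parallel_size: 2
  gpu_memory_utilization: 0.85
  max_model_len: 4096
  host: 0.0.0.0
  port: 8000

# the trainer connects to that server
trl:
  use_vllm: true
  vllm_mode: server
  vllm_server_host: 0.0.0.0
  vllm_server_port: 8000
```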
<span id="cb1-119"><a href="#cb1-119" aria-hidden="true" tabindex="-1"></a><span class="fu">qat</span><span class="kw">:</span><span class="at"> QATConfig | None</span></span>
|
||
<span id="cb1-120"><a href="#cb1-120" aria-hidden="true" tabindex="-1"></a><span class="co"> # For QATConfig:</span></span>
|
||
<span id="cb1-121"><a href="#cb1-121" aria-hidden="true" tabindex="-1"></a><span class="co"> # Fake quantization layout to use for activation quantization.</span></span>
|
||
<span id="cb1-122"><a href="#cb1-122" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">activation_dtype</span><span class="kw">:</span><span class="at"> TorchAOQuantDType | None</span></span>
|
||
<span id="cb1-123"><a href="#cb1-123" aria-hidden="true" tabindex="-1"></a><span class="co"> # Fake quantization layout to use for weight quantization.</span></span>
|
||
<span id="cb1-124"><a href="#cb1-124" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">weight_dtype</span><span class="kw">:</span><span class="at"> TorchAOQuantDType = TorchAOQuantDType.int8</span></span>
|
||
<span id="cb1-125"><a href="#cb1-125" aria-hidden="true" tabindex="-1"></a><span class="co"> # Quantize embedding</span></span>
|
||
<span id="cb1-126"><a href="#cb1-126" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">quantize_embedding</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
|
||
<span id="cb1-127"><a href="#cb1-127" aria-hidden="true" tabindex="-1"></a><span class="co"> # The number of elements in each group for per-group fake quantization</span></span>
|
||
<span id="cb1-128"><a href="#cb1-128" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">group_size</span><span class="kw">:</span><span class="at"> int | None = 32</span></span>
|
||
<span id="cb1-129"><a href="#cb1-129" aria-hidden="true" tabindex="-1"></a><span class="co"> # The number of steps to apply fake quantization after</span></span>
|
||
<span id="cb1-130"><a href="#cb1-130" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">fake_quant_after_n_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-131"><a href="#cb1-131" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-132"><a href="#cb1-132" aria-hidden="true" tabindex="-1"></a><span class="fu">quantization</span><span class="kw">:</span><span class="at"> PTQConfig | None</span></span>
|
||
<span id="cb1-133"><a href="#cb1-133" aria-hidden="true" tabindex="-1"></a><span class="co"> # For PTQConfig:</span></span>
|
||
<span id="cb1-134"><a href="#cb1-134" aria-hidden="true" tabindex="-1"></a><span class="co"> # Fake quantization layout to use for weight quantization.</span></span>
|
||
<span id="cb1-135"><a href="#cb1-135" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">weight_dtype</span><span class="kw">:</span><span class="at"> TorchAOQuantDType = TorchAOQuantDType.int8</span></span>
|
||
<span id="cb1-136"><a href="#cb1-136" aria-hidden="true" tabindex="-1"></a><span class="co"> # Fake quantization layout to use for activation quantization.</span></span>
|
||
<span id="cb1-137"><a href="#cb1-137" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">activation_dtype</span><span class="kw">:</span><span class="at"> TorchAOQuantDType | None</span></span>
|
||
<span id="cb1-138"><a href="#cb1-138" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to quantize the embedding layer.</span></span>
|
||
<span id="cb1-139"><a href="#cb1-139" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">quantize_embedding</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-140"><a href="#cb1-140" aria-hidden="true" tabindex="-1"></a><span class="co"> # The number of elements in each group for per-group fake quantization</span></span>
|
||
<span id="cb1-141"><a href="#cb1-141" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">group_size</span><span class="kw">:</span><span class="at"> int | None = 32</span></span>
|
||
<span id="cb1-142"><a href="#cb1-142" aria-hidden="true" tabindex="-1"></a></span>
|
||
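A minimal sketch of int8 quantization-aware training, with a delayed fake-quantization start and matching post-training quantization settings (the step count and group size are illustrative):

```yaml
qat:
  weight_dtype: int8
  activation_dtype: int8
  group_size: 32
  # let the optimizer settle before fake quantization kicks in
  fake_quant_after_n_steps: 1000

# quantize with the same layout after training
quantization:
  weight_dtype: int8
  group_size: 32
```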
<span id="cb1-143"><a href="#cb1-143" aria-hidden="true" tabindex="-1"></a><span class="co"># Reward modelling: `True` or `False`</span></span>
|
||
<span id="cb1-144"><a href="#cb1-144" aria-hidden="true" tabindex="-1"></a><span class="fu">reward_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-145"><a href="#cb1-145" aria-hidden="true" tabindex="-1"></a><span class="co"># Process reward modelling: `True` or `False`</span></span>
|
||
<span id="cb1-146"><a href="#cb1-146" aria-hidden="true" tabindex="-1"></a><span class="fu">process_reward_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-147"><a href="#cb1-147" aria-hidden="true" tabindex="-1"></a><span class="co"># Coefficient to incentivize the reward model to output mean-zero rewards (proposed by</span></span>
|
||
<span id="cb1-148"><a href="#cb1-148" aria-hidden="true" tabindex="-1"></a><span class="co"># https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.</span></span>
|
||
<span id="cb1-149"><a href="#cb1-149" aria-hidden="true" tabindex="-1"></a><span class="fu">center_rewards_coefficient</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-150"><a href="#cb1-150" aria-hidden="true" tabindex="-1"></a><span class="fu">num_labels</span><span class="kw">:</span><span class="at"> int | None</span></span>
|
||
<span id="cb1-151"><a href="#cb1-151" aria-hidden="true" tabindex="-1"></a></span>
|
||
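For instance, a sequence-classification reward model that emits one scalar reward per sequence could be configured as follows (a sketch; `num_labels: 1` is the usual single-scalar choice, and the coefficient is the recommended value from the reference above):

```yaml
reward_model: true
num_labels: 1                     # one scalar reward per sequence
center_rewards_coefficient: 0.01  # recommended value, see paper linked above
```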
<span id="cb1-152"><a href="#cb1-152" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to perform weighting in DPO trainer</span></span>
|
||
<span id="cb1-153"><a href="#cb1-153" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_use_weighting</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-154"><a href="#cb1-154" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_use_logits_to_keep</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-155"><a href="#cb1-155" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_label_smoothing</span><span class="kw">:</span><span class="at"> float | None</span></span>
|
||
<span id="cb1-156"><a href="#cb1-156" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_norm_loss</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-157"><a href="#cb1-157" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_padding_free</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-158"><a href="#cb1-158" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_generate_during_eval</span><span class="kw">:</span><span class="at"> bool | None</span></span>
|
||
<span id="cb1-159"><a href="#cb1-159" aria-hidden="true" tabindex="-1"></a></span>
|
||
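These options pair with `rl: dpo`. A hedged sketch (the smoothing value is illustrative, not a recommendation):

```yaml
rl: dpo
dpo_label_smoothing: 0.1   # illustrative; smooths hard preference labels
dpo_padding_free: true
```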
<span id="cb1-160"><a href="#cb1-160" aria-hidden="true" tabindex="-1"></a><span class="co"># A list of one or more datasets to finetune the model with</span></span>
|
||
<span id="cb1-161"><a href="#cb1-161" aria-hidden="true" tabindex="-1"></a><span class="fu">datasets</span><span class="kw">:</span><span class="at"> Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None</span></span>
|
||
<span id="cb1-162"><a href="#cb1-162" aria-hidden="true" tabindex="-1"></a><span class="co"> # For SFTDataset:</span></span>
|
||
<span id="cb1-163"><a href="#cb1-163" aria-hidden="true" tabindex="-1"></a><span class="co"> # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory</span></span>
|
||
<span id="cb1-164"><a href="#cb1-164" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-165"><a href="#cb1-165" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset split to load from</span></span>
|
||
<span id="cb1-166"><a href="#cb1-166" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-167"><a href="#cb1-167" aria-hidden="true" tabindex="-1"></a><span class="co"> # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]</span></span>
|
||
<span id="cb1-168"><a href="#cb1-168" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> str | UserDefinedPrompterType | None</span></span>
|
||
<span id="cb1-169"><a href="#cb1-169" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedPrompterType:</span></span>
|
||
<span id="cb1-170"><a href="#cb1-170" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom user instruction prompt</span></span>
|
||
<span id="cb1-171"><a href="#cb1-171" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-172"><a href="#cb1-172" aria-hidden="true" tabindex="-1"></a><span class="co"> # Use {system} as key to be replaced</span></span>
|
||
<span id="cb1-173"><a href="#cb1-173" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-174"><a href="#cb1-174" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-175"><a href="#cb1-175" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_instruction</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-176"><a href="#cb1-176" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_input</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-177"><a href="#cb1-177" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_output</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-178"><a href="#cb1-178" aria-hidden="true" tabindex="-1"></a></span>
|
||
<span id="cb1-179"><a href="#cb1-179" aria-hidden="true" tabindex="-1"></a><span class="co"> # Customizable to be single line or multi-line. Use {instruction}/{input} as key to</span></span>
|
||
<span id="cb1-180"><a href="#cb1-180" aria-hidden="true" tabindex="-1"></a><span class="co"> # be replaced. 'format' can include {input}</span></span>
|
||
<span id="cb1-181"><a href="#cb1-181" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">format</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-182"><a href="#cb1-182" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'no_input_format' cannot include {input}</span></span>
|
||
<span id="cb1-183"><a href="#cb1-183" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">no_input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-184"><a href="#cb1-184" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_transform</span><span class="kw">:</span><span class="at"> str | None</span></span>
|
||
<span id="cb1-185"><a href="#cb1-185" aria-hidden="true" tabindex="-1"></a><span class="co"> # split dataset into N pieces (use with shards_idx)</span></span>
<span id="cb1-186"><a href="#cb1-186" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-187"><a href="#cb1-187" aria-hidden="true" tabindex="-1"></a><span class="co"> # the index of sharded dataset to use</span></span>
<span id="cb1-188"><a href="#cb1-188" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards_idx</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-189"><a href="#cb1-189" aria-hidden="true" tabindex="-1"></a><span class="co"> # process dataset in N sequential chunks for memory efficiency (exclusive with</span></span>
<span id="cb1-190"><a href="#cb1-190" aria-hidden="true" tabindex="-1"></a><span class="co"> # `shards`)</span></span>
<span id="cb1-191"><a href="#cb1-191" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">preprocess_shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-192"><a href="#cb1-192" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">conversation</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-193"><a href="#cb1-193" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-194"><a href="#cb1-194" aria-hidden="true" tabindex="-1"></a><span class="co"> # The name of the chat template to use for training, following values are supported:</span></span>
|
||
<span id="cb1-195"><a href="#cb1-195" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default: Uses the chat template that is available in the</span></span>
|
||
<span id="cb1-196"><a href="#cb1-196" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_config.json. If the chat template is not available in the tokenizer, it</span></span>
|
||
<span id="cb1-197"><a href="#cb1-197" aria-hidden="true" tabindex="-1"></a><span class="co"> # will raise an error. This is the default.</span></span>
|
||
<span id="cb1-198"><a href="#cb1-198" aria-hidden="true" tabindex="-1"></a><span class="co"> # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates</span></span>
|
||
<span id="cb1-199"><a href="#cb1-199" aria-hidden="true" tabindex="-1"></a><span class="co"> # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.</span></span>
|
||
<span id="cb1-200"><a href="#cb1-200" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_*: where * is the name of the chat template to fallback</span></span>
|
||
<span id="cb1-201"><a href="#cb1-201" aria-hidden="true" tabindex="-1"></a><span class="co"> # to if the tokenizer does not have a chat template else default to tokenizer. E.g.</span></span>
|
||
<span id="cb1-202"><a href="#cb1-202" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat</span></span>
|
||
<span id="cb1-203"><a href="#cb1-203" aria-hidden="true" tabindex="-1"></a><span class="co"> # template. The custom jinja template should be provided in the chat_template_jinja</span></span>
|
||
<span id="cb1-204"><a href="#cb1-204" aria-hidden="true" tabindex="-1"></a><span class="co"> # field.</span></span>
<span id="cb1-205"><a href="#cb1-205" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template</span><span class="kw">:</span><span class="at"> ChatTemplate | str | None</span></span>
<span id="cb1-206"><a href="#cb1-206" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom jinja chat template or path to jinja file. Used only if `chat_template:</span></span>
|
||
<span id="cb1-207"><a href="#cb1-207" aria-hidden="true" tabindex="-1"></a><span class="co"> # jinja` or empty.</span></span>
<span id="cb1-208"><a href="#cb1-208" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template_jinja</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-209"><a href="#cb1-209" aria-hidden="true" tabindex="-1"></a><span class="co"> # path to source data files</span></span>
<span id="cb1-210"><a href="#cb1-210" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> str | list[str] | None</span></span>
<span id="cb1-211"><a href="#cb1-211" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-212"><a href="#cb1-212" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset configuration to load</span></span>
<span id="cb1-213"><a href="#cb1-213" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-214"><a href="#cb1-214" aria-hidden="true" tabindex="-1"></a><span class="co"> # defines the datatype when path is a file</span></span>
<span id="cb1-215"><a href="#cb1-215" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">ds_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-216"><a href="#cb1-216" aria-hidden="true" tabindex="-1"></a><span class="co"> # For `completion` datasets only, uses the provided field instead of `text` column</span></span>
<span id="cb1-217"><a href="#cb1-217" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-218"><a href="#cb1-218" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_human</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-219"><a href="#cb1-219" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-220"><a href="#cb1-220" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the messages (default: "messages")</span></span>
<span id="cb1-221"><a href="#cb1-221" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_messages</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-222"><a href="#cb1-222" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the tools (default: "tools"). Must be a list[dict] and follow [JSON</span></span>
<span id="cb1-223"><a href="#cb1-223" aria-hidden="true" tabindex="-1"></a><span class="co"> # schema](https://json-schema.org/learn/getting-started-step-by-step).</span></span>
<span id="cb1-224"><a href="#cb1-224" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_tools</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-225"><a href="#cb1-225" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the reasoning trace (default: "reasoning_content").</span></span>
<span id="cb1-226"><a href="#cb1-226" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_thinking</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-227"><a href="#cb1-227" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key the chat template expects that indicates the reasoning trace.</span></span>
<span id="cb1-228"><a href="#cb1-228" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">template_thinking_key</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-229"><a href="#cb1-229" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-230"><a href="#cb1-230" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_role</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-231"><a href="#cb1-231" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-232"><a href="#cb1-232" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_content</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-233"><a href="#cb1-233" aria-hidden="true" tabindex="-1"></a><span class="co"> # Mapping of properties from the input dataset to the chat template. (default:</span></span>
|
||
<span id="cb1-234"><a href="#cb1-234" aria-hidden="true" tabindex="-1"></a><span class="co"> # message_property_mappings={'role':'role', 'content':'content'}) If a property exists</span></span>
|
||
<span id="cb1-235"><a href="#cb1-235" aria-hidden="true" tabindex="-1"></a><span class="co"> # in the template but not in this mapping, the system will attempt to load it directly</span></span>
|
||
<span id="cb1-236"><a href="#cb1-236" aria-hidden="true" tabindex="-1"></a><span class="co"> # from the message using the property name as the key. Example: In the mapping below,</span></span>
|
||
<span id="cb1-237"><a href="#cb1-237" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and</span></span>
|
||
<span id="cb1-238"><a href="#cb1-238" aria-hidden="true" tabindex="-1"></a><span class="co"> # used as 'content' in the chat template.</span></span>
<span id="cb1-239"><a href="#cb1-239" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_property_mappings</span><span class="kw">:</span><span class="at"> dict[str, str] | None</span></span>
<span id="cb1-240"><a href="#cb1-240" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that indicates via boolean whether tokens of a turn</span></span>
<span id="cb1-241"><a href="#cb1-241" aria-hidden="true" tabindex="-1"></a><span class="co"> # should be considered for training. Useful to selectively train on certain turns</span></span>
<span id="cb1-242"><a href="#cb1-242" aria-hidden="true" tabindex="-1"></a><span class="co"> # besides the `roles_to_train`.</span></span>
<span id="cb1-243"><a href="#cb1-243" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-244"><a href="#cb1-244" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that contains the training details. Useful to</span></span>
<span id="cb1-245"><a href="#cb1-245" aria-hidden="true" tabindex="-1"></a><span class="co"> # selectively train on certain tokens in a turn. The value of the key is a List[Dict]</span></span>
<span id="cb1-246"><a href="#cb1-246" aria-hidden="true" tabindex="-1"></a><span class="co"> # containing `begin_offset` (start character index in content), `end_offset` (end</span></span>
<span id="cb1-247"><a href="#cb1-247" aria-hidden="true" tabindex="-1"></a><span class="co"> # character index in content), and `train` (boolean whether to train).</span></span>
<span id="cb1-248"><a href="#cb1-248" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training_detail</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-249"><a href="#cb1-249" aria-hidden="true" tabindex="-1"></a><span class="co"> # (for Qwen3 template only) Whether to split the assistant content based on a</span></span>
<span id="cb1-250"><a href="#cb1-250" aria-hidden="true" tabindex="-1"></a><span class="co"> # reasoning trace inside delimited tags</span></span>
<span id="cb1-251"><a href="#cb1-251" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split_thinking</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-252"><a href="#cb1-252" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">logprobs_field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-253"><a href="#cb1-253" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">temperature</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-254"><a href="#cb1-254" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles to train on. The tokens from these roles will be considered for the loss.</span></span>
<span id="cb1-255"><a href="#cb1-255" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles_to_train</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-256"><a href="#cb1-256" aria-hidden="true" tabindex="-1"></a><span class="co"> # Which EOS tokens to train on in the conversation. Possible values are: all: train on</span></span>
<span id="cb1-257"><a href="#cb1-257" aria-hidden="true" tabindex="-1"></a><span class="co"> # all EOS tokens, turn (default): train on the EOS token at the end of each trainable</span></span>
<span id="cb1-258"><a href="#cb1-258" aria-hidden="true" tabindex="-1"></a><span class="co"> # turn, last: train on the last EOS token in the conversation</span></span>
<span id="cb1-259"><a href="#cb1-259" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">train_on_eos</span><span class="kw">:</span><span class="at"> Literal['all', 'turn', 'last'] | None</span></span>
<span id="cb1-260"><a href="#cb1-260" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles mapping in the messages. The format is {target_role: [source_roles]}. All</span></span>
<span id="cb1-261"><a href="#cb1-261" aria-hidden="true" tabindex="-1"></a><span class="co"> # source roles will be mapped to the target role. The default is: user: ["human",</span></span>
<span id="cb1-262"><a href="#cb1-262" aria-hidden="true" tabindex="-1"></a><span class="co"> # "user"], assistant: ["gpt", "assistant"], system: ["system"], tool: ["tool"]</span></span>
<span id="cb1-263"><a href="#cb1-263" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles</span><span class="kw">:</span><span class="at"> dict[str, list[str]] | None</span></span>
<span id="cb1-264"><a href="#cb1-264" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to drop the system turn from the dataset. Only works with chat_template.</span></span>
|
||
<span id="cb1-265"><a href="#cb1-265" aria-hidden="true" tabindex="-1"></a><span class="co"> # This does not drop the default system message from chat_template if it exists. If</span></span>
|
||
<span id="cb1-266"><a href="#cb1-266" aria-hidden="true" tabindex="-1"></a><span class="co"> # you wish to, we recommend using a custom jinja template with the default system</span></span>
|
||
<span id="cb1-267"><a href="#cb1-267" aria-hidden="true" tabindex="-1"></a><span class="co"> # message removed or adding a system turn with empty content.</span></span>
<span id="cb1-268"><a href="#cb1-268" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">drop_system_message</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-269"><a href="#cb1-269" aria-hidden="true" tabindex="-1"></a><span class="co"> # Trust remote code for untrusted source</span></span>
<span id="cb1-270"><a href="#cb1-270" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-271"><a href="#cb1-271" aria-hidden="true" tabindex="-1"></a><span class="co"> # The specific revision of the dataset to use when loading from the Hugging Face Hub.</span></span>
<span id="cb1-272"><a href="#cb1-272" aria-hidden="true" tabindex="-1"></a><span class="co"> # This can be a commit hash, tag, or branch name. If not specified, the latest version</span></span>
<span id="cb1-273"><a href="#cb1-273" aria-hidden="true" tabindex="-1"></a><span class="co"> # will be used. This parameter is ignored for local datasets.</span></span>
<span id="cb1-274"><a href="#cb1-274" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-275"><a href="#cb1-275" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-276"><a href="#cb1-276" aria-hidden="true" tabindex="-1"></a><span class="co"> # For DPODataset:</span></span>
<span id="cb1-277"><a href="#cb1-277" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-278"><a href="#cb1-278" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-279"><a href="#cb1-279" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> UserDefinedDPOType | str | None</span></span>
<span id="cb1-280"><a href="#cb1-280" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedDPOType:</span></span>
<span id="cb1-281"><a href="#cb1-281" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-282"><a href="#cb1-282" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-283"><a href="#cb1-283" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_chosen</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-284"><a href="#cb1-284" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_rejected</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-285"><a href="#cb1-285" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">prompt_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-286"><a href="#cb1-286" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chosen_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-287"><a href="#cb1-287" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">rejected_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-288"><a href="#cb1-288" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-289"><a href="#cb1-289" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-290"><a href="#cb1-290" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_messages</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-291"><a href="#cb1-291" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-292"><a href="#cb1-292" aria-hidden="true" tabindex="-1"></a><span class="co"> # For KTODataset:</span></span>
<span id="cb1-293"><a href="#cb1-293" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-294"><a href="#cb1-294" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-295"><a href="#cb1-295" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> UserDefinedKTOType | str | None</span></span>
<span id="cb1-296"><a href="#cb1-296" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedKTOType:</span></span>
<span id="cb1-297"><a href="#cb1-297" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-298"><a href="#cb1-298" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-299"><a href="#cb1-299" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_completion</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-300"><a href="#cb1-300" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_label</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-301"><a href="#cb1-301" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">prompt_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-302"><a href="#cb1-302" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">completion_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-303"><a href="#cb1-303" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-304"><a href="#cb1-304" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-305"><a href="#cb1-305" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-306"><a href="#cb1-306" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-307"><a href="#cb1-307" aria-hidden="true" tabindex="-1"></a><span class="co"> # For StepwiseSupervisedDataset:</span></span>
<span id="cb1-308"><a href="#cb1-308" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-309"><a href="#cb1-309" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-310"><a href="#cb1-310" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-311"><a href="#cb1-311" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-312"><a href="#cb1-312" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">step_separator</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-313"><a href="#cb1-313" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">max_completion_length</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-314"><a href="#cb1-314" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">train_on_last_step_only</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-315"><a href="#cb1-315" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-316"><a href="#cb1-316" aria-hidden="true" tabindex="-1"></a><span class="co"># A list of one or more datasets to eval the model with. You can use either</span></span>
|
||
<span id="cb1-317"><a href="#cb1-317" aria-hidden="true" tabindex="-1"></a><span class="co"># test_datasets, or val_set_size, but not both.</span></span>
<span id="cb1-318"><a href="#cb1-318" aria-hidden="true" tabindex="-1"></a><span class="fu">test_datasets</span><span class="kw">:</span><span class="at"> Annotated[list[SFTDataset | DPODataset | KTODataset | StepwiseSupervisedDataset], MinLen(1)] | None</span></span>
<span id="cb1-319"><a href="#cb1-319" aria-hidden="true" tabindex="-1"></a><span class="co"> # For SFTDataset:</span></span>
<span id="cb1-320"><a href="#cb1-320" aria-hidden="true" tabindex="-1"></a><span class="co"> # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory</span></span>
<span id="cb1-321"><a href="#cb1-321" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-322"><a href="#cb1-322" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset split to load from</span></span>
<span id="cb1-323"><a href="#cb1-323" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-324"><a href="#cb1-324" aria-hidden="true" tabindex="-1"></a><span class="co"> # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]</span></span>
<span id="cb1-325"><a href="#cb1-325" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> str | UserDefinedPrompterType | None</span></span>
<span id="cb1-326"><a href="#cb1-326" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedPrompterType:</span></span>
<span id="cb1-327"><a href="#cb1-327" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom user instruction prompt</span></span>
<span id="cb1-328"><a href="#cb1-328" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-329"><a href="#cb1-329" aria-hidden="true" tabindex="-1"></a><span class="co"> # Use {system} as key to be replaced</span></span>
<span id="cb1-330"><a href="#cb1-330" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-331"><a href="#cb1-331" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-332"><a href="#cb1-332" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_instruction</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-333"><a href="#cb1-333" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_input</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-334"><a href="#cb1-334" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_output</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-335"><a href="#cb1-335" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-336"><a href="#cb1-336" aria-hidden="true" tabindex="-1"></a><span class="co"> # Customizable to be single line or multi-line. Use {instruction}/{input} as key to</span></span>
|
||
<span id="cb1-337"><a href="#cb1-337" aria-hidden="true" tabindex="-1"></a><span class="co"> # be replaced. 'format' can include {input}</span></span>
|
||
<span id="cb1-338"><a href="#cb1-338" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-339"><a href="#cb1-339" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'no_input_format' cannot include {input}</span></span>
<span id="cb1-340"><a href="#cb1-340" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">no_input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-341"><a href="#cb1-341" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_transform</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-342"><a href="#cb1-342" aria-hidden="true" tabindex="-1"></a><span class="co"> # split dataset into N pieces (use with shards_idx)</span></span>
<span id="cb1-343"><a href="#cb1-343" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-344"><a href="#cb1-344" aria-hidden="true" tabindex="-1"></a><span class="co"> # the index of sharded dataset to use</span></span>
<span id="cb1-345"><a href="#cb1-345" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards_idx</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-346"><a href="#cb1-346" aria-hidden="true" tabindex="-1"></a><span class="co"> # process dataset in N sequential chunks for memory efficiency (exclusive with</span></span>
<span id="cb1-347"><a href="#cb1-347" aria-hidden="true" tabindex="-1"></a><span class="co"> # `shards`)</span></span>
<span id="cb1-348"><a href="#cb1-348" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">preprocess_shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-349"><a href="#cb1-349" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">conversation</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-350"><a href="#cb1-350" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-351"><a href="#cb1-351" aria-hidden="true" tabindex="-1"></a><span class="co"> # The name of the chat template to use for training, following values are supported:</span></span>
|
||
<span id="cb1-352"><a href="#cb1-352" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default: Uses the chat template that is available in the</span></span>
|
||
<span id="cb1-353"><a href="#cb1-353" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_config.json. If the chat template is not available in the tokenizer, it</span></span>
|
||
<span id="cb1-354"><a href="#cb1-354" aria-hidden="true" tabindex="-1"></a><span class="co"> # will raise an error. This is the default.</span></span>
|
||
<span id="cb1-355"><a href="#cb1-355" aria-hidden="true" tabindex="-1"></a><span class="co"> # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates</span></span>
|
||
<span id="cb1-356"><a href="#cb1-356" aria-hidden="true" tabindex="-1"></a><span class="co"> # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.</span></span>
|
||
<span id="cb1-357"><a href="#cb1-357" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_*: where * is the name of the chat template to fallback</span></span>
|
||
<span id="cb1-358"><a href="#cb1-358" aria-hidden="true" tabindex="-1"></a><span class="co"> # to if the tokenizer does not have a chat template else default to tokenizer. E.g.</span></span>
|
||
<span id="cb1-359"><a href="#cb1-359" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat</span></span>
|
||
<span id="cb1-360"><a href="#cb1-360" aria-hidden="true" tabindex="-1"></a><span class="co"> # template. The custom jinja template should be provided in the chat_template_jinja</span></span>
|
||
<span id="cb1-361"><a href="#cb1-361" aria-hidden="true" tabindex="-1"></a><span class="co"> # field.</span></span>
<span id="cb1-362"><a href="#cb1-362" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template</span><span class="kw">:</span><span class="at"> ChatTemplate | str | None</span></span>
<span id="cb1-363"><a href="#cb1-363" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom jinja chat template or path to jinja file. Used only if `chat_template:</span></span>
|
||
<span id="cb1-364"><a href="#cb1-364" aria-hidden="true" tabindex="-1"></a><span class="co"> # jinja` or empty.</span></span>
<span id="cb1-365"><a href="#cb1-365" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template_jinja</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-366"><a href="#cb1-366" aria-hidden="true" tabindex="-1"></a><span class="co"> # path to source data files</span></span>
<span id="cb1-367"><a href="#cb1-367" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> str | list[str] | None</span></span>
<span id="cb1-368"><a href="#cb1-368" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-369"><a href="#cb1-369" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset configuration to load</span></span>
<span id="cb1-370"><a href="#cb1-370" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-371"><a href="#cb1-371" aria-hidden="true" tabindex="-1"></a><span class="co"> # defines the datatype when path is a file</span></span>
<span id="cb1-372"><a href="#cb1-372" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">ds_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-373"><a href="#cb1-373" aria-hidden="true" tabindex="-1"></a><span class="co"> # For `completion` datasets only, uses the provided field instead of `text` column</span></span>
<span id="cb1-374"><a href="#cb1-374" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-375"><a href="#cb1-375" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_human</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-376"><a href="#cb1-376" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-377"><a href="#cb1-377" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the messages (default: "messages")</span></span>
<span id="cb1-378"><a href="#cb1-378" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_messages</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-379"><a href="#cb1-379" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the tools (default: "tools"). Must be a list[dict] and follow [JSON</span></span>
<span id="cb1-380"><a href="#cb1-380" aria-hidden="true" tabindex="-1"></a><span class="co"> # schema](https://json-schema.org/learn/getting-started-step-by-step).</span></span>
<span id="cb1-381"><a href="#cb1-381" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_tools</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-382"><a href="#cb1-382" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the reasoning trace (default: "reasoning_content").</span></span>
<span id="cb1-383"><a href="#cb1-383" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_thinking</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-384"><a href="#cb1-384" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key the chat template expects that indicates the reasoning trace.</span></span>
<span id="cb1-385"><a href="#cb1-385" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">template_thinking_key</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-386"><a href="#cb1-386" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-387"><a href="#cb1-387" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_role</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-388"><a href="#cb1-388" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-389"><a href="#cb1-389" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_content</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-390"><a href="#cb1-390" aria-hidden="true" tabindex="-1"></a><span class="co"> # Mapping of properties from the input dataset to the chat template. (default:</span></span>
|
||
<span id="cb1-391"><a href="#cb1-391" aria-hidden="true" tabindex="-1"></a><span class="co"> # message_property_mappings={'role':'role', 'content':'content'}) If a property exists</span></span>
|
||
<span id="cb1-392"><a href="#cb1-392" aria-hidden="true" tabindex="-1"></a><span class="co"> # in the template but not in this mapping, the system will attempt to load it directly</span></span>
|
||
<span id="cb1-393"><a href="#cb1-393" aria-hidden="true" tabindex="-1"></a><span class="co"> # from the message using the property name as the key. Example: In the mapping below,</span></span>
|
||
<span id="cb1-394"><a href="#cb1-394" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and</span></span>
|
||
<span id="cb1-395"><a href="#cb1-395" aria-hidden="true" tabindex="-1"></a><span class="co"> # used as 'content' in the chat template.</span></span>
<span id="cb1-396"><a href="#cb1-396" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_property_mappings</span><span class="kw">:</span><span class="at"> dict[str, str] | None</span></span>
<span id="cb1-397"><a href="#cb1-397" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that indicates via boolean whether tokens of a turn</span></span>
<span id="cb1-398"><a href="#cb1-398" aria-hidden="true" tabindex="-1"></a><span class="co"> # should be considered for training. Useful to selectively train on certain turns</span></span>
<span id="cb1-399"><a href="#cb1-399" aria-hidden="true" tabindex="-1"></a><span class="co"> # besides the `roles_to_train`.</span></span>
<span id="cb1-400"><a href="#cb1-400" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-401"><a href="#cb1-401" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that contains the training details. Useful to</span></span>
<span id="cb1-402"><a href="#cb1-402" aria-hidden="true" tabindex="-1"></a><span class="co"> # selectively train on certain tokens in a turn. The value of the key is a List[Dict]</span></span>
<span id="cb1-403"><a href="#cb1-403" aria-hidden="true" tabindex="-1"></a><span class="co"> # containing `begin_offset` (start character index in content), `end_offset` (end</span></span>
<span id="cb1-404"><a href="#cb1-404" aria-hidden="true" tabindex="-1"></a><span class="co"> # character index in content), and `train` (boolean whether to train).</span></span>
<span id="cb1-405"><a href="#cb1-405" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training_detail</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-406"><a href="#cb1-406" aria-hidden="true" tabindex="-1"></a><span class="co"> # (for Qwen3 template only) Whether to split the assistant content based on a</span></span>
<span id="cb1-407"><a href="#cb1-407" aria-hidden="true" tabindex="-1"></a><span class="co"> # reasoning trace inside delimited tags</span></span>
<span id="cb1-408"><a href="#cb1-408" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split_thinking</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-409"><a href="#cb1-409" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">logprobs_field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-410"><a href="#cb1-410" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">temperature</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-411"><a href="#cb1-411" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles to train on. The tokens from these roles will be considered for the loss.</span></span>
<span id="cb1-412"><a href="#cb1-412" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles_to_train</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-413"><a href="#cb1-413" aria-hidden="true" tabindex="-1"></a><span class="co"> # Which EOS tokens to train on in the conversation. Possible values are: all: train on</span></span>
<span id="cb1-414"><a href="#cb1-414" aria-hidden="true" tabindex="-1"></a><span class="co"> # all EOS tokens, turn (default): train on the EOS token at the end of each trainable</span></span>
<span id="cb1-415"><a href="#cb1-415" aria-hidden="true" tabindex="-1"></a><span class="co"> # turn, last: train on the last EOS token in the conversation</span></span>
<span id="cb1-416"><a href="#cb1-416" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">train_on_eos</span><span class="kw">:</span><span class="at"> Literal['all', 'turn', 'last'] | None</span></span>
<span id="cb1-417"><a href="#cb1-417" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles mapping in the messages. The format is {target_role: [source_roles]}. All</span></span>
<span id="cb1-418"><a href="#cb1-418" aria-hidden="true" tabindex="-1"></a><span class="co"> # source roles will be mapped to the target role. The default is: user: ["human",</span></span>
<span id="cb1-419"><a href="#cb1-419" aria-hidden="true" tabindex="-1"></a><span class="co"> # "user"], assistant: ["gpt", "assistant"], system: ["system"], tool: ["tool"]</span></span>
<span id="cb1-420"><a href="#cb1-420" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles</span><span class="kw">:</span><span class="at"> dict[str, list[str]] | None</span></span>
<span id="cb1-421"><a href="#cb1-421" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to drop the system turn from the dataset. Only works with chat_template.</span></span>
|
||
<span id="cb1-422"><a href="#cb1-422" aria-hidden="true" tabindex="-1"></a><span class="co"> # This does not drop the default system message from chat_template if it exists. If</span></span>
|
||
<span id="cb1-423"><a href="#cb1-423" aria-hidden="true" tabindex="-1"></a><span class="co"> # you wish to, we recommend using a custom jinja template with the default system</span></span>
|
||
<span id="cb1-424"><a href="#cb1-424" aria-hidden="true" tabindex="-1"></a><span class="co"> # message removed or adding a system turn with empty content.</span></span>
<span id="cb1-425"><a href="#cb1-425" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">drop_system_message</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-426"><a href="#cb1-426" aria-hidden="true" tabindex="-1"></a><span class="co"> # Trust remote code for untrusted source</span></span>
<span id="cb1-427"><a href="#cb1-427" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-428"><a href="#cb1-428" aria-hidden="true" tabindex="-1"></a><span class="co"> # The specific revision of the dataset to use when loading from the Hugging Face Hub.</span></span>
<span id="cb1-429"><a href="#cb1-429" aria-hidden="true" tabindex="-1"></a><span class="co"> # This can be a commit hash, tag, or branch name. If not specified, the latest version</span></span>
<span id="cb1-430"><a href="#cb1-430" aria-hidden="true" tabindex="-1"></a><span class="co"> # will be used. This parameter is ignored for local datasets.</span></span>
<span id="cb1-431"><a href="#cb1-431" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-432"><a href="#cb1-432" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-433"><a href="#cb1-433" aria-hidden="true" tabindex="-1"></a><span class="co"> # For DPODataset:</span></span>
<span id="cb1-434"><a href="#cb1-434" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-435"><a href="#cb1-435" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-436"><a href="#cb1-436" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> UserDefinedDPOType | str | None</span></span>
<span id="cb1-437"><a href="#cb1-437" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedDPOType:</span></span>
<span id="cb1-438"><a href="#cb1-438" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-439"><a href="#cb1-439" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-440"><a href="#cb1-440" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_chosen</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-441"><a href="#cb1-441" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_rejected</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-442"><a href="#cb1-442" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">prompt_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-443"><a href="#cb1-443" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chosen_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-444"><a href="#cb1-444" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">rejected_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-445"><a href="#cb1-445" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-446"><a href="#cb1-446" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-447"><a href="#cb1-447" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_messages</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-448"><a href="#cb1-448" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-449"><a href="#cb1-449" aria-hidden="true" tabindex="-1"></a><span class="co"> # For KTODataset:</span></span>
<span id="cb1-450"><a href="#cb1-450" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-451"><a href="#cb1-451" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-452"><a href="#cb1-452" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> UserDefinedKTOType | str | None</span></span>
<span id="cb1-453"><a href="#cb1-453" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedKTOType:</span></span>
<span id="cb1-454"><a href="#cb1-454" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-455"><a href="#cb1-455" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-456"><a href="#cb1-456" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_completion</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-457"><a href="#cb1-457" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_label</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-458"><a href="#cb1-458" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">prompt_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-459"><a href="#cb1-459" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">completion_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-460"><a href="#cb1-460" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-461"><a href="#cb1-461" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-462"><a href="#cb1-462" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-463"><a href="#cb1-463" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-464"><a href="#cb1-464" aria-hidden="true" tabindex="-1"></a><span class="co"> # For StepwiseSupervisedDataset:</span></span>
<span id="cb1-465"><a href="#cb1-465" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-466"><a href="#cb1-466" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-467"><a href="#cb1-467" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-468"><a href="#cb1-468" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-469"><a href="#cb1-469" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">step_separator</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-470"><a href="#cb1-470" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">max_completion_length</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-471"><a href="#cb1-471" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">train_on_last_step_only</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-472"><a href="#cb1-472" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-473"><a href="#cb1-473" aria-hidden="true" tabindex="-1"></a><span class="co"># If false, the datasets will not be shuffled and will keep their original order in</span></span>
<span id="cb1-474"><a href="#cb1-474" aria-hidden="true" tabindex="-1"></a><span class="co"># `datasets`. The same applies to the `test_datasets` option and the</span></span>
<span id="cb1-475"><a href="#cb1-475" aria-hidden="true" tabindex="-1"></a><span class="co"># `pretraining_dataset` option. Default is true.</span></span>
<span id="cb1-476"><a href="#cb1-476" aria-hidden="true" tabindex="-1"></a><span class="fu">shuffle_merged_datasets</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-477"><a href="#cb1-477" aria-hidden="true" tabindex="-1"></a><span class="co"># If true, each dataset in `datasets` will be shuffled before merging. This allows</span></span>
<span id="cb1-478"><a href="#cb1-478" aria-hidden="true" tabindex="-1"></a><span class="co"># curriculum learning strategies to be applied at the dataset level. Default is false.</span></span>
<span id="cb1-479"><a href="#cb1-479" aria-hidden="true" tabindex="-1"></a><span class="fu">shuffle_before_merging_datasets</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-480"><a href="#cb1-480" aria-hidden="true" tabindex="-1"></a><span class="co"># Axolotl attempts to save the dataset as an arrow after packing the data together so</span></span>
|
||
<span id="cb1-481"><a href="#cb1-481" aria-hidden="true" tabindex="-1"></a><span class="co"># subsequent training attempts load faster, relative path</span></span>
<span id="cb1-482"><a href="#cb1-482" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_prepared_path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-483"><a href="#cb1-483" aria-hidden="true" tabindex="-1"></a><span class="co"># Num shards for whole dataset</span></span>
<span id="cb1-484"><a href="#cb1-484" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_shard_num</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-485"><a href="#cb1-485" aria-hidden="true" tabindex="-1"></a><span class="co"># Index of shard to use for whole dataset</span></span>
<span id="cb1-486"><a href="#cb1-486" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_shard_idx</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-487"><a href="#cb1-487" aria-hidden="true" tabindex="-1"></a><span class="fu">skip_prepare_dataset</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-488"><a href="#cb1-488" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of shards to save the prepared dataset</span></span>
<span id="cb1-489"><a href="#cb1-489" aria-hidden="true" tabindex="-1"></a><span class="fu">num_dataset_shards_to_save</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-490"><a href="#cb1-490" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-491"><a href="#cb1-491" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize</span></span>
<span id="cb1-492"><a href="#cb1-492" aria-hidden="true" tabindex="-1"></a><span class="fu">pretraining_dataset</span><span class="kw">:</span><span class="at"> Annotated[list[PretrainingDataset | SFTDataset], MinLen(1)] | None</span></span>
<span id="cb1-493"><a href="#cb1-493" aria-hidden="true" tabindex="-1"></a><span class="co"> # For PretrainingDataset:</span></span>
<span id="cb1-494"><a href="#cb1-494" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-495"><a href="#cb1-495" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-496"><a href="#cb1-496" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None = train</span></span>
<span id="cb1-497"><a href="#cb1-497" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">text_column</span><span class="kw">:</span><span class="at"> str | None = text</span></span>
<span id="cb1-498"><a href="#cb1-498" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> str | None = pretrain</span></span>
<span id="cb1-499"><a href="#cb1-499" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-500"><a href="#cb1-500" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-501"><a href="#cb1-501" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">skip</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-502"><a href="#cb1-502" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-503"><a href="#cb1-503" aria-hidden="true" tabindex="-1"></a><span class="co"> # For SFTDataset:</span></span>
<span id="cb1-504"><a href="#cb1-504" aria-hidden="true" tabindex="-1"></a><span class="co"> # HuggingFace dataset repo | s3:// | gs:// | path to local file or directory</span></span>
<span id="cb1-505"><a href="#cb1-505" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-506"><a href="#cb1-506" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset split to load from</span></span>
<span id="cb1-507"><a href="#cb1-507" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-508"><a href="#cb1-508" aria-hidden="true" tabindex="-1"></a><span class="co"> # The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]</span></span>
<span id="cb1-509"><a href="#cb1-509" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="at"> str | UserDefinedPrompterType | None</span></span>
<span id="cb1-510"><a href="#cb1-510" aria-hidden="true" tabindex="-1"></a><span class="co"> # For UserDefinedPrompterType:</span></span>
<span id="cb1-511"><a href="#cb1-511" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom user instruction prompt</span></span>
<span id="cb1-512"><a href="#cb1-512" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_prompt</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-513"><a href="#cb1-513" aria-hidden="true" tabindex="-1"></a><span class="co"> # Use {system} as key to be replaced</span></span>
<span id="cb1-514"><a href="#cb1-514" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">system_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-515"><a href="#cb1-515" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_system</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-516"><a href="#cb1-516" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_instruction</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-517"><a href="#cb1-517" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_input</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-518"><a href="#cb1-518" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_output</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-519"><a href="#cb1-519" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-520"><a href="#cb1-520" aria-hidden="true" tabindex="-1"></a><span class="co"> # Customizable to be single line or multi-line. Use {instruction}/{input} as key to</span></span>
<span id="cb1-521"><a href="#cb1-521" aria-hidden="true" tabindex="-1"></a><span class="co"> # be replaced. 'format' can include {input}</span></span>
<span id="cb1-522"><a href="#cb1-522" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-523"><a href="#cb1-523" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'no_input_format' cannot include {input}</span></span>
<span id="cb1-524"><a href="#cb1-524" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">no_input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-525"><a href="#cb1-525" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_transform</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-526"><a href="#cb1-526" aria-hidden="true" tabindex="-1"></a><span class="co"> # split dataset into N pieces (use with shards_idx)</span></span>
<span id="cb1-527"><a href="#cb1-527" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-528"><a href="#cb1-528" aria-hidden="true" tabindex="-1"></a><span class="co"> # the index of sharded dataset to use</span></span>
<span id="cb1-529"><a href="#cb1-529" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">shards_idx</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-530"><a href="#cb1-530" aria-hidden="true" tabindex="-1"></a><span class="co"> # process dataset in N sequential chunks for memory efficiency (exclusive with</span></span>
<span id="cb1-531"><a href="#cb1-531" aria-hidden="true" tabindex="-1"></a><span class="co"> # `shards`)</span></span>
<span id="cb1-532"><a href="#cb1-532" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">preprocess_shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-533"><a href="#cb1-533" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">conversation</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-534"><a href="#cb1-534" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-535"><a href="#cb1-535" aria-hidden="true" tabindex="-1"></a><span class="co"> # The name of the chat template to use for training, following values are supported:</span></span>
<span id="cb1-536"><a href="#cb1-536" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default: Uses the chat template that is available in the</span></span>
<span id="cb1-537"><a href="#cb1-537" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_config.json. If the chat template is not available in the tokenizer, it</span></span>
<span id="cb1-538"><a href="#cb1-538" aria-hidden="true" tabindex="-1"></a><span class="co"> # will raise an error. This is the default.</span></span>
<span id="cb1-539"><a href="#cb1-539" aria-hidden="true" tabindex="-1"></a><span class="co"> # alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates</span></span>
<span id="cb1-540"><a href="#cb1-540" aria-hidden="true" tabindex="-1"></a><span class="co"> # are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.</span></span>
<span id="cb1-541"><a href="#cb1-541" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_*: where * is the name of the chat template to fallback</span></span>
<span id="cb1-542"><a href="#cb1-542" aria-hidden="true" tabindex="-1"></a><span class="co"> # to if the tokenizer does not have a chat template else default to tokenizer. E.g.</span></span>
<span id="cb1-543"><a href="#cb1-543" aria-hidden="true" tabindex="-1"></a><span class="co"> # tokenizer_default_fallback_chatml. jinja: Uses a custom jinja template for the chat</span></span>
<span id="cb1-544"><a href="#cb1-544" aria-hidden="true" tabindex="-1"></a><span class="co"> # template. The custom jinja template should be provided in the chat_template_jinja</span></span>
<span id="cb1-545"><a href="#cb1-545" aria-hidden="true" tabindex="-1"></a><span class="co"> # field.</span></span>
<span id="cb1-546"><a href="#cb1-546" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template</span><span class="kw">:</span><span class="at"> ChatTemplate | str | None</span></span>
<span id="cb1-547"><a href="#cb1-547" aria-hidden="true" tabindex="-1"></a><span class="co"> # Custom jinja chat template or path to jinja file. Used only if `chat_template:</span></span>
<span id="cb1-548"><a href="#cb1-548" aria-hidden="true" tabindex="-1"></a><span class="co"> # jinja` or empty.</span></span>
<span id="cb1-549"><a href="#cb1-549" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">chat_template_jinja</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-550"><a href="#cb1-550" aria-hidden="true" tabindex="-1"></a><span class="co"> # path to source data files</span></span>
<span id="cb1-551"><a href="#cb1-551" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">data_files</span><span class="kw">:</span><span class="at"> str | list[str] | None</span></span>
<span id="cb1-552"><a href="#cb1-552" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">input_format</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-553"><a href="#cb1-553" aria-hidden="true" tabindex="-1"></a><span class="co"> # name of dataset configuration to load</span></span>
<span id="cb1-554"><a href="#cb1-554" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-555"><a href="#cb1-555" aria-hidden="true" tabindex="-1"></a><span class="co"> # defines the datatype when path is a file</span></span>
<span id="cb1-556"><a href="#cb1-556" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">ds_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-557"><a href="#cb1-557" aria-hidden="true" tabindex="-1"></a><span class="co"> # For `completion` datasets only, uses the provided field instead of `text` column</span></span>
<span id="cb1-558"><a href="#cb1-558" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-559"><a href="#cb1-559" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_human</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-560"><a href="#cb1-560" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-561"><a href="#cb1-561" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the messages (default: "messages")</span></span>
<span id="cb1-562"><a href="#cb1-562" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_messages</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-563"><a href="#cb1-563" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the tools (default: "tools"). Must be a list[dict] and follow [JSON</span></span>
<span id="cb1-564"><a href="#cb1-564" aria-hidden="true" tabindex="-1"></a><span class="co"> # schema](https://json-schema.org/learn/getting-started-step-by-step).</span></span>
<span id="cb1-565"><a href="#cb1-565" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_tools</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-566"><a href="#cb1-566" aria-hidden="true" tabindex="-1"></a><span class="co"> # Key containing the reasoning trace (default: "reasoning_content").</span></span>
<span id="cb1-567"><a href="#cb1-567" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">field_thinking</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-568"><a href="#cb1-568" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key the chat template expects that indicates the reasoning trace.</span></span>
<span id="cb1-569"><a href="#cb1-569" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">template_thinking_key</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-570"><a href="#cb1-570" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-571"><a href="#cb1-571" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_role</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-572"><a href="#cb1-572" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-573"><a href="#cb1-573" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_content</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-574"><a href="#cb1-574" aria-hidden="true" tabindex="-1"></a><span class="co"> # Mapping of properties from the input dataset to the chat template. (default:</span></span>
<span id="cb1-575"><a href="#cb1-575" aria-hidden="true" tabindex="-1"></a><span class="co"> # message_property_mappings={'role':'role', 'content':'content'}) If a property exists</span></span>
<span id="cb1-576"><a href="#cb1-576" aria-hidden="true" tabindex="-1"></a><span class="co"> # in the template but not in this mapping, the system will attempt to load it directly</span></span>
<span id="cb1-577"><a href="#cb1-577" aria-hidden="true" tabindex="-1"></a><span class="co"> # from the message using the property name as the key. Example: In the mapping below,</span></span>
<span id="cb1-578"><a href="#cb1-578" aria-hidden="true" tabindex="-1"></a><span class="co"> # 'from' is loaded from input dataset and used as 'role', while 'value' is loaded and</span></span>
<span id="cb1-579"><a href="#cb1-579" aria-hidden="true" tabindex="-1"></a><span class="co"> # used as 'content' in the chat template.</span></span>
<span id="cb1-580"><a href="#cb1-580" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_property_mappings</span><span class="kw">:</span><span class="at"> dict[str, str] | None</span></span>
<span id="cb1-581"><a href="#cb1-581" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that indicates via boolean whether tokens of a turn</span></span>
<span id="cb1-582"><a href="#cb1-582" aria-hidden="true" tabindex="-1"></a><span class="co"> # should be considered for training. Useful to selectively train on certain turns</span></span>
<span id="cb1-583"><a href="#cb1-583" aria-hidden="true" tabindex="-1"></a><span class="co"> # besides the `roles_to_train`.</span></span>
<span id="cb1-584"><a href="#cb1-584" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-585"><a href="#cb1-585" aria-hidden="true" tabindex="-1"></a><span class="co"> # The key in the message turn that contains the training details. Useful to</span></span>
<span id="cb1-586"><a href="#cb1-586" aria-hidden="true" tabindex="-1"></a><span class="co"> # selectively train on certain tokens in a turn. The value of the key is a List[Dict]</span></span>
<span id="cb1-587"><a href="#cb1-587" aria-hidden="true" tabindex="-1"></a><span class="co"> # containing `begin_offset` (start character index in content), `end_offset` (end</span></span>
<span id="cb1-588"><a href="#cb1-588" aria-hidden="true" tabindex="-1"></a><span class="co"> # character index in content), and `train` (boolean whether to train).</span></span>
<span id="cb1-589"><a href="#cb1-589" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">message_field_training_detail</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-590"><a href="#cb1-590" aria-hidden="true" tabindex="-1"></a><span class="co"> # (for Qwen3 template only) Whether to split the assistant content based on a</span></span>
<span id="cb1-591"><a href="#cb1-591" aria-hidden="true" tabindex="-1"></a><span class="co"> # reasoning trace inside delimited tags</span></span>
<span id="cb1-592"><a href="#cb1-592" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">split_thinking</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-593"><a href="#cb1-593" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">logprobs_field</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-594"><a href="#cb1-594" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">temperature</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-595"><a href="#cb1-595" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles to train on. The tokens from these roles will be considered for the loss.</span></span>
<span id="cb1-596"><a href="#cb1-596" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles_to_train</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-597"><a href="#cb1-597" aria-hidden="true" tabindex="-1"></a><span class="co"> # Which EOS tokens to train on in the conversation. Possible values are: all: train on</span></span>
<span id="cb1-598"><a href="#cb1-598" aria-hidden="true" tabindex="-1"></a><span class="co"> # all EOS tokens, turn (default): train on the EOS token at the end of each trainable</span></span>
<span id="cb1-599"><a href="#cb1-599" aria-hidden="true" tabindex="-1"></a><span class="co"> # turn, last: train on the last EOS token in the conversation</span></span>
<span id="cb1-600"><a href="#cb1-600" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">train_on_eos</span><span class="kw">:</span><span class="at"> Literal['all', 'turn', 'last'] | None</span></span>
<span id="cb1-601"><a href="#cb1-601" aria-hidden="true" tabindex="-1"></a><span class="co"> # Roles mapping in the messages. The format is {target_role: [source_roles]}. All</span></span>
<span id="cb1-602"><a href="#cb1-602" aria-hidden="true" tabindex="-1"></a><span class="co"> # source roles will be mapped to the target role. The default is: user: ["human",</span></span>
<span id="cb1-603"><a href="#cb1-603" aria-hidden="true" tabindex="-1"></a><span class="co"> # "user"], assistant: ["gpt", "assistant"], system: ["system"], tool: ["tool"]</span></span>
<span id="cb1-604"><a href="#cb1-604" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">roles</span><span class="kw">:</span><span class="at"> dict[str, list[str]] | None</span></span>
<span id="cb1-605"><a href="#cb1-605" aria-hidden="true" tabindex="-1"></a><span class="co"> # Whether to drop the system turn from the dataset. Only works with chat_template.</span></span>
<span id="cb1-606"><a href="#cb1-606" aria-hidden="true" tabindex="-1"></a><span class="co"> # This does not drop the default system message from chat_template if it exists. If</span></span>
<span id="cb1-607"><a href="#cb1-607" aria-hidden="true" tabindex="-1"></a><span class="co"> # you wish to, we recommend using a custom jinja template with the default system</span></span>
<span id="cb1-608"><a href="#cb1-608" aria-hidden="true" tabindex="-1"></a><span class="co"> # message removed or adding a system turn with empty content.</span></span>
<span id="cb1-609"><a href="#cb1-609" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">drop_system_message</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-610"><a href="#cb1-610" aria-hidden="true" tabindex="-1"></a><span class="co"> # Trust remote code for untrusted source</span></span>
<span id="cb1-611"><a href="#cb1-611" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-612"><a href="#cb1-612" aria-hidden="true" tabindex="-1"></a><span class="co"> # The specific revision of the dataset to use when loading from the Hugging Face Hub.</span></span>
<span id="cb1-613"><a href="#cb1-613" aria-hidden="true" tabindex="-1"></a><span class="co"> # This can be a commit hash, tag, or branch name. If not specified, the latest version</span></span>
<span id="cb1-614"><a href="#cb1-614" aria-hidden="true" tabindex="-1"></a><span class="co"> # will be used. This parameter is ignored for local datasets.</span></span>
<span id="cb1-615"><a href="#cb1-615" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">revision</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-616"><a href="#cb1-616" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-617"><a href="#cb1-617" aria-hidden="true" tabindex="-1"></a><span class="co"># The maximum number of processes to use while preprocessing your input dataset. This</span></span>
<span id="cb1-618"><a href="#cb1-618" aria-hidden="true" tabindex="-1"></a><span class="co"># defaults to `os.cpu_count()` if not set. For Runpod VMs, it will default to number of</span></span>
<span id="cb1-619"><a href="#cb1-619" aria-hidden="true" tabindex="-1"></a><span class="co"># vCPUs via RUNPOD_CPU_COUNT.</span></span>
<span id="cb1-620"><a href="#cb1-620" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_processes</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-621"><a href="#cb1-621" aria-hidden="true" tabindex="-1"></a><span class="co"># Deduplicates datasets and test_datasets with identical entries</span></span>
<span id="cb1-622"><a href="#cb1-622" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_exact_deduplication</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-623"><a href="#cb1-623" aria-hidden="true" tabindex="-1"></a><span class="co"># Keep dataset in memory while preprocessing. Only needed if cached dataset is taking</span></span>
<span id="cb1-624"><a href="#cb1-624" aria-hidden="true" tabindex="-1"></a><span class="co"># too much storage</span></span>
<span id="cb1-625"><a href="#cb1-625" aria-hidden="true" tabindex="-1"></a><span class="fu">dataset_keep_in_memory</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-626"><a href="#cb1-626" aria-hidden="true" tabindex="-1"></a><span class="fu">dataloader_pin_memory</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-627"><a href="#cb1-627" aria-hidden="true" tabindex="-1"></a><span class="fu">dataloader_num_workers</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-628"><a href="#cb1-628" aria-hidden="true" tabindex="-1"></a><span class="fu">dataloader_prefetch_factor</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-629"><a href="#cb1-629" aria-hidden="true" tabindex="-1"></a><span class="fu">dataloader_drop_last</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-630"><a href="#cb1-630" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-631"><a href="#cb1-631" aria-hidden="true" tabindex="-1"></a><span class="fu">accelerator_config</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-632"><a href="#cb1-632" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-633"><a href="#cb1-633" aria-hidden="true" tabindex="-1"></a><span class="fu">remove_unused_columns</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-634"><a href="#cb1-634" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-635"><a href="#cb1-635" aria-hidden="true" tabindex="-1"></a><span class="co"># Push prepared dataset to hub - repo_org/repo_name</span></span>
<span id="cb1-636"><a href="#cb1-636" aria-hidden="true" tabindex="-1"></a><span class="fu">push_dataset_to_hub</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-637"><a href="#cb1-637" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private</span></span>
<span id="cb1-638"><a href="#cb1-638" aria-hidden="true" tabindex="-1"></a><span class="co"># datasets. Required to be true when used in combination with `push_dataset_to_hub`</span></span>
<span id="cb1-639"><a href="#cb1-639" aria-hidden="true" tabindex="-1"></a><span class="fu">hf_use_auth_token</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-640"><a href="#cb1-640" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-641"><a href="#cb1-641" aria-hidden="true" tabindex="-1"></a><span class="fu">device</span><span class="kw">:</span><span class="at"> Any | None</span></span>
<span id="cb1-642"><a href="#cb1-642" aria-hidden="true" tabindex="-1"></a><span class="co"># Passed through to transformers when loading the model when launched without</span></span>
<span id="cb1-643"><a href="#cb1-643" aria-hidden="true" tabindex="-1"></a><span class="co"># accelerate. Use `sequential` when training w/ model parallelism to limit memory</span></span>
<span id="cb1-644"><a href="#cb1-644" aria-hidden="true" tabindex="-1"></a><span class="fu">device_map</span><span class="kw">:</span><span class="at"> Any | None</span></span>
<span id="cb1-645"><a href="#cb1-645" aria-hidden="true" tabindex="-1"></a><span class="fu">world_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-646"><a href="#cb1-646" aria-hidden="true" tabindex="-1"></a><span class="co"># Don't mess with this, it's here for accelerate and torchrun</span></span>
<span id="cb1-647"><a href="#cb1-647" aria-hidden="true" tabindex="-1"></a><span class="fu">local_rank</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-648"><a href="#cb1-648" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-649"><a href="#cb1-649" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-650"><a href="#cb1-650" aria-hidden="true" tabindex="-1"></a><span class="co"># Seed for reproducibility</span></span>
<span id="cb1-651"><a href="#cb1-651" aria-hidden="true" tabindex="-1"></a><span class="fu">seed</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-652"><a href="#cb1-652" aria-hidden="true" tabindex="-1"></a><span class="co"># Advanced DDP Arguments - timeout</span></span>
<span id="cb1-653"><a href="#cb1-653" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_timeout</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-654"><a href="#cb1-654" aria-hidden="true" tabindex="-1"></a><span class="co"># Advanced DDP Arguments - bucket cap in MB</span></span>
<span id="cb1-655"><a href="#cb1-655" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_bucket_cap_mb</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-656"><a href="#cb1-656" aria-hidden="true" tabindex="-1"></a><span class="co"># Advanced DDP Arguments - broadcast buffers</span></span>
<span id="cb1-657"><a href="#cb1-657" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_broadcast_buffers</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-658"><a href="#cb1-658" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_find_unused_parameters</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-659"><a href="#cb1-659" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-660"><a href="#cb1-660" aria-hidden="true" tabindex="-1"></a><span class="co"># Approximate number of predictions sent to wandb depending on batch size. Enabled above</span></span>
<span id="cb1-661"><a href="#cb1-661" aria-hidden="true" tabindex="-1"></a><span class="co"># 0. Default is 0</span></span>
<span id="cb1-662"><a href="#cb1-662" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_table_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-663"><a href="#cb1-663" aria-hidden="true" tabindex="-1"></a><span class="co"># Total number of tokens generated for predictions sent to wandb. Default is 128</span></span>
<span id="cb1-664"><a href="#cb1-664" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_max_new_tokens</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-665"><a href="#cb1-665" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to run causal language model evaluation for metrics in</span></span>
<span id="cb1-666"><a href="#cb1-666" aria-hidden="true" tabindex="-1"></a><span class="co"># `eval_causal_lm_metrics`</span></span>
<span id="cb1-667"><a href="#cb1-667" aria-hidden="true" tabindex="-1"></a><span class="fu">do_causal_lm_eval</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-668"><a href="#cb1-668" aria-hidden="true" tabindex="-1"></a><span class="co"># HF evaluate metrics used during evaluation. Default is ['sacrebleu', 'comet', 'ter',</span></span>
<span id="cb1-669"><a href="#cb1-669" aria-hidden="true" tabindex="-1"></a><span class="co"># 'chrf', 'perplexity']</span></span>
<span id="cb1-670"><a href="#cb1-670" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_causal_lm_metrics</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-671"><a href="#cb1-671" aria-hidden="true" tabindex="-1"></a><span class="fu">do_bench_eval</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-672"><a href="#cb1-672" aria-hidden="true" tabindex="-1"></a><span class="fu">bench_dataset</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-673"><a href="#cb1-673" aria-hidden="true" tabindex="-1"></a><span class="fu">bench_split</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-674"><a href="#cb1-674" aria-hidden="true" tabindex="-1"></a><span class="fu">metric_for_best_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-675"><a href="#cb1-675" aria-hidden="true" tabindex="-1"></a><span class="fu">greater_is_better</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-676"><a href="#cb1-676" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-677"><a href="#cb1-677" aria-hidden="true" tabindex="-1"></a><span class="co"># High loss value, indicating the learning has broken down (a good estimate is ~2 times</span></span>
<span id="cb1-678"><a href="#cb1-678" aria-hidden="true" tabindex="-1"></a><span class="co"># the loss at the start of training)</span></span>
<span id="cb1-679"><a href="#cb1-679" aria-hidden="true" tabindex="-1"></a><span class="fu">loss_watchdog_threshold</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-680"><a href="#cb1-680" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of high-loss steps in a row before the trainer aborts (default: 3)</span></span>
<span id="cb1-681"><a href="#cb1-681" aria-hidden="true" tabindex="-1"></a><span class="fu">loss_watchdog_patience</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-682"><a href="#cb1-682" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-683"><a href="#cb1-683" aria-hidden="true" tabindex="-1"></a><span class="co"># Run garbage collection every `gc_steps` steps. -1 will run on epoch end and before</span></span>
<span id="cb1-684"><a href="#cb1-684" aria-hidden="true" tabindex="-1"></a><span class="co"># evaluations. Default is 0 (disabled).</span></span>
<span id="cb1-685"><a href="#cb1-685" aria-hidden="true" tabindex="-1"></a><span class="fu">gc_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-686"><a href="#cb1-686" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-687"><a href="#cb1-687" aria-hidden="true" tabindex="-1"></a><span class="co"># Use CUDA bf16. bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection.</span></span>
<span id="cb1-688"><a href="#cb1-688" aria-hidden="true" tabindex="-1"></a><span class="co"># require >=ampere</span></span>
<span id="cb1-689"><a href="#cb1-689" aria-hidden="true" tabindex="-1"></a><span class="fu">bf16</span><span class="kw">:</span><span class="at"> Literal['auto'] | bool | None = auto</span></span>
<span id="cb1-690"><a href="#cb1-690" aria-hidden="true" tabindex="-1"></a><span class="co"># Use CUDA fp16</span></span>
<span id="cb1-691"><a href="#cb1-691" aria-hidden="true" tabindex="-1"></a><span class="fu">fp16</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-692"><a href="#cb1-692" aria-hidden="true" tabindex="-1"></a><span class="co"># Enable FP8 mixed precision training using TorchAO. Best used in combination with</span></span>
<span id="cb1-693"><a href="#cb1-693" aria-hidden="true" tabindex="-1"></a><span class="co"># torch.compile.</span></span>
<span id="cb1-694"><a href="#cb1-694" aria-hidden="true" tabindex="-1"></a><span class="fu">fp8</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-695"><a href="#cb1-695" aria-hidden="true" tabindex="-1"></a><span class="co"># Enable FSDP float8 all-gather optimization for FP8 training. Can improve training</span></span>
<span id="cb1-696"><a href="#cb1-696" aria-hidden="true" tabindex="-1"></a><span class="co"># speed by 10-15% when FSDP is enabled.</span></span>
<span id="cb1-697"><a href="#cb1-697" aria-hidden="true" tabindex="-1"></a><span class="fu">fp8_enable_fsdp_float8_all_gather</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-698"><a href="#cb1-698" aria-hidden="true" tabindex="-1"></a><span class="co"># No AMP (automatic mixed precision) - require >=ampere</span></span>
<span id="cb1-699"><a href="#cb1-699" aria-hidden="true" tabindex="-1"></a><span class="fu">bfloat16</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-700"><a href="#cb1-700" aria-hidden="true" tabindex="-1"></a><span class="co"># No AMP (automatic mixed precision)</span></span>
<span id="cb1-701"><a href="#cb1-701" aria-hidden="true" tabindex="-1"></a><span class="fu">float16</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-702"><a href="#cb1-702" aria-hidden="true" tabindex="-1"></a><span class="co"># Use CUDA tf32 - require >=ampere</span></span>
<span id="cb1-703"><a href="#cb1-703" aria-hidden="true" tabindex="-1"></a><span class="fu">tf32</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-704"><a href="#cb1-704" aria-hidden="true" tabindex="-1"></a><span class="fu">float32</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-705"><a href="#cb1-705" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-706"><a href="#cb1-706" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use gradient checkpointing. Available options are: true, false, 'offload',</span></span>
<span id="cb1-707"><a href="#cb1-707" aria-hidden="true" tabindex="-1"></a><span class="co"># 'offload_disk'.</span></span>
<span id="cb1-708"><a href="#cb1-708" aria-hidden="true" tabindex="-1"></a><span class="co"># https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing</span></span>
<span id="cb1-709"><a href="#cb1-709" aria-hidden="true" tabindex="-1"></a><span class="fu">gradient_checkpointing</span><span class="kw">:</span><span class="at"> Literal['offload', 'offload_disk'] | bool | None = False</span></span>
<span id="cb1-710"><a href="#cb1-710" aria-hidden="true" tabindex="-1"></a><span class="co"># Additional kwargs to pass to the trainer for gradient checkpointing</span></span>
<span id="cb1-711"><a href="#cb1-711" aria-hidden="true" tabindex="-1"></a><span class="fu">gradient_checkpointing_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-712"><a href="#cb1-712" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to offload activations. Available options are: true, false, 'legacy', 'disk'.</span></span>
<span id="cb1-713"><a href="#cb1-713" aria-hidden="true" tabindex="-1"></a><span class="fu">activation_offloading</span><span class="kw">:</span><span class="at"> Literal['legacy', 'disk'] | bool | None = False</span></span>
<span id="cb1-714"><a href="#cb1-714" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-715"><a href="#cb1-715" aria-hidden="true" tabindex="-1"></a><span class="fu">unfrozen_parameters</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-716"><a href="#cb1-716" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-717"><a href="#cb1-717" aria-hidden="true" tabindex="-1"></a><span class="co"># The maximum length of an input to train with, this should typically be less than 2048</span></span>
<span id="cb1-718"><a href="#cb1-718" aria-hidden="true" tabindex="-1"></a><span class="co"># as most models have a token/context limit of 2048</span></span>
<span id="cb1-719"><a href="#cb1-719" aria-hidden="true" tabindex="-1"></a><span class="fu">sequence_len</span><span class="kw">:</span><span class="at"> int = 512</span></span>
<span id="cb1-720"><a href="#cb1-720" aria-hidden="true" tabindex="-1"></a><span class="co"># What to do when a tokenized row exceeds sequence_len. 'drop' removes the row;</span></span>
<span id="cb1-721"><a href="#cb1-721" aria-hidden="true" tabindex="-1"></a><span class="co"># 'truncate' slices tensors to sequence_len. Defaults to 'drop' for backward</span></span>
<span id="cb1-722"><a href="#cb1-722" aria-hidden="true" tabindex="-1"></a><span class="co"># compatibility.</span></span>
<span id="cb1-723"><a href="#cb1-723" aria-hidden="true" tabindex="-1"></a><span class="fu">excess_length_strategy</span><span class="kw">:</span><span class="at"> Literal['drop', 'truncate'] | None</span></span>
<span id="cb1-724"><a href="#cb1-724" aria-hidden="true" tabindex="-1"></a><span class="co"># The maximum length of an input for evaluation. If not specified, defaults to</span></span>
<span id="cb1-725"><a href="#cb1-725" aria-hidden="true" tabindex="-1"></a><span class="co"># sequence_len</span></span>
<span id="cb1-726"><a href="#cb1-726" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_sequence_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-727"><a href="#cb1-727" aria-hidden="true" tabindex="-1"></a><span class="fu">min_sample_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-728"><a href="#cb1-728" aria-hidden="true" tabindex="-1"></a><span class="co"># maximum prompt length for RL training</span></span>
<span id="cb1-729"><a href="#cb1-729" aria-hidden="true" tabindex="-1"></a><span class="fu">max_prompt_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-730"><a href="#cb1-730" aria-hidden="true" tabindex="-1"></a><span class="co"># Use efficient multi-packing with block diagonal attention and per sequence</span></span>
<span id="cb1-731"><a href="#cb1-731" aria-hidden="true" tabindex="-1"></a><span class="co"># position_ids. Recommend set to 'true'</span></span>
<span id="cb1-732"><a href="#cb1-732" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-733"><a href="#cb1-733" aria-hidden="true" tabindex="-1"></a><span class="co"># The number of samples packed at a time. Increasing the following values helps with</span></span>
<span id="cb1-734"><a href="#cb1-734" aria-hidden="true" tabindex="-1"></a><span class="co"># packing, but usually only slightly (<%1.)</span></span>
<span id="cb1-735"><a href="#cb1-735" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing_group_size</span><span class="kw">:</span><span class="at"> int | None = 100000</span></span>
<span id="cb1-736"><a href="#cb1-736" aria-hidden="true" tabindex="-1"></a><span class="co"># The number of samples which can be packed into one sequence. Increase if using a large</span></span>
<span id="cb1-737"><a href="#cb1-737" aria-hidden="true" tabindex="-1"></a><span class="co"># sequence_len with many short samples.</span></span>
<span id="cb1-738"><a href="#cb1-738" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing_bin_size</span><span class="kw">:</span><span class="at"> int | None = 200</span></span>
<span id="cb1-739"><a href="#cb1-739" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to pack samples sequentially</span></span>
<span id="cb1-740"><a href="#cb1-740" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing_sequentially</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-741"><a href="#cb1-741" aria-hidden="true" tabindex="-1"></a><span class="co"># The multiprocessing start method to use for packing. Should be 'fork', 'spawn' or</span></span>
<span id="cb1-742"><a href="#cb1-742" aria-hidden="true" tabindex="-1"></a><span class="co"># 'forkserver'</span></span>
<span id="cb1-743"><a href="#cb1-743" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing_mp_start_method</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-744"><a href="#cb1-744" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to 'false' if getting errors during eval with sample_packing on</span></span>
<span id="cb1-745"><a href="#cb1-745" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_sample_packing</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-746"><a href="#cb1-746" aria-hidden="true" tabindex="-1"></a><span class="co"># Pad inputs so each step uses constant sized buffers. This will reduce memory</span></span>
<span id="cb1-747"><a href="#cb1-747" aria-hidden="true" tabindex="-1"></a><span class="co"># fragmentation and may prevent OOMs, by re-using memory more efficiently. Defaults to</span></span>
<span id="cb1-748"><a href="#cb1-748" aria-hidden="true" tabindex="-1"></a><span class="co"># True if `sample_packing` enabled</span></span>
<span id="cb1-749"><a href="#cb1-749" aria-hidden="true" tabindex="-1"></a><span class="fu">pad_to_sequence_len</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-750"><a href="#cb1-750" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use sequential sampling for curriculum learning</span></span>
<span id="cb1-751"><a href="#cb1-751" aria-hidden="true" tabindex="-1"></a><span class="fu">curriculum_sampling</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-752"><a href="#cb1-752" aria-hidden="true" tabindex="-1"></a><span class="fu">multipack_real_batches</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-753"><a href="#cb1-753" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-754"><a href="#cb1-754" aria-hidden="true" tabindex="-1"></a><span class="co"># Use batch flattening for speedups when not using sample_packing</span></span>
<span id="cb1-755"><a href="#cb1-755" aria-hidden="true" tabindex="-1"></a><span class="fu">batch_flattening</span><span class="kw">:</span><span class="at"> Literal['auto'] | bool | None</span></span>
<span id="cb1-756"><a href="#cb1-756" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-757"><a href="#cb1-757" aria-hidden="true" tabindex="-1"></a><span class="fu">use_pose</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-758"><a href="#cb1-758" aria-hidden="true" tabindex="-1"></a><span class="fu">pose_split_on_token_ids</span><span class="kw">:</span><span class="at"> list[int] | None</span></span>
<span id="cb1-759"><a href="#cb1-759" aria-hidden="true" tabindex="-1"></a><span class="fu">pose_max_context_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-760"><a href="#cb1-760" aria-hidden="true" tabindex="-1"></a><span class="fu">pose_num_chunks</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-761"><a href="#cb1-761" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-762"><a href="#cb1-762" aria-hidden="true" tabindex="-1"></a><span class="fu">pretrain_multipack_buffer_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-763"><a href="#cb1-763" aria-hidden="true" tabindex="-1"></a><span class="co"># whether to prevent cross attention for packed sequences during pretraining</span></span>
<span id="cb1-764"><a href="#cb1-764" aria-hidden="true" tabindex="-1"></a><span class="fu">pretrain_multipack_attn</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-765"><a href="#cb1-765" aria-hidden="true" tabindex="-1"></a><span class="co"># whether to concatenate samples during pretraining</span></span>
<span id="cb1-766"><a href="#cb1-766" aria-hidden="true" tabindex="-1"></a><span class="fu">pretraining_sample_concatenation</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-767"><a href="#cb1-767" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-768"><a href="#cb1-768" aria-hidden="true" tabindex="-1"></a><span class="co"># Use streaming mode for loading datasets</span></span>
<span id="cb1-769"><a href="#cb1-769" aria-hidden="true" tabindex="-1"></a><span class="fu">streaming</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-770"><a href="#cb1-770" aria-hidden="true" tabindex="-1"></a><span class="co"># Buffer size for multipack streaming datasets</span></span>
<span id="cb1-771"><a href="#cb1-771" aria-hidden="true" tabindex="-1"></a><span class="fu">streaming_multipack_buffer_size</span><span class="kw">:</span><span class="at"> int | None = 10000</span></span>
<span id="cb1-772"><a href="#cb1-772" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-773"><a href="#cb1-773" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use xformers attention patch https://github.com/facebookresearch/xformers</span></span>
<span id="cb1-774"><a href="#cb1-774" aria-hidden="true" tabindex="-1"></a><span class="fu">xformers_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-775"><a href="#cb1-775" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use scaled-dot-product attention https://pytorch.org/docs/stable/generated/</span></span>
<span id="cb1-776"><a href="#cb1-776" aria-hidden="true" tabindex="-1"></a><span class="co"># torch.nn.functional.scaled_dot_product_attention.html</span></span>
<span id="cb1-777"><a href="#cb1-777" aria-hidden="true" tabindex="-1"></a><span class="fu">sdp_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-778"><a href="#cb1-778" aria-hidden="true" tabindex="-1"></a><span class="co"># Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf</span></span>
<span id="cb1-779"><a href="#cb1-779" aria-hidden="true" tabindex="-1"></a><span class="fu">s2_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-780"><a href="#cb1-780" aria-hidden="true" tabindex="-1"></a><span class="fu">flex_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-781"><a href="#cb1-781" aria-hidden="true" tabindex="-1"></a><span class="fu">flex_attn_compile_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-782"><a href="#cb1-782" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention</span></span>
<span id="cb1-783"><a href="#cb1-783" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-784"><a href="#cb1-784" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use flash-attention cross entropy implementation - advanced use only</span></span>
<span id="cb1-785"><a href="#cb1-785" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_cross_entropy</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-786"><a href="#cb1-786" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use flash-attention rms norm implementation - advanced use only</span></span>
<span id="cb1-787"><a href="#cb1-787" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_rms_norm</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-788"><a href="#cb1-788" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to fuse part of the MLP into a single operation</span></span>
<span id="cb1-789"><a href="#cb1-789" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_fuse_mlp</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-790"><a href="#cb1-790" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use bettertransformers</span></span>
<span id="cb1-791"><a href="#cb1-791" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_optimum</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-792"><a href="#cb1-792" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-793"><a href="#cb1-793" aria-hidden="true" tabindex="-1"></a><span class="fu">eager_attention</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-794"><a href="#cb1-794" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-795"><a href="#cb1-795" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify a custom attention implementation, used mostly for kernels.</span></span>
<span id="cb1-796"><a href="#cb1-796" aria-hidden="true" tabindex="-1"></a><span class="fu">attn_implementation</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-797"><a href="#cb1-797" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-798"><a href="#cb1-798" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_cross_entropy_loss</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-799"><a href="#cb1-799" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_lora_mlp</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-800"><a href="#cb1-800" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_lora_qkv</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-801"><a href="#cb1-801" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_lora_o</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-802"><a href="#cb1-802" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_rms_norm</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-803"><a href="#cb1-803" aria-hidden="true" tabindex="-1"></a><span class="fu">unsloth_rope</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-804"><a href="#cb1-804" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-805"><a href="#cb1-805" aria-hidden="true" tabindex="-1"></a><span class="co"># Apply custom LoRA autograd functions and activation function Triton kernels for speed</span></span>
<span id="cb1-806"><a href="#cb1-806" aria-hidden="true" tabindex="-1"></a><span class="co"># and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html</span></span>
<span id="cb1-807"><a href="#cb1-807" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_mlp_kernel</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-808"><a href="#cb1-808" aria-hidden="true" tabindex="-1"></a><span class="co"># Apply custom LoRA autograd functions and activation function Triton kernels for speed</span></span>
<span id="cb1-809"><a href="#cb1-809" aria-hidden="true" tabindex="-1"></a><span class="co"># and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html</span></span>
<span id="cb1-810"><a href="#cb1-810" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_qkv_kernel</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-811"><a href="#cb1-811" aria-hidden="true" tabindex="-1"></a><span class="co"># Apply custom LoRA autograd functions and activation function Triton kernels for speed</span></span>
<span id="cb1-812"><a href="#cb1-812" aria-hidden="true" tabindex="-1"></a><span class="co"># and memory savings. See: https://docs.axolotl.ai/docs/lora_optims.html</span></span>
<span id="cb1-813"><a href="#cb1-813" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_o_kernel</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-814"><a href="#cb1-814" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-815"><a href="#cb1-815" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use chunked cross entropy loss for memory efficiency</span></span>
<span id="cb1-816"><a href="#cb1-816" aria-hidden="true" tabindex="-1"></a><span class="fu">chunked_cross_entropy</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-817"><a href="#cb1-817" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of chunks to use for chunked cross entropy loss</span></span>
<span id="cb1-818"><a href="#cb1-818" aria-hidden="true" tabindex="-1"></a><span class="fu">chunked_cross_entropy_num_chunks</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-819"><a href="#cb1-819" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-820"><a href="#cb1-820" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use ALST tiled mlp for memory efficient long context</span></span>
<span id="cb1-821"><a href="#cb1-821" aria-hidden="true" tabindex="-1"></a><span class="fu">tiled_mlp</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-822"><a href="#cb1-822" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-823"><a href="#cb1-823" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of shards to use for ALST tiled mlp. If unset, it will be set based on</span></span>
<span id="cb1-824"><a href="#cb1-824" aria-hidden="true" tabindex="-1"></a><span class="co"># seqlen/hidden_size</span></span>
<span id="cb1-825"><a href="#cb1-825" aria-hidden="true" tabindex="-1"></a><span class="fu">tiled_mlp_num_shards</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-826"><a href="#cb1-826" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-827"><a href="#cb1-827" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use original mlp for ALST tiled mlp. Otherwise uses a generic MLP based on</span></span>
|
||
<span id="cb1-828"><a href="#cb1-828" aria-hidden="true" tabindex="-1"></a><span class="co"># llama.</span></span>
<span id="cb1-829"><a href="#cb1-829" aria-hidden="true" tabindex="-1"></a><span class="fu">tiled_mlp_use_original_mlp</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-830"><a href="#cb1-830" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-831"><a href="#cb1-831" aria-hidden="true" tabindex="-1"></a><span class="fu">llama4_linearized_experts</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-832"><a href="#cb1-832" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-833"><a href="#cb1-833" aria-hidden="true" tabindex="-1"></a><span class="co"># Deepspeed config path. e.g., deepspeed_configs/zero3.json</span></span>
<span id="cb1-834"><a href="#cb1-834" aria-hidden="true" tabindex="-1"></a><span class="fu">deepspeed</span><span class="kw">:</span><span class="at"> str | dict[str, Any] | None</span></span>
<span id="cb1-835"><a href="#cb1-835" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use deepcompile for faster training with deepspeed</span></span>
<span id="cb1-836"><a href="#cb1-836" aria-hidden="true" tabindex="-1"></a><span class="fu">deepcompile</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-837"><a href="#cb1-837" aria-hidden="true" tabindex="-1"></a><span class="co"># FSDP configuration</span></span>
<span id="cb1-838"><a href="#cb1-838" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-839"><a href="#cb1-839" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-840"><a href="#cb1-840" aria-hidden="true" tabindex="-1"></a><span class="co"># FSDP configuration options</span></span>
<span id="cb1-841"><a href="#cb1-841" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp_config</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-842"><a href="#cb1-842" aria-hidden="true" tabindex="-1"></a><span class="co"># FSDP version</span></span>
<span id="cb1-843"><a href="#cb1-843" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp_version</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-844"><a href="#cb1-844" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp_final_state_dict_type</span><span class="kw">:</span><span class="at"> Literal['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] | None</span></span>
<span id="cb1-845"><a href="#cb1-845" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-846"><a href="#cb1-846" aria-hidden="true" tabindex="-1"></a><span class="co"># How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for</span></span>
<span id="cb1-847"><a href="#cb1-847" aria-hidden="true" tabindex="-1"></a><span class="co"># no eval.</span></span>
<span id="cb1-848"><a href="#cb1-848" aria-hidden="true" tabindex="-1"></a><span class="fu">val_set_size</span><span class="kw">:</span><span class="at"> float | None = 0.0</span></span>
<span id="cb1-849"><a href="#cb1-849" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-850"><a href="#cb1-850" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of devices to shard across. If not set, will use all available devices.</span></span>
<span id="cb1-851"><a href="#cb1-851" aria-hidden="true" tabindex="-1"></a><span class="fu">dp_shard_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-852"><a href="#cb1-852" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of devices to replicate across.</span></span>
<span id="cb1-853"><a href="#cb1-853" aria-hidden="true" tabindex="-1"></a><span class="fu">dp_replicate_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-854"><a href="#cb1-854" aria-hidden="true" tabindex="-1"></a><span class="co"># Deprecated: use `context_parallel_size` instead</span></span>
<span id="cb1-855"><a href="#cb1-855" aria-hidden="true" tabindex="-1"></a><span class="fu">sequence_parallel_degree</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-856"><a href="#cb1-856" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to a divisor of the number of GPUs available to split sequences into chunks of</span></span>
<span id="cb1-857"><a href="#cb1-857" aria-hidden="true" tabindex="-1"></a><span class="co"># equal size. Use in long context training to prevent OOM when sequences cannot fit into</span></span>
<span id="cb1-858"><a href="#cb1-858" aria-hidden="true" tabindex="-1"></a><span class="co"># a single GPU's VRAM. E.g., if 4 GPUs are available, set this value to 2 to split each</span></span>
<span id="cb1-859"><a href="#cb1-859" aria-hidden="true" tabindex="-1"></a><span class="co"># sequence into two equal-sized subsequences, or set to 4 to split into four equal-sized</span></span>
<span id="cb1-860"><a href="#cb1-860" aria-hidden="true" tabindex="-1"></a><span class="co"># subsequences. See https://docs.axolotl.ai/docs/sequence_parallelism.html for more</span></span>
<span id="cb1-861"><a href="#cb1-861" aria-hidden="true" tabindex="-1"></a><span class="co"># details.</span></span>
<span id="cb1-862"><a href="#cb1-862" aria-hidden="true" tabindex="-1"></a><span class="fu">context_parallel_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-863"><a href="#cb1-863" aria-hidden="true" tabindex="-1"></a><span class="co"># Optional; strides across the key dimension. Larger values use more memory but should</span></span>
<span id="cb1-864"><a href="#cb1-864" aria-hidden="true" tabindex="-1"></a><span class="co"># make training faster. Must evenly divide the number of KV heads in your model.</span></span>
<span id="cb1-865"><a href="#cb1-865" aria-hidden="true" tabindex="-1"></a><span class="fu">heads_k_stride</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-866"><a href="#cb1-866" aria-hidden="true" tabindex="-1"></a><span class="co"># One of 'varlen_llama3', 'batch_ring', 'batch_zigzag', 'batch_stripe'. Defaults to</span></span>
<span id="cb1-867"><a href="#cb1-867" aria-hidden="true" tabindex="-1"></a><span class="co"># 'varlen_llama3' in the sample packing case, and 'batch_ring' in the non-sample packing</span></span>
<span id="cb1-868"><a href="#cb1-868" aria-hidden="true" tabindex="-1"></a><span class="co"># case.</span></span>
<span id="cb1-869"><a href="#cb1-869" aria-hidden="true" tabindex="-1"></a><span class="fu">ring_attn_func</span><span class="kw">:</span><span class="at"> RingAttnFunc | None</span></span>
<span id="cb1-870"><a href="#cb1-870" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of tensor parallel processes in TP group. Only supported with DeepSpeed AutoTP.</span></span>
<span id="cb1-871"><a href="#cb1-871" aria-hidden="true" tabindex="-1"></a><span class="fu">tensor_parallel_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-872"><a href="#cb1-872" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-873"><a href="#cb1-873" aria-hidden="true" tabindex="-1"></a><span class="co"># Add or change special tokens. If you add tokens here, you don't need to add them to</span></span>
<span id="cb1-874"><a href="#cb1-874" aria-hidden="true" tabindex="-1"></a><span class="co"># the `tokens` list.</span></span>
<span id="cb1-875"><a href="#cb1-875" aria-hidden="true" tabindex="-1"></a><span class="fu">special_tokens</span><span class="kw">:</span><span class="at"> SpecialTokensConfig | None</span></span>
<span id="cb1-876"><a href="#cb1-876" aria-hidden="true" tabindex="-1"></a><span class="co"> # For SpecialTokensConfig:</span></span>
<span id="cb1-877"><a href="#cb1-877" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">bos_token</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-878"><a href="#cb1-878" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">eos_token</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-879"><a href="#cb1-879" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">pad_token</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-880"><a href="#cb1-880" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">unk_token</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-881"><a href="#cb1-881" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">additional_special_tokens</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-882"><a href="#cb1-882" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-883"><a href="#cb1-883" aria-hidden="true" tabindex="-1"></a><span class="co"># Add extra tokens to the tokenizer</span></span>
<span id="cb1-884"><a href="#cb1-884" aria-hidden="true" tabindex="-1"></a><span class="fu">tokens</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-885"><a href="#cb1-885" aria-hidden="true" tabindex="-1"></a><span class="co"># Mapping token_id to new_token_string to override reserved added_tokens in the</span></span>
<span id="cb1-886"><a href="#cb1-886" aria-hidden="true" tabindex="-1"></a><span class="co"># tokenizer. Only works for tokens that are not part of the base vocab (aka are</span></span>
<span id="cb1-887"><a href="#cb1-887" aria-hidden="true" tabindex="-1"></a><span class="co"># added_tokens). Can be checked if they exist in tokenizer.json added_tokens.</span></span>
<span id="cb1-888"><a href="#cb1-888" aria-hidden="true" tabindex="-1"></a><span class="fu">added_tokens_overrides</span><span class="kw">:</span><span class="at"> dict[int, str] | None</span></span>
<span id="cb1-889"><a href="#cb1-889" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-890"><a href="#cb1-890" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use torch.compile and which backend to use. setting to `auto` will enable</span></span>
<span id="cb1-891"><a href="#cb1-891" aria-hidden="true" tabindex="-1"></a><span class="co"># torch compile when torch>=2.6.0</span></span>
<span id="cb1-892"><a href="#cb1-892" aria-hidden="true" tabindex="-1"></a><span class="fu">torch_compile</span><span class="kw">:</span><span class="at"> Literal['auto'] | bool | None</span></span>
<span id="cb1-893"><a href="#cb1-893" aria-hidden="true" tabindex="-1"></a><span class="co"># Backend to use for torch.compile</span></span>
<span id="cb1-894"><a href="#cb1-894" aria-hidden="true" tabindex="-1"></a><span class="fu">torch_compile_backend</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-895"><a href="#cb1-895" aria-hidden="true" tabindex="-1"></a><span class="fu">torch_compile_mode</span><span class="kw">:</span><span class="at"> Literal['default', 'reduce-overhead', 'max-autotune'] | None</span></span>
<span id="cb1-896"><a href="#cb1-896" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-897"><a href="#cb1-897" aria-hidden="true" tabindex="-1"></a><span class="co"># Maximum number of iterations to train for. It precedes num_epochs which means that if</span></span>
<span id="cb1-898"><a href="#cb1-898" aria-hidden="true" tabindex="-1"></a><span class="co"># both are set, num_epochs will not be guaranteed. e.g., when 1 epoch is 1000 steps =></span></span>
<span id="cb1-899"><a href="#cb1-899" aria-hidden="true" tabindex="-1"></a><span class="co"># `num_epochs: 2` and `max_steps: 100` will train for 100 steps</span></span>
<span id="cb1-900"><a href="#cb1-900" aria-hidden="true" tabindex="-1"></a><span class="fu">max_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-901"><a href="#cb1-901" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of warmup steps. Cannot use with warmup_ratio</span></span>
<span id="cb1-902"><a href="#cb1-902" aria-hidden="true" tabindex="-1"></a><span class="fu">warmup_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-903"><a href="#cb1-903" aria-hidden="true" tabindex="-1"></a><span class="co"># Warmup ratio. Cannot use with warmup_steps</span></span>
<span id="cb1-904"><a href="#cb1-904" aria-hidden="true" tabindex="-1"></a><span class="fu">warmup_ratio</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-905"><a href="#cb1-905" aria-hidden="true" tabindex="-1"></a><span class="co"># Leave empty to eval at each epoch, integer for every N steps. float for fraction of</span></span>
<span id="cb1-906"><a href="#cb1-906" aria-hidden="true" tabindex="-1"></a><span class="co"># total steps</span></span>
<span id="cb1-907"><a href="#cb1-907" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_steps</span><span class="kw">:</span><span class="at"> int | float | None</span></span>
<span id="cb1-908"><a href="#cb1-908" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of times per epoch to run evals, mutually exclusive with eval_steps</span></span>
<span id="cb1-909"><a href="#cb1-909" aria-hidden="true" tabindex="-1"></a><span class="fu">evals_per_epoch</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-910"><a href="#cb1-910" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to `no` to skip evaluation, `epoch` at end of each epoch, leave empty to infer</span></span>
<span id="cb1-911"><a href="#cb1-911" aria-hidden="true" tabindex="-1"></a><span class="co"># from `eval_steps`</span></span>
<span id="cb1-912"><a href="#cb1-912" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_strategy</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-913"><a href="#cb1-913" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-914"><a href="#cb1-914" aria-hidden="true" tabindex="-1"></a><span class="co"># Leave empty to save at each epoch, integer for every N steps. float for fraction of</span></span>
<span id="cb1-915"><a href="#cb1-915" aria-hidden="true" tabindex="-1"></a><span class="co"># total steps</span></span>
<span id="cb1-916"><a href="#cb1-916" aria-hidden="true" tabindex="-1"></a><span class="fu">save_steps</span><span class="kw">:</span><span class="at"> int | float | None</span></span>
<span id="cb1-917"><a href="#cb1-917" aria-hidden="true" tabindex="-1"></a><span class="co"># Number of times per epoch to save a checkpoint, mutually exclusive with save_steps</span></span>
<span id="cb1-918"><a href="#cb1-918" aria-hidden="true" tabindex="-1"></a><span class="fu">saves_per_epoch</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-919"><a href="#cb1-919" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to `no` to skip checkpoint saves, `epoch` at end of each epoch, `best` when better</span></span>
<span id="cb1-920"><a href="#cb1-920" aria-hidden="true" tabindex="-1"></a><span class="co"># result is achieved, leave empty to infer from `save_steps`</span></span>
<span id="cb1-921"><a href="#cb1-921" aria-hidden="true" tabindex="-1"></a><span class="fu">save_strategy</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-922"><a href="#cb1-922" aria-hidden="true" tabindex="-1"></a><span class="co"># Checkpoints saved at a time</span></span>
<span id="cb1-923"><a href="#cb1-923" aria-hidden="true" tabindex="-1"></a><span class="fu">save_total_limit</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-924"><a href="#cb1-924" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to checkpoint a model after the first step of training. Defaults to False.</span></span>
<span id="cb1-925"><a href="#cb1-925" aria-hidden="true" tabindex="-1"></a><span class="fu">save_first_step</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-926"><a href="#cb1-926" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-927"><a href="#cb1-927" aria-hidden="true" tabindex="-1"></a><span class="co"># Logging frequency</span></span>
<span id="cb1-928"><a href="#cb1-928" aria-hidden="true" tabindex="-1"></a><span class="fu">logging_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-929"><a href="#cb1-929" aria-hidden="true" tabindex="-1"></a><span class="co"># Stop training after this many evaluation losses have increased in a row. https://huggi</span></span>
|
||
<span id="cb1-930"><a href="#cb1-930" aria-hidden="true" tabindex="-1"></a><span class="co"># ngface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppin</span></span>
|
||
<span id="cb1-931"><a href="#cb1-931" aria-hidden="true" tabindex="-1"></a><span class="co"># gCallback</span></span>
<span id="cb1-932"><a href="#cb1-932" aria-hidden="true" tabindex="-1"></a><span class="fu">early_stopping_patience</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-933"><a href="#cb1-933" aria-hidden="true" tabindex="-1"></a><span class="fu">load_best_model_at_end</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-934"><a href="#cb1-934" aria-hidden="true" tabindex="-1"></a><span class="co"># Save only the model weights, skipping the optimizer. Using this means you can't resume</span></span>
<span id="cb1-935"><a href="#cb1-935" aria-hidden="true" tabindex="-1"></a><span class="co"># from checkpoints.</span></span>
<span id="cb1-936"><a href="#cb1-936" aria-hidden="true" tabindex="-1"></a><span class="fu">save_only_model</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-937"><a href="#cb1-937" aria-hidden="true" tabindex="-1"></a><span class="co"># Use tensorboard for logging</span></span>
<span id="cb1-938"><a href="#cb1-938" aria-hidden="true" tabindex="-1"></a><span class="fu">use_tensorboard</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-939"><a href="#cb1-939" aria-hidden="true" tabindex="-1"></a><span class="co"># Enable the pytorch profiler to capture the first N steps of training to the</span></span>
<span id="cb1-940"><a href="#cb1-940" aria-hidden="true" tabindex="-1"></a><span class="co"># output_dir. see https://pytorch.org/blog/understanding-gpu-memory-1/ for more</span></span>
<span id="cb1-941"><a href="#cb1-941" aria-hidden="true" tabindex="-1"></a><span class="co"># information. Snapshots can be visualized @ https://pytorch.org/memory_viz</span></span>
<span id="cb1-942"><a href="#cb1-942" aria-hidden="true" tabindex="-1"></a><span class="fu">profiler_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-943"><a href="#cb1-943" aria-hidden="true" tabindex="-1"></a><span class="co"># Which step to start the profiler at. Useful for only capturing a few steps mid-run.</span></span>
<span id="cb1-944"><a href="#cb1-944" aria-hidden="true" tabindex="-1"></a><span class="fu">profiler_steps_start</span><span class="kw">:</span><span class="at"> int | None = 0</span></span>
<span id="cb1-945"><a href="#cb1-945" aria-hidden="true" tabindex="-1"></a><span class="co"># bool of whether to report tokens per second at the end of training. This is not</span></span>
<span id="cb1-946"><a href="#cb1-946" aria-hidden="true" tabindex="-1"></a><span class="co"># supported with pre-training datasets.</span></span>
<span id="cb1-947"><a href="#cb1-947" aria-hidden="true" tabindex="-1"></a><span class="fu">include_tokens_per_second</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-948"><a href="#cb1-948" aria-hidden="true" tabindex="-1"></a><span class="co"># bool of whether to report tokens per second per-gpu during training by measuring</span></span>
<span id="cb1-949"><a href="#cb1-949" aria-hidden="true" tabindex="-1"></a><span class="co"># throughput of non-padding tokens.</span></span>
<span id="cb1-950"><a href="#cb1-950" aria-hidden="true" tabindex="-1"></a><span class="fu">include_tkps</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-951"><a href="#cb1-951" aria-hidden="true" tabindex="-1"></a><span class="co"># NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to</span></span>
<span id="cb1-952"><a href="#cb1-952" aria-hidden="true" tabindex="-1"></a><span class="co"># add noise to embeddings. Currently only supported on Llama and Mistral</span></span>
<span id="cb1-953"><a href="#cb1-953" aria-hidden="true" tabindex="-1"></a><span class="fu">neftune_noise_alpha</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-954"><a href="#cb1-954" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-955"><a href="#cb1-955" aria-hidden="true" tabindex="-1"></a><span class="co"># Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to</span></span>
<span id="cb1-956"><a href="#cb1-956" aria-hidden="true" tabindex="-1"></a><span class="co"># `beta` in `ORPOConfig` due to trl mapping.</span></span>
<span id="cb1-957"><a href="#cb1-957" aria-hidden="true" tabindex="-1"></a><span class="fu">orpo_alpha</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-958"><a href="#cb1-958" aria-hidden="true" tabindex="-1"></a><span class="co"># Weighting of NLL term in loss from RPO paper</span></span>
<span id="cb1-959"><a href="#cb1-959" aria-hidden="true" tabindex="-1"></a><span class="fu">rpo_alpha</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-960"><a href="#cb1-960" aria-hidden="true" tabindex="-1"></a><span class="co"># Target reward margin for the SimPO loss</span></span>
<span id="cb1-961"><a href="#cb1-961" aria-hidden="true" tabindex="-1"></a><span class="fu">simpo_gamma</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-962"><a href="#cb1-962" aria-hidden="true" tabindex="-1"></a><span class="co"># Weight of the BC regularizer</span></span>
<span id="cb1-963"><a href="#cb1-963" aria-hidden="true" tabindex="-1"></a><span class="fu">cpo_alpha</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-964"><a href="#cb1-964" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-965"><a href="#cb1-965" aria-hidden="true" tabindex="-1"></a><span class="co"># Factor for desirable loss term in KTO loss</span></span>
<span id="cb1-966"><a href="#cb1-966" aria-hidden="true" tabindex="-1"></a><span class="fu">kto_desirable_weight</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-967"><a href="#cb1-967" aria-hidden="true" tabindex="-1"></a><span class="co"># Factor for undesirable loss term in KTO loss</span></span>
<span id="cb1-968"><a href="#cb1-968" aria-hidden="true" tabindex="-1"></a><span class="fu">kto_undesirable_weight</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-969"><a href="#cb1-969" aria-hidden="true" tabindex="-1"></a><span class="co"># The beta parameter for the RL training</span></span>
<span id="cb1-970"><a href="#cb1-970" aria-hidden="true" tabindex="-1"></a><span class="fu">rl_beta</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-971"><a href="#cb1-971" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-972"><a href="#cb1-972" aria-hidden="true" tabindex="-1"></a><span class="co"># Defines the max memory usage per gpu on the system. Passed through to transformers</span></span>
<span id="cb1-973"><a href="#cb1-973" aria-hidden="true" tabindex="-1"></a><span class="co"># when loading the model.</span></span>
<span id="cb1-974"><a href="#cb1-974" aria-hidden="true" tabindex="-1"></a><span class="fu">max_memory</span><span class="kw">:</span><span class="at"> dict[int | Literal['cpu', 'disk'], int | str] | None</span></span>
<span id="cb1-975"><a href="#cb1-975" aria-hidden="true" tabindex="-1"></a><span class="co"># Limit the memory for all available GPUs to this amount (if an integer, expressed in</span></span>
<span id="cb1-976"><a href="#cb1-976" aria-hidden="true" tabindex="-1"></a><span class="co"># gigabytes); default: unset</span></span>
<span id="cb1-977"><a href="#cb1-977" aria-hidden="true" tabindex="-1"></a><span class="fu">gpu_memory_limit</span><span class="kw">:</span><span class="at"> int | str | None</span></span>
<span id="cb1-978"><a href="#cb1-978" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use low_cpu_mem_usage</span></span>
<span id="cb1-979"><a href="#cb1-979" aria-hidden="true" tabindex="-1"></a><span class="fu">low_cpu_mem_usage</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-980"><a href="#cb1-980" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-981"><a href="#cb1-981" aria-hidden="true" tabindex="-1"></a><span class="co"># The name of the chat template to use for training, following values are supported:</span></span>
|
||
<span id="cb1-982"><a href="#cb1-982" aria-hidden="true" tabindex="-1"></a><span class="co"># tokenizer_default: Uses the chat template that is available in the</span></span>
|
||
<span id="cb1-983"><a href="#cb1-983" aria-hidden="true" tabindex="-1"></a><span class="co"># tokenizer_config.json. If the chat template is not available in the tokenizer, it will</span></span>
|
||
<span id="cb1-984"><a href="#cb1-984" aria-hidden="true" tabindex="-1"></a><span class="co"># raise an error. This is the default value.</span></span>
|
||
<span id="cb1-985"><a href="#cb1-985" aria-hidden="true" tabindex="-1"></a><span class="co"># alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates</span></span>
|
||
<span id="cb1-986"><a href="#cb1-986" aria-hidden="true" tabindex="-1"></a><span class="co"># are available in the axolotl codebase at src/axolotl/utils/chat_templates.py.</span></span>
|
||
<span id="cb1-987"><a href="#cb1-987" aria-hidden="true" tabindex="-1"></a><span class="co"># tokenizer_default_fallback_*: where * is the name of the chat template to fallback to.</span></span>
|
||
<span id="cb1-988"><a href="#cb1-988" aria-hidden="true" tabindex="-1"></a><span class="co"># E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not</span></span>
|
||
<span id="cb1-989"><a href="#cb1-989" aria-hidden="true" tabindex="-1"></a><span class="co"># available in the tokenizer. jinja: Uses a custom jinja template for the chat template.</span></span>
|
||
<span id="cb1-990"><a href="#cb1-990" aria-hidden="true" tabindex="-1"></a><span class="co"># The custom jinja template should be provided in the chat_template_jinja field. The</span></span>
|
||
<span id="cb1-991"><a href="#cb1-991" aria-hidden="true" tabindex="-1"></a><span class="co"># selected chat template will be saved to the tokenizer_config.json for easier</span></span>
|
||
<span id="cb1-992"><a href="#cb1-992" aria-hidden="true" tabindex="-1"></a><span class="co"># inferencing</span></span>
<span id="cb1-993"><a href="#cb1-993" aria-hidden="true" tabindex="-1"></a><span class="fu">chat_template</span><span class="kw">:</span><span class="at"> ChatTemplate | Annotated[str, StringConstraints(pattern='^tokenizer_default_fallback_')] | None</span></span>
<span id="cb1-994"><a href="#cb1-994" aria-hidden="true" tabindex="-1"></a><span class="co"># Custom jinja template or path to jinja file for chat template. This will be only used</span></span>
<span id="cb1-995"><a href="#cb1-995" aria-hidden="true" tabindex="-1"></a><span class="co"># if chat_template is set to `jinja` or `null` (in which case chat_template is</span></span>
<span id="cb1-996"><a href="#cb1-996" aria-hidden="true" tabindex="-1"></a><span class="co"># automatically set to `jinja`). Default is null.</span></span>
<span id="cb1-997"><a href="#cb1-997" aria-hidden="true" tabindex="-1"></a><span class="fu">chat_template_jinja</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-998"><a href="#cb1-998" aria-hidden="true" tabindex="-1"></a><span class="co"># Additional kwargs to pass to the chat template. This is useful for customizing the</span></span>
<span id="cb1-999"><a href="#cb1-999" aria-hidden="true" tabindex="-1"></a><span class="co"># chat template. For example, you can pass `thinking=False` to add a generation prompt</span></span>
<span id="cb1-1000"><a href="#cb1-1000" aria-hidden="true" tabindex="-1"></a><span class="co"># to the chat template.</span></span>
<span id="cb1-1001"><a href="#cb1-1001" aria-hidden="true" tabindex="-1"></a><span class="fu">chat_template_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1002"><a href="#cb1-1002" aria-hidden="true" tabindex="-1"></a><span class="co"># Custom EOT (End-of-Turn) tokens to mask/unmask during training. These tokens mark the</span></span>
<span id="cb1-1003"><a href="#cb1-1003" aria-hidden="true" tabindex="-1"></a><span class="co"># boundaries between conversation turns. For example: ['/INST', '</s>',</span></span>
<span id="cb1-1004"><a href="#cb1-1004" aria-hidden="true" tabindex="-1"></a><span class="co"># '[/SYSTEM_PROMPT]']. If not specified, defaults to just the model's eos_token. This is</span></span>
<span id="cb1-1005"><a href="#cb1-1005" aria-hidden="true" tabindex="-1"></a><span class="co"># useful for templates that use multiple delimiter tokens.</span></span>
<span id="cb1-1006"><a href="#cb1-1006" aria-hidden="true" tabindex="-1"></a><span class="fu">eot_tokens</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-1007"><a href="#cb1-1007" aria-hidden="true" tabindex="-1"></a><span class="co"># Changes the default system message. Currently only supports chatml.</span></span>
<span id="cb1-1008"><a href="#cb1-1008" aria-hidden="true" tabindex="-1"></a><span class="fu">default_system_message</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1009"><a href="#cb1-1009" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1010"><a href="#cb1-1010" aria-hidden="true" tabindex="-1"></a><span class="co"># Token index or indices to adjust embedding weights to the mean of the other tokens.</span></span>
<span id="cb1-1011"><a href="#cb1-1011" aria-hidden="true" tabindex="-1"></a><span class="co"># This is useful when the model has untrained embeddings.</span></span>
<span id="cb1-1012"><a href="#cb1-1012" aria-hidden="true" tabindex="-1"></a><span class="fu">fix_untrained_tokens</span><span class="kw">:</span><span class="at"> int | list[int] | None</span></span>
<span id="cb1-1013"><a href="#cb1-1013" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1014"><a href="#cb1-1014" aria-hidden="true" tabindex="-1"></a><span class="fu">is_preprocess</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1015"><a href="#cb1-1015" aria-hidden="true" tabindex="-1"></a><span class="fu">preprocess_iterable</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1016"><a href="#cb1-1016" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1017"><a href="#cb1-1017" aria-hidden="true" tabindex="-1"></a><span class="co"># Total number of tokens - internal use</span></span>
<span id="cb1-1018"><a href="#cb1-1018" aria-hidden="true" tabindex="-1"></a><span class="fu">total_num_tokens</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1019"><a href="#cb1-1019" aria-hidden="true" tabindex="-1"></a><span class="fu">total_supervised_tokens</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1020"><a href="#cb1-1020" aria-hidden="true" tabindex="-1"></a><span class="co"># You can set these packing optimizations AFTER starting a training at least once. The</span></span>
|
||
<span id="cb1-1021"><a href="#cb1-1021" aria-hidden="true" tabindex="-1"></a><span class="co"># trainer will provide recommended values for these values.</span></span>
<span id="cb1-1022"><a href="#cb1-1022" aria-hidden="true" tabindex="-1"></a><span class="fu">sample_packing_eff_est</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1023"><a href="#cb1-1023" aria-hidden="true" tabindex="-1"></a><span class="fu">axolotl_config_path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1024"><a href="#cb1-1024" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1025"><a href="#cb1-1025" aria-hidden="true" tabindex="-1"></a><span class="co"># Internal use only - Used to identify which the model is based on</span></span>
<span id="cb1-1026"><a href="#cb1-1026" aria-hidden="true" tabindex="-1"></a><span class="fu">is_falcon_derived_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1027"><a href="#cb1-1027" aria-hidden="true" tabindex="-1"></a><span class="co"># Internal use only - Used to identify which the model is based on</span></span>
<span id="cb1-1028"><a href="#cb1-1028" aria-hidden="true" tabindex="-1"></a><span class="fu">is_llama_derived_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1029"><a href="#cb1-1029" aria-hidden="true" tabindex="-1"></a><span class="co"># Internal use only - Used to identify which the model is based on. Please note that if</span></span>
<span id="cb1-1030"><a href="#cb1-1030" aria-hidden="true" tabindex="-1"></a><span class="co"># you set this to true, `padding_side` will be set to 'left' by default</span></span>
<span id="cb1-1031"><a href="#cb1-1031" aria-hidden="true" tabindex="-1"></a><span class="fu">is_mistral_derived_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1032"><a href="#cb1-1032" aria-hidden="true" tabindex="-1"></a><span class="co"># Internal use only - Used to identify which the model is based on</span></span>
<span id="cb1-1033"><a href="#cb1-1033" aria-hidden="true" tabindex="-1"></a><span class="fu">is_qwen_derived_model</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1034"><a href="#cb1-1034" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1035"><a href="#cb1-1035" aria-hidden="true" tabindex="-1"></a><span class="co"># Add plugins to extend the pipeline. See `src/axolotl/integrations` for the available</span></span>
<span id="cb1-1036"><a href="#cb1-1036" aria-hidden="true" tabindex="-1"></a><span class="co"># plugins or doc below for more details.</span></span>
<span id="cb1-1037"><a href="#cb1-1037" aria-hidden="true" tabindex="-1"></a><span class="co"># https://docs.axolotl.ai/docs/custom_integrations.html</span></span>
<span id="cb1-1038"><a href="#cb1-1038" aria-hidden="true" tabindex="-1"></a><span class="fu">plugins</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-1039"><a href="#cb1-1039" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1040"><a href="#cb1-1040" aria-hidden="true" tabindex="-1"></a><span class="co"># This is the huggingface model that contains *.pt, *.safetensors, or *.bin files. This</span></span>
<span id="cb1-1041"><a href="#cb1-1041" aria-hidden="true" tabindex="-1"></a><span class="co"># can also be a relative path to a model on disk</span></span>
<span id="cb1-1042"><a href="#cb1-1042" aria-hidden="true" tabindex="-1"></a><span class="fu">base_model</span><span class="kw">:</span><span class="at"> str (required)</span></span>
<span id="cb1-1043"><a href="#cb1-1043" aria-hidden="true" tabindex="-1"></a><span class="co"># If the base_model repo on hf hub doesn't include configuration .json files, You can</span></span>
<span id="cb1-1044"><a href="#cb1-1044" aria-hidden="true" tabindex="-1"></a><span class="co"># set that here, or leave this empty to default to base_model</span></span>
<span id="cb1-1045"><a href="#cb1-1045" aria-hidden="true" tabindex="-1"></a><span class="fu">base_model_config</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1046"><a href="#cb1-1046" aria-hidden="true" tabindex="-1"></a><span class="fu">cls_model_config</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1047"><a href="#cb1-1047" aria-hidden="true" tabindex="-1"></a><span class="co"># Optional tokenizer configuration path in case you want to use a different tokenizer</span></span>
<span id="cb1-1048"><a href="#cb1-1048" aria-hidden="true" tabindex="-1"></a><span class="co"># than the one defined in the base model</span></span>
<span id="cb1-1049"><a href="#cb1-1049" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_config</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1050"><a href="#cb1-1050" aria-hidden="true" tabindex="-1"></a><span class="co"># use_fast option for tokenizer loading from_pretrained, default to True</span></span>
<span id="cb1-1051"><a href="#cb1-1051" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_use_fast</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1052"><a href="#cb1-1052" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use the legacy tokenizer setting, defaults to True</span></span>
<span id="cb1-1053"><a href="#cb1-1053" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_legacy</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1054"><a href="#cb1-1054" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use mistral-common tokenizer. If set to True, it will use the mistral-</span></span>
|
||
<span id="cb1-1055"><a href="#cb1-1055" aria-hidden="true" tabindex="-1"></a><span class="co"># common tokenizer.</span></span>
<span id="cb1-1056"><a href="#cb1-1056" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_use_mistral_common</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1057"><a href="#cb1-1057" aria-hidden="true" tabindex="-1"></a><span class="co"># Corresponding tokenizer for the model AutoTokenizer is a good choice</span></span>
<span id="cb1-1058"><a href="#cb1-1058" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1059"><a href="#cb1-1059" aria-hidden="true" tabindex="-1"></a><span class="co"># transformers processor class</span></span>
<span id="cb1-1060"><a href="#cb1-1060" aria-hidden="true" tabindex="-1"></a><span class="fu">processor_type</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1061"><a href="#cb1-1061" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to save jinja files for tokenizer, transformers default is True</span></span>
<span id="cb1-1062"><a href="#cb1-1062" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_save_jinja_files</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-1063"><a href="#cb1-1063" aria-hidden="true" tabindex="-1"></a><span class="co"># Trust remote code for untrusted source</span></span>
<span id="cb1-1064"><a href="#cb1-1064" aria-hidden="true" tabindex="-1"></a><span class="fu">trust_remote_code</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1065"><a href="#cb1-1065" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1066"><a href="#cb1-1066" aria-hidden="true" tabindex="-1"></a><span class="co"># Don't move the model to the device before sharding. Set to `false` to revert to legacy</span></span>
<span id="cb1-1067"><a href="#cb1-1067" aria-hidden="true" tabindex="-1"></a><span class="co"># behavior.</span></span>
<span id="cb1-1068"><a href="#cb1-1068" aria-hidden="true" tabindex="-1"></a><span class="fu">experimental_skip_move_to_device</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-1069"><a href="#cb1-1069" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1070"><a href="#cb1-1070" aria-hidden="true" tabindex="-1"></a><span class="co"># Use custom kernels, e.g. MegaBlocks.</span></span>
<span id="cb1-1071"><a href="#cb1-1071" aria-hidden="true" tabindex="-1"></a><span class="fu">use_kernels</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1072"><a href="#cb1-1072" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1073"><a href="#cb1-1073" aria-hidden="true" tabindex="-1"></a><span class="co"># Model loading quantization config</span></span>
<span id="cb1-1074"><a href="#cb1-1074" aria-hidden="true" tabindex="-1"></a><span class="fu">model_quantization_config</span><span class="kw">:</span><span class="at"> Literal['Mxfp4Config'] | None</span></span>
<span id="cb1-1075"><a href="#cb1-1075" aria-hidden="true" tabindex="-1"></a><span class="co"># kwargs for model quantization config</span></span>
<span id="cb1-1076"><a href="#cb1-1076" aria-hidden="true" tabindex="-1"></a><span class="fu">model_quantization_config_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1077"><a href="#cb1-1077" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1078"><a href="#cb1-1078" aria-hidden="true" tabindex="-1"></a><span class="co"># Where to save the full-finetuned model to</span></span>
<span id="cb1-1079"><a href="#cb1-1079" aria-hidden="true" tabindex="-1"></a><span class="fu">output_dir</span><span class="kw">:</span><span class="at"> str = ./model-out</span></span>
<span id="cb1-1080"><a href="#cb1-1080" aria-hidden="true" tabindex="-1"></a><span class="co"># push checkpoints to hub</span></span>
<span id="cb1-1081"><a href="#cb1-1081" aria-hidden="true" tabindex="-1"></a><span class="fu">hub_model_id</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1082"><a href="#cb1-1082" aria-hidden="true" tabindex="-1"></a><span class="co"># how to push checkpoints to hub</span></span>
<span id="cb1-1083"><a href="#cb1-1083" aria-hidden="true" tabindex="-1"></a><span class="fu">hub_strategy</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1084"><a href="#cb1-1084" aria-hidden="true" tabindex="-1"></a><span class="co"># Save model as safetensors (require safetensors package). Default True</span></span>
<span id="cb1-1085"><a href="#cb1-1085" aria-hidden="true" tabindex="-1"></a><span class="fu">save_safetensors</span><span class="kw">:</span><span class="at"> bool | None = True</span></span>
<span id="cb1-1086"><a href="#cb1-1086" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1087"><a href="#cb1-1087" aria-hidden="true" tabindex="-1"></a><span class="co"># This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer</span></span>
<span id="cb1-1088"><a href="#cb1-1088" aria-hidden="true" tabindex="-1"></a><span class="fu">load_in_8bit</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-1089"><a href="#cb1-1089" aria-hidden="true" tabindex="-1"></a><span class="co"># Use bitsandbytes 4 bit</span></span>
<span id="cb1-1090"><a href="#cb1-1090" aria-hidden="true" tabindex="-1"></a><span class="fu">load_in_4bit</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-1091"><a href="#cb1-1091" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1092"><a href="#cb1-1092" aria-hidden="true" tabindex="-1"></a><span class="co"># If you want to use 'lora' or 'qlora' or leave blank to train all parameters in</span></span>
|
||
<span id="cb1-1093"><a href="#cb1-1093" aria-hidden="true" tabindex="-1"></a><span class="co"># original model</span></span>
<span id="cb1-1094"><a href="#cb1-1094" aria-hidden="true" tabindex="-1"></a><span class="fu">adapter</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1095"><a href="#cb1-1095" aria-hidden="true" tabindex="-1"></a><span class="co"># If you already have a lora model trained that you want to load, put that here. This</span></span>
<span id="cb1-1096"><a href="#cb1-1096" aria-hidden="true" tabindex="-1"></a><span class="co"># means after training, if you want to test the model, you should set this to the value</span></span>
<span id="cb1-1097"><a href="#cb1-1097" aria-hidden="true" tabindex="-1"></a><span class="co"># of `output_dir`. Note that if you merge an adapter to the base model, a new</span></span>
<span id="cb1-1098"><a href="#cb1-1098" aria-hidden="true" tabindex="-1"></a><span class="co"># subdirectory `merged` will be created under the `output_dir`.</span></span>
<span id="cb1-1099"><a href="#cb1-1099" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_model_dir</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1100"><a href="#cb1-1100" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_r</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1101"><a href="#cb1-1101" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_alpha</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1102"><a href="#cb1-1102" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_fan_in_fan_out</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1103"><a href="#cb1-1103" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_target_modules</span><span class="kw">:</span><span class="at"> str | list[str] | None</span></span>
<span id="cb1-1104"><a href="#cb1-1104" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_target_parameters</span><span class="kw">:</span><span class="at"> str | list[str] | None</span></span>
<span id="cb1-1105"><a href="#cb1-1105" aria-hidden="true" tabindex="-1"></a><span class="co"># If true, will target all linear modules</span></span>
<span id="cb1-1106"><a href="#cb1-1106" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_target_linear</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1107"><a href="#cb1-1107" aria-hidden="true" tabindex="-1"></a><span class="co"># If you added new tokens to the tokenizer, you may need to save some LoRA modules</span></span>
<span id="cb1-1108"><a href="#cb1-1108" aria-hidden="true" tabindex="-1"></a><span class="co"># because they need to know the new tokens. For LLaMA and Mistral, you need to save</span></span>
<span id="cb1-1109"><a href="#cb1-1109" aria-hidden="true" tabindex="-1"></a><span class="co"># `embed_tokens` and `lm_head`. It may vary for other models. `embed_tokens` converts</span></span>
<span id="cb1-1110"><a href="#cb1-1110" aria-hidden="true" tabindex="-1"></a><span class="co"># tokens to embeddings, and `lm_head` converts embeddings to token probabilities.</span></span>
<span id="cb1-1111"><a href="#cb1-1111" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_modules_to_save</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-1112"><a href="#cb1-1112" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_dropout</span><span class="kw">:</span><span class="at"> float | None = 0.0</span></span>
<span id="cb1-1113"><a href="#cb1-1113" aria-hidden="true" tabindex="-1"></a><span class="co"># The layer indices to transform, otherwise, apply to all layers</span></span>
<span id="cb1-1114"><a href="#cb1-1114" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_layers_to_transform</span><span class="kw">:</span><span class="at"> list[int] | None</span></span>
<span id="cb1-1115"><a href="#cb1-1115" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_layers_pattern</span><span class="kw">:</span><span class="at"> list[str] | None</span></span>
<span id="cb1-1116"><a href="#cb1-1116" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1117"><a href="#cb1-1117" aria-hidden="true" tabindex="-1"></a><span class="fu">peft</span><span class="kw">:</span><span class="at"> PeftConfig | None</span></span>
<span id="cb1-1118"><a href="#cb1-1118" aria-hidden="true" tabindex="-1"></a><span class="co"> # For PeftConfig:</span></span>
<span id="cb1-1119"><a href="#cb1-1119" aria-hidden="true" tabindex="-1"></a><span class="co"> # Configuration options for loftq initialization for LoRA</span></span>
<span id="cb1-1120"><a href="#cb1-1120" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">loftq_config</span><span class="kw">:</span><span class="at"> LoftQConfig | None</span></span>
<span id="cb1-1121"><a href="#cb1-1121" aria-hidden="true" tabindex="-1"></a><span class="co"> # For LoftQConfig:</span></span>
<span id="cb1-1122"><a href="#cb1-1122" aria-hidden="true" tabindex="-1"></a><span class="co"> # typically 4 bits</span></span>
<span id="cb1-1123"><a href="#cb1-1123" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">loftq_bits</span><span class="kw">:</span><span class="at"> int = 4</span></span>
<span id="cb1-1124"><a href="#cb1-1124" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1125"><a href="#cb1-1125" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use DoRA.</span></span>
<span id="cb1-1126"><a href="#cb1-1126" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_use_dora</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1127"><a href="#cb1-1127" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use RSLoRA.</span></span>
<span id="cb1-1128"><a href="#cb1-1128" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_use_rslora</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1129"><a href="#cb1-1129" aria-hidden="true" tabindex="-1"></a><span class="co"># List of layer indices to replicate.</span></span>
<span id="cb1-1130"><a href="#cb1-1130" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_layer_replication</span><span class="kw">:</span><span class="at"> list[tuple[int, int]] | None</span></span>
<span id="cb1-1131"><a href="#cb1-1131" aria-hidden="true" tabindex="-1"></a><span class="co"># How to initialize LoRA weights. Default to True which is MS original implementation.</span></span>
<span id="cb1-1132"><a href="#cb1-1132" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_init_lora_weights</span><span class="kw">:</span><span class="at"> bool | str | None</span></span>
<span id="cb1-1133"><a href="#cb1-1133" aria-hidden="true" tabindex="-1"></a><span class="co"># A list of token indices to fine-tune on the `embed_tokens` layer. Otherwise, a dict</span></span>
<span id="cb1-1134"><a href="#cb1-1134" aria-hidden="true" tabindex="-1"></a><span class="co"># mapping an embedding layer name to its trainable token indices. See</span></span>
<span id="cb1-1135"><a href="#cb1-1135" aria-hidden="true" tabindex="-1"></a><span class="co"># https://huggingface.co/docs/peft/v0.17.0/en/developer_guides/lora#efficiently-train-</span></span>
|
||
<span id="cb1-1136"><a href="#cb1-1136" aria-hidden="true" tabindex="-1"></a><span class="co"># tokens-alongside-lora</span></span>
|
||
<span id="cb1-1137"><a href="#cb1-1137" aria-hidden="true" tabindex="-1"></a><span class="fu">peft_trainable_token_indices</span><span class="kw">:</span><span class="at"> list[int] | dict[str, list[int]] | None</span></span>
<span id="cb1-1138"><a href="#cb1-1138" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1139"><a href="#cb1-1139" aria-hidden="true" tabindex="-1"></a><span class="co"># load qlora model in sharded format for FSDP using answer.ai technique.</span></span>
<span id="cb1-1140"><a href="#cb1-1140" aria-hidden="true" tabindex="-1"></a><span class="fu">qlora_sharded_model_loading</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-1141"><a href="#cb1-1141" aria-hidden="true" tabindex="-1"></a><span class="co"># Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it</span></span>
<span id="cb1-1142"><a href="#cb1-1142" aria-hidden="true" tabindex="-1"></a><span class="co"># takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge</span></span>
<span id="cb1-1143"><a href="#cb1-1143" aria-hidden="true" tabindex="-1"></a><span class="fu">lora_on_cpu</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1144"><a href="#cb1-1144" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether you are training a 4-bit GPTQ quantized model</span></span>
<span id="cb1-1145"><a href="#cb1-1145" aria-hidden="true" tabindex="-1"></a><span class="fu">gptq</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1146"><a href="#cb1-1146" aria-hidden="true" tabindex="-1"></a><span class="co"># optional overrides to the bnb 4bit quantization configuration</span></span>
<span id="cb1-1147"><a href="#cb1-1147" aria-hidden="true" tabindex="-1"></a><span class="fu">bnb_config_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1148"><a href="#cb1-1148" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1149"><a href="#cb1-1149" aria-hidden="true" tabindex="-1"></a><span class="co"># loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.</span></span>
<span id="cb1-1150"><a href="#cb1-1150" aria-hidden="true" tabindex="-1"></a><span class="fu">loraplus_lr_ratio</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1151"><a href="#cb1-1151" aria-hidden="true" tabindex="-1"></a><span class="co"># loraplus learning rate for lora embedding layers. Default value is 1e-6.</span></span>
<span id="cb1-1152"><a href="#cb1-1152" aria-hidden="true" tabindex="-1"></a><span class="fu">loraplus_lr_embedding</span><span class="kw">:</span><span class="at"> float | None = 1e-06</span></span>
<span id="cb1-1153"><a href="#cb1-1153" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1154"><a href="#cb1-1154" aria-hidden="true" tabindex="-1"></a><span class="fu">merge_lora</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1155"><a href="#cb1-1155" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1156"><a href="#cb1-1156" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use ReLoRA. Use with jagged_restart_*steps options.</span></span>
<span id="cb1-1157"><a href="#cb1-1157" aria-hidden="true" tabindex="-1"></a><span class="fu">relora</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1158"><a href="#cb1-1158" aria-hidden="true" tabindex="-1"></a><span class="co"># threshold for optimizer magnitude when pruning</span></span>
<span id="cb1-1159"><a href="#cb1-1159" aria-hidden="true" tabindex="-1"></a><span class="fu">relora_prune_ratio</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1160"><a href="#cb1-1160" aria-hidden="true" tabindex="-1"></a><span class="co"># True to perform lora weight merges on cpu during restarts, for modest gpu memory</span></span>
<span id="cb1-1161"><a href="#cb1-1161" aria-hidden="true" tabindex="-1"></a><span class="co"># savings</span></span>
<span id="cb1-1162"><a href="#cb1-1162" aria-hidden="true" tabindex="-1"></a><span class="fu">relora_cpu_offload</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1163"><a href="#cb1-1163" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1164"><a href="#cb1-1164" aria-hidden="true" tabindex="-1"></a><span class="co"># how often to reset for jagged restarts</span></span>
<span id="cb1-1165"><a href="#cb1-1165" aria-hidden="true" tabindex="-1"></a><span class="fu">jagged_restart_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1166"><a href="#cb1-1166" aria-hidden="true" tabindex="-1"></a><span class="co"># how many warmup steps to take after reset for jagged restarts</span></span>
<span id="cb1-1167"><a href="#cb1-1167" aria-hidden="true" tabindex="-1"></a><span class="fu">jagged_restart_warmup_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1168"><a href="#cb1-1168" aria-hidden="true" tabindex="-1"></a><span class="co"># how many anneal steps to take before reset for jagged restarts</span></span>
<span id="cb1-1169"><a href="#cb1-1169" aria-hidden="true" tabindex="-1"></a><span class="fu">jagged_restart_anneal_steps</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1170"><a href="#cb1-1170" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1171"><a href="#cb1-1171" aria-hidden="true" tabindex="-1"></a><span class="co"># If greater than 1, the optimizer step will be skipped and the gradients will be</span></span>
<span id="cb1-1172"><a href="#cb1-1172" aria-hidden="true" tabindex="-1"></a><span class="co"># accumulated for the given number of steps.</span></span>
<span id="cb1-1173"><a href="#cb1-1173" aria-hidden="true" tabindex="-1"></a><span class="fu">gradient_accumulation_steps</span><span class="kw">:</span><span class="at"> int | None = 1</span></span>
<span id="cb1-1174"><a href="#cb1-1174" aria-hidden="true" tabindex="-1"></a><span class="co"># The number of samples to include in each batch. This is the number of samples sent to</span></span>
<span id="cb1-1175"><a href="#cb1-1175" aria-hidden="true" tabindex="-1"></a><span class="co"># each GPU. Effective batch size per GPU = micro_batch_size * gradient_accumulation_steps</span></span>
<span id="cb1-1176"><a href="#cb1-1176" aria-hidden="true" tabindex="-1"></a><span class="fu">micro_batch_size</span><span class="kw">:</span><span class="at"> int | None = 1</span></span>
<span id="cb1-1177"><a href="#cb1-1177" aria-hidden="true" tabindex="-1"></a><span class="co"># Total batch size; we do not recommend setting this manually</span></span>
<span id="cb1-1178"><a href="#cb1-1178" aria-hidden="true" tabindex="-1"></a><span class="fu">batch_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1179"><a href="#cb1-1179" aria-hidden="true" tabindex="-1"></a><span class="co"># per gpu micro batch size for evals, defaults to value of micro_batch_size</span></span>
<span id="cb1-1180"><a href="#cb1-1180" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_batch_size</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1181"><a href="#cb1-1181" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1182"><a href="#cb1-1182" aria-hidden="true" tabindex="-1"></a><span class="co"># whether to find a batch size that fits in memory. Passed to the underlying transformers</span></span>
<span id="cb1-1183"><a href="#cb1-1183" aria-hidden="true" tabindex="-1"></a><span class="co"># Trainer</span></span>
<span id="cb1-1184"><a href="#cb1-1184" aria-hidden="true" tabindex="-1"></a><span class="fu">auto_find_batch_size</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1185"><a href="#cb1-1185" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1186"><a href="#cb1-1186" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to mask out or include the human's prompt from the training labels</span></span>
<span id="cb1-1187"><a href="#cb1-1187" aria-hidden="true" tabindex="-1"></a><span class="fu">train_on_inputs</span><span class="kw">:</span><span class="at"> bool | None = False</span></span>
<span id="cb1-1188"><a href="#cb1-1188" aria-hidden="true" tabindex="-1"></a><span class="co"># Group similarly sized data to minimize padding. May be slower to start, as it must</span></span>
<span id="cb1-1189"><a href="#cb1-1189" aria-hidden="true" tabindex="-1"></a><span class="co"># download and sort the entire dataset. Note that training loss may have an oscillating</span></span>
<span id="cb1-1190"><a href="#cb1-1190" aria-hidden="true" tabindex="-1"></a><span class="co"># pattern with this enabled.</span></span>
<span id="cb1-1191"><a href="#cb1-1191" aria-hidden="true" tabindex="-1"></a><span class="fu">group_by_length</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1192"><a href="#cb1-1192" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1193"><a href="#cb1-1193" aria-hidden="true" tabindex="-1"></a><span class="fu">learning_rate</span><span class="kw">:</span><span class="at"> str | float (required)</span></span>
<span id="cb1-1194"><a href="#cb1-1194" aria-hidden="true" tabindex="-1"></a><span class="fu">embedding_lr</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1195"><a href="#cb1-1195" aria-hidden="true" tabindex="-1"></a><span class="fu">embedding_lr_scale</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1196"><a href="#cb1-1196" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify weight decay</span></span>
<span id="cb1-1197"><a href="#cb1-1197" aria-hidden="true" tabindex="-1"></a><span class="fu">weight_decay</span><span class="kw">:</span><span class="at"> float | None = 0.0</span></span>
<span id="cb1-1198"><a href="#cb1-1198" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify optimizer</span></span>
<span id="cb1-1199"><a href="#cb1-1199" aria-hidden="true" tabindex="-1"></a><span class="fu">optimizer</span><span class="kw">:</span><span class="at"> OptimizerNames | CustomSupportedOptimizers | None = OptimizerNames.ADAMW_TORCH_FUSED</span></span>
<span id="cb1-1200"><a href="#cb1-1200" aria-hidden="true" tabindex="-1"></a><span class="co"># Dictionary of arguments to pass to the optimizer</span></span>
<span id="cb1-1201"><a href="#cb1-1201" aria-hidden="true" tabindex="-1"></a><span class="fu">optim_args</span><span class="kw">:</span><span class="at"> str | dict[str, Any] | None</span></span>
<span id="cb1-1202"><a href="#cb1-1202" aria-hidden="true" tabindex="-1"></a><span class="co"># The target modules to optimize, i.e. the module names that you would like to train,</span></span>
<span id="cb1-1203"><a href="#cb1-1203" aria-hidden="true" tabindex="-1"></a><span class="co"># right now this is used only for GaLore algorithm</span></span>
|
||
<span id="cb1-1204"><a href="#cb1-1204" aria-hidden="true" tabindex="-1"></a><span class="fu">optim_target_modules</span><span class="kw">:</span><span class="at"> list[str] | Literal['all_linear'] | None</span></span>
<span id="cb1-1205"><a href="#cb1-1205" aria-hidden="true" tabindex="-1"></a><span class="co"># Path to torch distx for optim 'adamw_anyprecision'</span></span>
<span id="cb1-1206"><a href="#cb1-1206" aria-hidden="true" tabindex="-1"></a><span class="fu">torchdistx_path</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1207"><a href="#cb1-1207" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_scheduler</span><span class="kw">:</span><span class="at"> SchedulerType | Literal['one_cycle'] | Literal['rex'] | None = SchedulerType.COSINE</span></span>
<span id="cb1-1208"><a href="#cb1-1208" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify a scheduler and kwargs to use with the optimizer</span></span>
<span id="cb1-1209"><a href="#cb1-1209" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_scheduler_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1210"><a href="#cb1-1210" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_quadratic_warmup</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1211"><a href="#cb1-1211" aria-hidden="true" tabindex="-1"></a><span class="co"># decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of</span></span>
<span id="cb1-1212"><a href="#cb1-1212" aria-hidden="true" tabindex="-1"></a><span class="co"># peak lr</span></span>
<span id="cb1-1213"><a href="#cb1-1213" aria-hidden="true" tabindex="-1"></a><span class="fu">cosine_min_lr_ratio</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1214"><a href="#cb1-1214" aria-hidden="true" tabindex="-1"></a><span class="co"># freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means</span></span>
<span id="cb1-1215"><a href="#cb1-1215" aria-hidden="true" tabindex="-1"></a><span class="co"># start cosine_min_lr at 80% of training step</span></span>
|
||
<span id="cb1-1216"><a href="#cb1-1216" aria-hidden="true" tabindex="-1"></a><span class="fu">cosine_constant_lr_ratio</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1217"><a href="#cb1-1217" aria-hidden="true" tabindex="-1"></a><span class="co"># Learning rate div factor</span></span>
<span id="cb1-1218"><a href="#cb1-1218" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_div_factor</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1219"><a href="#cb1-1219" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1220"><a href="#cb1-1220" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_groups</span><span class="kw">:</span><span class="at"> list[LrGroup] | None</span></span>
<span id="cb1-1221"><a href="#cb1-1221" aria-hidden="true" tabindex="-1"></a><span class="co"> # For LrGroup:</span></span>
<span id="cb1-1222"><a href="#cb1-1222" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">name</span><span class="kw">:</span><span class="at"> str (required)</span></span>
<span id="cb1-1223"><a href="#cb1-1223" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">modules</span><span class="kw">:</span><span class="at"> list[str] (required)</span></span>
<span id="cb1-1224"><a href="#cb1-1224" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">lr</span><span class="kw">:</span><span class="at"> float (required)</span></span>
<span id="cb1-1225"><a href="#cb1-1225" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1226"><a href="#cb1-1226" aria-hidden="true" tabindex="-1"></a><span class="co"># adamw hyperparams</span></span>
<span id="cb1-1227"><a href="#cb1-1227" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_epsilon</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1228"><a href="#cb1-1228" aria-hidden="true" tabindex="-1"></a><span class="co"># only used for CAME Optimizer</span></span>
<span id="cb1-1229"><a href="#cb1-1229" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_epsilon2</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1230"><a href="#cb1-1230" aria-hidden="true" tabindex="-1"></a><span class="co"># adamw hyperparams</span></span>
<span id="cb1-1231"><a href="#cb1-1231" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_beta1</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1232"><a href="#cb1-1232" aria-hidden="true" tabindex="-1"></a><span class="co"># adamw hyperparams</span></span>
<span id="cb1-1233"><a href="#cb1-1233" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_beta2</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1234"><a href="#cb1-1234" aria-hidden="true" tabindex="-1"></a><span class="co"># only used for CAME Optimizer</span></span>
<span id="cb1-1235"><a href="#cb1-1235" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_beta3</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1236"><a href="#cb1-1236" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1237"><a href="#cb1-1237" aria-hidden="true" tabindex="-1"></a><span class="co"># Dion Optimizer learning rate</span></span>
<span id="cb1-1238"><a href="#cb1-1238" aria-hidden="true" tabindex="-1"></a><span class="fu">dion_lr</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1239"><a href="#cb1-1239" aria-hidden="true" tabindex="-1"></a><span class="co"># Dion Optimizer momentum</span></span>
<span id="cb1-1240"><a href="#cb1-1240" aria-hidden="true" tabindex="-1"></a><span class="fu">dion_momentum</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1241"><a href="#cb1-1241" aria-hidden="true" tabindex="-1"></a><span class="co"># Dion Optimizer: r/d fraction for low-rank approximation. Used to compute the low-rank</span></span>
<span id="cb1-1242"><a href="#cb1-1242" aria-hidden="true" tabindex="-1"></a><span class="co"># dimension.</span></span>
<span id="cb1-1243"><a href="#cb1-1243" aria-hidden="true" tabindex="-1"></a><span class="fu">dion_rank_fraction</span><span class="kw">:</span><span class="at"> float | None = 1.0</span></span>
<span id="cb1-1244"><a href="#cb1-1244" aria-hidden="true" tabindex="-1"></a><span class="co"># Dion Optimizer: Round up the low-rank dimension to a multiple of this number. This may</span></span>
<span id="cb1-1245"><a href="#cb1-1245" aria-hidden="true" tabindex="-1"></a><span class="co"># be useful to ensure even sharding.</span></span>
<span id="cb1-1246"><a href="#cb1-1246" aria-hidden="true" tabindex="-1"></a><span class="fu">dion_rank_multiple_of</span><span class="kw">:</span><span class="at"> int | None = 1</span></span>
<span id="cb1-1247"><a href="#cb1-1247" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1248"><a href="#cb1-1248" aria-hidden="true" tabindex="-1"></a><span class="co"># Gradient clipping max norm</span></span>
<span id="cb1-1249"><a href="#cb1-1249" aria-hidden="true" tabindex="-1"></a><span class="fu">max_grad_norm</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1250"><a href="#cb1-1250" aria-hidden="true" tabindex="-1"></a><span class="fu">num_epochs</span><span class="kw">:</span><span class="at"> float = 1.0</span></span>
<span id="cb1-1251"><a href="#cb1-1251" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1252"><a href="#cb1-1252" aria-hidden="true" tabindex="-1"></a><span class="fu">use_wandb</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1253"><a href="#cb1-1253" aria-hidden="true" tabindex="-1"></a><span class="co"># Set the name of your wandb run</span></span>
<span id="cb1-1254"><a href="#cb1-1254" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1255"><a href="#cb1-1255" aria-hidden="true" tabindex="-1"></a><span class="co"># Set the ID of your wandb run</span></span>
<span id="cb1-1256"><a href="#cb1-1256" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_run_id</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1257"><a href="#cb1-1257" aria-hidden="true" tabindex="-1"></a><span class="co"># "offline" to save run metadata locally and not sync to the server, "disabled" to turn</span></span>
<span id="cb1-1258"><a href="#cb1-1258" aria-hidden="true" tabindex="-1"></a><span class="co"># off wandb</span></span>
<span id="cb1-1259"><a href="#cb1-1259" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_mode</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1260"><a href="#cb1-1260" aria-hidden="true" tabindex="-1"></a><span class="co"># Your wandb project name</span></span>
<span id="cb1-1261"><a href="#cb1-1261" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_project</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1262"><a href="#cb1-1262" aria-hidden="true" tabindex="-1"></a><span class="co"># A wandb Team name if using a Team</span></span>
<span id="cb1-1263"><a href="#cb1-1263" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_entity</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1264"><a href="#cb1-1264" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_watch</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1265"><a href="#cb1-1265" aria-hidden="true" tabindex="-1"></a><span class="co"># "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only</span></span>
<span id="cb1-1266"><a href="#cb1-1266" aria-hidden="true" tabindex="-1"></a><span class="co"># at the end of training</span></span>
<span id="cb1-1267"><a href="#cb1-1267" aria-hidden="true" tabindex="-1"></a><span class="fu">wandb_log_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1268"><a href="#cb1-1268" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1269"><a href="#cb1-1269" aria-hidden="true" tabindex="-1"></a><span class="fu">use_mlflow</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1270"><a href="#cb1-1270" aria-hidden="true" tabindex="-1"></a><span class="co"># URI to mlflow</span></span>
<span id="cb1-1271"><a href="#cb1-1271" aria-hidden="true" tabindex="-1"></a><span class="fu">mlflow_tracking_uri</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1272"><a href="#cb1-1272" aria-hidden="true" tabindex="-1"></a><span class="co"># Your experiment name</span></span>
<span id="cb1-1273"><a href="#cb1-1273" aria-hidden="true" tabindex="-1"></a><span class="fu">mlflow_experiment_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1274"><a href="#cb1-1274" aria-hidden="true" tabindex="-1"></a><span class="co"># Your run name</span></span>
<span id="cb1-1275"><a href="#cb1-1275" aria-hidden="true" tabindex="-1"></a><span class="fu">mlflow_run_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1276"><a href="#cb1-1276" aria-hidden="true" tabindex="-1"></a><span class="co"># set to true to copy each saved checkpoint on each save to mlflow artifact registry</span></span>
|
||
<span id="cb1-1277"><a href="#cb1-1277" aria-hidden="true" tabindex="-1"></a><span class="fu">hf_mlflow_log_artifacts</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1278"><a href="#cb1-1278" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1279"><a href="#cb1-1279" aria-hidden="true" tabindex="-1"></a><span class="co"># Enable or disable Comet integration.</span></span>
<span id="cb1-1280"><a href="#cb1-1280" aria-hidden="true" tabindex="-1"></a><span class="fu">use_comet</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1281"><a href="#cb1-1281" aria-hidden="true" tabindex="-1"></a><span class="co"># API key for Comet. Recommended to set via `comet login`.</span></span>
<span id="cb1-1282"><a href="#cb1-1282" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_api_key</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1283"><a href="#cb1-1283" aria-hidden="true" tabindex="-1"></a><span class="co"># Workspace name in Comet. Defaults to the user's default workspace.</span></span>
<span id="cb1-1284"><a href="#cb1-1284" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_workspace</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1285"><a href="#cb1-1285" aria-hidden="true" tabindex="-1"></a><span class="co"># Project name in Comet. Defaults to Uncategorized.</span></span>
<span id="cb1-1286"><a href="#cb1-1286" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_project_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1287"><a href="#cb1-1287" aria-hidden="true" tabindex="-1"></a><span class="co"># Identifier for the experiment. Used to append data to an existing experiment or</span></span>
<span id="cb1-1288"><a href="#cb1-1288" aria-hidden="true" tabindex="-1"></a><span class="co"># control the key of new experiments. Default to a random key.</span></span>
|
||
<span id="cb1-1289"><a href="#cb1-1289" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_experiment_key</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1290"><a href="#cb1-1290" aria-hidden="true" tabindex="-1"></a><span class="co"># Create a new experiment ("create") or log to an existing one ("get"). Default</span></span>
<span id="cb1-1291"><a href="#cb1-1291" aria-hidden="true" tabindex="-1"></a><span class="co"># ("get_or_create") auto-selects based on configuration.</span></span>
<span id="cb1-1292"><a href="#cb1-1292" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_mode</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1293"><a href="#cb1-1293" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to True to log data to Comet server, or False for offline storage. Default is</span></span>
|
||
<span id="cb1-1294"><a href="#cb1-1294" aria-hidden="true" tabindex="-1"></a><span class="co"># True.</span></span>
<span id="cb1-1295"><a href="#cb1-1295" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_online</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1296"><a href="#cb1-1296" aria-hidden="true" tabindex="-1"></a><span class="co"># Dictionary for additional configuration settings, see the doc for more details.</span></span>
<span id="cb1-1297"><a href="#cb1-1297" aria-hidden="true" tabindex="-1"></a><span class="fu">comet_experiment_config</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1298"><a href="#cb1-1298" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1299"><a href="#cb1-1299" aria-hidden="true" tabindex="-1"></a><span class="co"># the number of activate layers in LISA</span></span>
<span id="cb1-1300"><a href="#cb1-1300" aria-hidden="true" tabindex="-1"></a><span class="fu">lisa_n_layers</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1301"><a href="#cb1-1301" aria-hidden="true" tabindex="-1"></a><span class="co"># how often to switch layers in LISA</span></span>
<span id="cb1-1302"><a href="#cb1-1302" aria-hidden="true" tabindex="-1"></a><span class="fu">lisa_step_interval</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1303"><a href="#cb1-1303" aria-hidden="true" tabindex="-1"></a><span class="co"># path under the model to access the layers</span></span>
<span id="cb1-1304"><a href="#cb1-1304" aria-hidden="true" tabindex="-1"></a><span class="fu">lisa_layers_attribute</span><span class="kw">:</span><span class="at"> str | None = model.layers</span></span>
<span id="cb1-1305"><a href="#cb1-1305" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1306"><a href="#cb1-1306" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_title</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1307"><a href="#cb1-1307" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_share</span><span class="kw">:</span><span class="at"> bool | None</span></span>
<span id="cb1-1308"><a href="#cb1-1308" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_server_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1309"><a href="#cb1-1309" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_server_port</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1310"><a href="#cb1-1310" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_max_new_tokens</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1311"><a href="#cb1-1311" aria-hidden="true" tabindex="-1"></a><span class="fu">gradio_temperature</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1312"><a href="#cb1-1312" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1313"><a href="#cb1-1313" aria-hidden="true" tabindex="-1"></a><span class="fu">use_ray</span><span class="kw">:</span><span class="at"> bool = False</span></span>
<span id="cb1-1314"><a href="#cb1-1314" aria-hidden="true" tabindex="-1"></a><span class="fu">ray_run_name</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1315"><a href="#cb1-1315" aria-hidden="true" tabindex="-1"></a><span class="fu">ray_num_workers</span><span class="kw">:</span><span class="at"> int = 1</span></span>
<span id="cb1-1316"><a href="#cb1-1316" aria-hidden="true" tabindex="-1"></a><span class="fu">resources_per_worker</span><span class="kw">:</span><span class="at"> dict</span></span>
<span id="cb1-1317"><a href="#cb1-1317" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1318"><a href="#cb1-1318" aria-hidden="true" tabindex="-1"></a><span class="co"># The size of the image to resize to. It can be an integer (resized into padded-square</span></span>
|
||
<span id="cb1-1319"><a href="#cb1-1319" aria-hidden="true" tabindex="-1"></a><span class="co"># image) or a tuple (width, height).If not provided, we will attempt to load from</span></span>
|
||
<span id="cb1-1320"><a href="#cb1-1320" aria-hidden="true" tabindex="-1"></a><span class="co"># preprocessor.size, otherwise, images won't be resized.</span></span>
<span id="cb1-1321"><a href="#cb1-1321" aria-hidden="true" tabindex="-1"></a><span class="fu">image_size</span><span class="kw">:</span><span class="at"> int | tuple[int, int] | None</span></span>
<span id="cb1-1322"><a href="#cb1-1322" aria-hidden="true" tabindex="-1"></a><span class="co"># The resampling algorithm to use for image resizing. Default is bilinear. Please refer</span></span>
<span id="cb1-1323"><a href="#cb1-1323" aria-hidden="true" tabindex="-1"></a><span class="co"># to PIL.Image.Resampling for more details.</span></span>
<span id="cb1-1324"><a href="#cb1-1324" aria-hidden="true" tabindex="-1"></a><span class="fu">image_resize_algorithm</span><span class="kw">:</span><span class="at"> Literal['bilinear', 'bicubic', 'lanczos'] | Resampling | None</span></span>
<span id="cb1-1325"><a href="#cb1-1325" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1326"><a href="#cb1-1326" aria-hidden="true" tabindex="-1"></a><span class="co"># optional overrides to the base model configuration</span></span>
<span id="cb1-1327"><a href="#cb1-1327" aria-hidden="true" tabindex="-1"></a><span class="fu">overrides_of_model_config</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1328"><a href="#cb1-1328" aria-hidden="true" tabindex="-1"></a><span class="co"># optional overrides the base model loading from_pretrained</span></span>
<span id="cb1-1329"><a href="#cb1-1329" aria-hidden="true" tabindex="-1"></a><span class="fu">overrides_of_model_kwargs</span><span class="kw">:</span><span class="at"> dict[str, Any] | None</span></span>
<span id="cb1-1330"><a href="#cb1-1330" aria-hidden="true" tabindex="-1"></a><span class="co"># If you want to specify the type of model to load, AutoModelForCausalLM is a good</span></span>
<span id="cb1-1331"><a href="#cb1-1331" aria-hidden="true" tabindex="-1"></a><span class="co"># choice too</span></span>
<span id="cb1-1332"><a href="#cb1-1332" aria-hidden="true" tabindex="-1"></a><span class="fu">type_of_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1333"><a href="#cb1-1333" aria-hidden="true" tabindex="-1"></a><span class="co"># You can specify to choose a specific model revision from huggingface hub</span></span>
<span id="cb1-1334"><a href="#cb1-1334" aria-hidden="true" tabindex="-1"></a><span class="fu">revision_of_model</span><span class="kw">:</span><span class="at"> str | None</span></span>
<span id="cb1-1335"><a href="#cb1-1335" aria-hidden="true" tabindex="-1"></a></span>
<span id="cb1-1336"><a href="#cb1-1336" aria-hidden="true" tabindex="-1"></a><span class="fu">max_packed_sequence_len</span><span class="kw">:</span><span class="at"> int | None</span></span>
<span id="cb1-1337"><a href="#cb1-1337" aria-hidden="true" tabindex="-1"></a><span class="fu">rope_scaling</span><span class="kw">:</span><span class="at"> Any | None</span></span>
<span id="cb1-1338"><a href="#cb1-1338" aria-hidden="true" tabindex="-1"></a><span class="fu">noisy_embedding_alpha</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1339"><a href="#cb1-1339" aria-hidden="true" tabindex="-1"></a><span class="fu">dpo_beta</span><span class="kw">:</span><span class="at"> float | None</span></span>
<span id="cb1-1340"><a href="#cb1-1340" aria-hidden="true" tabindex="-1"></a><span class="fu">evaluation_strategy</span><span class="kw">:</span><span class="at"> str | None</span></span></code></pre></div><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></div>
</main> <!-- /main -->
</div> <!-- /content -->
</body></html> |