# Config options

A complete list of all configuration options.
<div class="sourceCode" id="cb1"><pre class="sourceCode yaml code-with-copy"><code class="sourceCode yaml"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="co"># This is the huggingface model that contains *.pt, *.safetensors, or *.bin files</span></span>
|
|
<span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a><span class="co"># This can also be a relative path to a model on disk</span></span>
|
|
<span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="fu">base_model</span><span class="kw">:</span><span class="at"> ./llama-7b-hf</span></span>
|
|
<span id="cb1-4"><a href="#cb1-4" aria-hidden="true" tabindex="-1"></a><span class="co"># You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)</span></span>
|
|
<span id="cb1-5"><a href="#cb1-5" aria-hidden="true" tabindex="-1"></a><span class="fu">base_model_ignore_patterns</span><span class="kw">:</span></span>
|
|
<span id="cb1-6"><a href="#cb1-6" aria-hidden="true" tabindex="-1"></a><span class="co"># If the base_model repo on hf hub doesn't include configuration .json files,</span></span>
|
|
<span id="cb1-7"><a href="#cb1-7" aria-hidden="true" tabindex="-1"></a><span class="co"># You can set that here, or leave this empty to default to base_model</span></span>
|
|
<span id="cb1-8"><a href="#cb1-8" aria-hidden="true" tabindex="-1"></a><span class="fu">base_model_config</span><span class="kw">:</span><span class="at"> ./llama-7b-hf</span></span>
|
|
<span id="cb1-9"><a href="#cb1-9" aria-hidden="true" tabindex="-1"></a><span class="co"># You can specify to choose a specific model revision from huggingface hub</span></span>
|
|
<span id="cb1-10"><a href="#cb1-10" aria-hidden="true" tabindex="-1"></a><span class="fu">revision_of_model</span><span class="kw">:</span></span>
|
|
<span id="cb1-11"><a href="#cb1-11" aria-hidden="true" tabindex="-1"></a><span class="co"># Optional tokenizer configuration path in case you want to use a different tokenizer</span></span>
|
|
<span id="cb1-12"><a href="#cb1-12" aria-hidden="true" tabindex="-1"></a><span class="co"># than the one defined in the base model</span></span>
|
|
<span id="cb1-13"><a href="#cb1-13" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_config</span><span class="kw">:</span></span>
|
|
<span id="cb1-14"><a href="#cb1-14" aria-hidden="true" tabindex="-1"></a><span class="co"># If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too</span></span>
|
|
<span id="cb1-15"><a href="#cb1-15" aria-hidden="true" tabindex="-1"></a><span class="fu">model_type</span><span class="kw">:</span><span class="at"> AutoModelForCausalLM</span></span>
|
|
<span id="cb1-16"><a href="#cb1-16" aria-hidden="true" tabindex="-1"></a><span class="co"># Corresponding tokenizer for the model AutoTokenizer is a good choice</span></span>
|
|
<span id="cb1-17"><a href="#cb1-17" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_type</span><span class="kw">:</span><span class="at"> AutoTokenizer</span></span>
|
|
<span id="cb1-18"><a href="#cb1-18" aria-hidden="true" tabindex="-1"></a><span class="co"># Trust remote code for untrusted source</span></span>
|
|
<span id="cb1-19"><a href="#cb1-19" aria-hidden="true" tabindex="-1"></a><span class="fu">trust_remote_code</span><span class="kw">:</span></span>
|
|
<span id="cb1-20"><a href="#cb1-20" aria-hidden="true" tabindex="-1"></a><span class="co"># use_fast option for tokenizer loading from_pretrained, default to True</span></span>
|
|
<span id="cb1-21"><a href="#cb1-21" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_use_fast</span><span class="kw">:</span></span>
|
|
<span id="cb1-22"><a href="#cb1-22" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use the legacy tokenizer setting, defaults to True</span></span>
|
|
<span id="cb1-23"><a href="#cb1-23" aria-hidden="true" tabindex="-1"></a><span class="fu">tokenizer_legacy</span><span class="kw">:</span></span>
|
|
<span id="cb1-24"><a href="#cb1-24" aria-hidden="true" tabindex="-1"></a><span class="co"># Resize the model embeddings when new tokens are added to multiples of 32</span></span>
|
|
<span id="cb1-25"><a href="#cb1-25" aria-hidden="true" tabindex="-1"></a><span class="co"># This is reported to improve training speed on some models</span></span>
|
|
<span id="cb1-26"><a href="#cb1-26" aria-hidden="true" tabindex="-1"></a><span class="fu">resize_token_embeddings_to_32x</span><span class="kw">:</span></span>
|
|
<span id="cb1-27"><a href="#cb1-27" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-28"><a href="#cb1-28" aria-hidden="true" tabindex="-1"></a><span class="co"># (Internal use only)</span></span>
|
|
<span id="cb1-29"><a href="#cb1-29" aria-hidden="true" tabindex="-1"></a><span class="co"># Used to identify which the model is based on</span></span>
|
|
<span id="cb1-30"><a href="#cb1-30" aria-hidden="true" tabindex="-1"></a><span class="fu">is_falcon_derived_model</span><span class="kw">:</span></span>
|
|
<span id="cb1-31"><a href="#cb1-31" aria-hidden="true" tabindex="-1"></a><span class="fu">is_llama_derived_model</span><span class="kw">:</span></span>
|
|
<span id="cb1-32"><a href="#cb1-32" aria-hidden="true" tabindex="-1"></a><span class="fu">is_qwen_derived_model</span><span class="kw">:</span></span>
|
|
<span id="cb1-33"><a href="#cb1-33" aria-hidden="true" tabindex="-1"></a><span class="co"># Please note that if you set this to true, `padding_side` will be set to "left" by default</span></span>
|
|
<span id="cb1-34"><a href="#cb1-34" aria-hidden="true" tabindex="-1"></a><span class="fu">is_mistral_derived_model</span><span class="kw">:</span></span>
|
|
<span id="cb1-35"><a href="#cb1-35" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-36"><a href="#cb1-36" aria-hidden="true" tabindex="-1"></a><span class="co"># optional overrides to the base model configuration</span></span>
|
|
<span id="cb1-37"><a href="#cb1-37" aria-hidden="true" tabindex="-1"></a><span class="fu">overrides_of_model_config</span><span class="kw">:</span></span>
|
|
<span id="cb1-38"><a href="#cb1-38" aria-hidden="true" tabindex="-1"></a><span class="co"> # RoPE Scaling https://github.com/huggingface/transformers/pull/24653</span></span>
|
|
<span id="cb1-39"><a href="#cb1-39" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">rope_scaling</span><span class="kw">:</span></span>
|
|
<span id="cb1-40"><a href="#cb1-40" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">type</span><span class="kw">:</span><span class="co"> # linear | dynamic</span></span>
|
|
<span id="cb1-41"><a href="#cb1-41" aria-hidden="true" tabindex="-1"></a><span class="at"> </span><span class="fu">factor</span><span class="kw">:</span><span class="co"> # float</span></span>

# optional overrides to the bnb 4bit quantization configuration
# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig
bnb_config_kwargs:
  # These are default values
  llm_int8_has_fp16_weight: false
  bnb_4bit_quant_type: nf4
  bnb_4bit_use_double_quant: true


# Whether you are training a 4-bit GPTQ quantized model
gptq: true

# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
load_in_8bit: true
# Use bitsandbytes 4 bit
load_in_4bit:

# Use CUDA bf16
bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
# Use CUDA fp16
fp16: true
# Use CUDA tf32
tf32: true # require >=ampere

# No AMP (automatic mixed precision)
bfloat16: true # require >=ampere
float16: true

# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset
gpu_memory_limit: 20GiB
# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
lora_on_cpu: true

# A list of one or more datasets to finetune the model with
datasets:
  # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files
  - path: vicgalle/alpaca-gpt4
    # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
    type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
    ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file
    data_files: # Optional[str] path to source data files
    shards: # Optional[int] number of shards to split data into
    name: # Optional[str] name of dataset configuration to load
    train_on_split: train # Optional[str] name of dataset split to load from

    # Optional[str] fastchat conversation type, only used with type: sharegpt
    conversation: # Options (see Conversation 'name'): https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
    field_human: # Optional[str]. Human key to use for conversation.
    field_model: # Optional[str]. Assistant key to use for conversation.
    # Add additional keys from your dataset as input or output roles
    roles:
      input: # Optional[List[str]]. These will be masked based on train_on_input
      output: # Optional[List[str]].

  # Custom user instruction prompt
  - path: repo
    type:
      # The below are defaults. Only set what's needed if you use a different column name.
      system_prompt: ""
      system_format: "{system}"
      field_system: system
      field_instruction: instruction
      field_input: input
      field_output: output

      # Customizable to be single line or multi-line
      # Use {instruction}/{input} as key to be replaced
      # 'format' can include {input}
      format: |-
        User: {instruction} {input}
        Assistant:
      # 'no_input_format' cannot include {input}
      no_input_format: "{instruction} "

  # For `completion` datasets only, uses the provided field instead of `text` column
  field:

# If false, the datasets will not be shuffled and will keep their original order in `datasets`.
# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.
shuffle_merged_datasets: true

# A list of one or more datasets to eval the model with.
# You can use either test_datasets, or val_set_size, but not both.
test_datasets:
  - path: /workspace/data/eval.jsonl
    ds_type: json
    # You need to specify a split. For "json" datasets the default split is called "train".
    split: train
    type: completion
    data_files:
      - /workspace/data/eval.jsonl

# use RL training: 'dpo', 'ipo', 'kto_pair'
rl:

# Saves the desired chat template to the tokenizer_config.json for easier inferencing
# Currently supports chatml and inst (mistral/mixtral)
chat_template: chatml
# Changes the default system message
default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.
# Axolotl attempts to save the dataset as an arrow after packing the data together so
# subsequent training attempts load faster, relative path
dataset_prepared_path: data/last_run_prepared
# Push prepared dataset to hub
push_dataset_to_hub: # repo path
# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`
# if not set.
dataset_processes: # defaults to os.cpu_count() if not set
# Keep dataset in memory while preprocessing
# Only needed if cached dataset is taking too much storage
dataset_keep_in_memory:
# push checkpoints to hub
hub_model_id: # private repo path to push finetuned model
# how to push checkpoints to hub
# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
hub_strategy:
# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
# Required to be true when used in combination with `push_dataset_to_hub`
hf_use_auth_token: # boolean
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.
val_set_size: 0.04
# Num shards for whole dataset
dataset_shard_num:
# Index of shard to use for whole dataset
dataset_shard_idx:

# The maximum length of an input to train with, this should typically be less than 2048
# as most models have a token/context limit of 2048
sequence_len: 2048
# Pad inputs so each step uses constant sized buffers
# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently
pad_to_sequence_len:
# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'
sample_packing:
# Set to 'false' if getting errors during eval with sample_packing on.
eval_sample_packing:
# You can set these packing optimizations AFTER starting a training at least once.
# The trainer will provide recommended values for these.
sample_packing_eff_est:
total_num_tokens:

# Passed through to transformers when loading the model when launched without accelerate
# Use `sequential` when training w/ model parallelism to limit memory
device_map:
# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.
max_memory:

# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
adapter: lora
# If you already have a lora model trained that you want to load, put that here.
# This means after training, if you want to test the model, you should set this to the value of `output_dir`.
# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.
lora_model_dir:

# LoRA hyperparameters
# For more details about the following options, see:
# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
# - k_proj
# - o_proj
# - gate_proj
# - down_proj
# - up_proj
lora_target_linear: # If true, will target all linear modules
peft_layers_to_transform: # The layer indices to transform, otherwise, apply to all layers

# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.
# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.
# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994
lora_modules_to_save:
# - embed_tokens
# - lm_head

lora_fan_in_fan_out: false

peft:
  # Configuration options for loftq initialization for LoRA
  # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization
  loftq_config:
    loftq_bits: # typically 4 bits

# ReLoRA configuration
# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed
relora_steps: # Number of steps per ReLoRA restart
relora_warmup_steps: # Number of per-restart warmup steps
relora_anneal_steps: # Number of anneal steps for each relora cycle
relora_prune_ratio: # threshold for optimizer magnitude when pruning
relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings

# wandb configuration if you're using it
# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.
wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
wandb_project: # Your wandb project name
wandb_entity: # A wandb Team name if using a Team
wandb_watch:
wandb_name: # Set the name of your wandb run
wandb_run_id: # Set the ID of your wandb run
wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training
|
|
<span id="cb1-247"><a href="#cb1-247" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-248"><a href="#cb1-248" aria-hidden="true" tabindex="-1"></a><span class="co"># mlflow configuration if you're using it</span></span>
|
|
<span id="cb1-249"><a href="#cb1-249" aria-hidden="true" tabindex="-1"></a><span class="fu">mlflow_tracking_uri</span><span class="kw">:</span><span class="co"> # URI to mlflow</span></span>
|
|
<span id="cb1-250"><a href="#cb1-250" aria-hidden="true" tabindex="-1"></a><span class="fu">mlflow_experiment_name</span><span class="kw">:</span><span class="co"> # Your experiment name</span></span>
|
|
<span id="cb1-251"><a href="#cb1-251" aria-hidden="true" tabindex="-1"></a><span class="fu">hf_mlflow_log_artifacts</span><span class="kw">:</span><span class="co"> # set to true to copy each saved checkpoint on each save to mlflow artifact registry</span></span>
|
|
<span id="cb1-252"><a href="#cb1-252" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-253"><a href="#cb1-253" aria-hidden="true" tabindex="-1"></a><span class="co"># Where to save the full-finetuned model to</span></span>
|
|
<span id="cb1-254"><a href="#cb1-254" aria-hidden="true" tabindex="-1"></a><span class="fu">output_dir</span><span class="kw">:</span><span class="at"> ./completed-model</span></span>
|
|
<span id="cb1-255"><a href="#cb1-255" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-256"><a href="#cb1-256" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use torch.compile and which backend to use</span></span>
|
|
<span id="cb1-257"><a href="#cb1-257" aria-hidden="true" tabindex="-1"></a><span class="fu">torch_compile</span><span class="kw">:</span><span class="co"> # bool</span></span>
|
|
<span id="cb1-258"><a href="#cb1-258" aria-hidden="true" tabindex="-1"></a><span class="fu">torch_compile_backend</span><span class="kw">:</span><span class="co"> # Optional[str]</span></span>
|
|
<span id="cb1-259"><a href="#cb1-259" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-260"><a href="#cb1-260" aria-hidden="true" tabindex="-1"></a><span class="co"># Training hyperparameters</span></span>
|
|
<span id="cb1-261"><a href="#cb1-261" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-262"><a href="#cb1-262" aria-hidden="true" tabindex="-1"></a><span class="co"># If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.</span></span>
|
|
<span id="cb1-263"><a href="#cb1-263" aria-hidden="true" tabindex="-1"></a><span class="fu">gradient_accumulation_steps</span><span class="kw">:</span><span class="at"> </span><span class="dv">1</span></span>
|
|
<span id="cb1-264"><a href="#cb1-264" aria-hidden="true" tabindex="-1"></a><span class="co"># The number of samples to include in each batch. This is the number of samples sent to each GPU.</span></span>
|
|
<span id="cb1-265"><a href="#cb1-265" aria-hidden="true" tabindex="-1"></a><span class="fu">micro_batch_size</span><span class="kw">:</span><span class="at"> </span><span class="dv">2</span></span>
|
|
<span id="cb1-266"><a href="#cb1-266" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_batch_size</span><span class="kw">:</span></span>
|
|
<span id="cb1-267"><a href="#cb1-267" aria-hidden="true" tabindex="-1"></a><span class="fu">num_epochs</span><span class="kw">:</span><span class="at"> </span><span class="dv">4</span></span>
|
|
<span id="cb1-268"><a href="#cb1-268" aria-hidden="true" tabindex="-1"></a><span class="fu">warmup_steps</span><span class="kw">:</span><span class="at"> </span><span class="dv">100</span><span class="co"> # cannot use with warmup_ratio</span></span>
|
|
<span id="cb1-269"><a href="#cb1-269" aria-hidden="true" tabindex="-1"></a><span class="fu">warmup_ratio</span><span class="kw">:</span><span class="at"> </span><span class="fl">0.05</span><span class="co"> # cannot use with warmup_steps</span></span>
|
|
<span id="cb1-270"><a href="#cb1-270" aria-hidden="true" tabindex="-1"></a><span class="fu">learning_rate</span><span class="kw">:</span><span class="at"> </span><span class="fl">0.00003</span></span>
|
|
<span id="cb1-271"><a href="#cb1-271" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_quadratic_warmup</span><span class="kw">:</span></span>
|
|
<span id="cb1-272"><a href="#cb1-272" aria-hidden="true" tabindex="-1"></a><span class="fu">logging_steps</span><span class="kw">:</span></span>
|
|
<span id="cb1-273"><a href="#cb1-273" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_steps</span><span class="kw">:</span><span class="co"> # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps</span></span>
|
|
<span id="cb1-274"><a href="#cb1-274" aria-hidden="true" tabindex="-1"></a><span class="fu">evals_per_epoch</span><span class="kw">:</span><span class="co"> # number of times per epoch to run evals, mutually exclusive with eval_steps</span></span>
|
|
<span id="cb1-275"><a href="#cb1-275" aria-hidden="true" tabindex="-1"></a><span class="fu">save_strategy</span><span class="kw">:</span><span class="co"> # Set to `no` to skip checkpoint saves</span></span>
|
|
<span id="cb1-276"><a href="#cb1-276" aria-hidden="true" tabindex="-1"></a><span class="fu">save_steps</span><span class="kw">:</span><span class="co"> # Leave empty to save at each epoch</span></span>
|
|
<span id="cb1-277"><a href="#cb1-277" aria-hidden="true" tabindex="-1"></a><span class="fu">saves_per_epoch</span><span class="kw">:</span><span class="co"> # number of times per epoch to save a checkpoint, mutually exclusive with save_steps</span></span>
|
|
<span id="cb1-278"><a href="#cb1-278" aria-hidden="true" tabindex="-1"></a><span class="fu">save_total_limit</span><span class="kw">:</span><span class="co"> # Checkpoints saved at a time</span></span>
|
|
<span id="cb1-279"><a href="#cb1-279" aria-hidden="true" tabindex="-1"></a><span class="co"># Maximum number of iterations to train for. It precedes num_epochs which means that</span></span>
|
|
<span id="cb1-280"><a href="#cb1-280" aria-hidden="true" tabindex="-1"></a><span class="co"># if both are set, num_epochs will not be guaranteed.</span></span>
|
|
<span id="cb1-281"><a href="#cb1-281" aria-hidden="true" tabindex="-1"></a><span class="co"># e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps</span></span>
|
|
<span id="cb1-282"><a href="#cb1-282" aria-hidden="true" tabindex="-1"></a><span class="fu">max_steps</span><span class="kw">:</span></span>
|
|
<span id="cb1-283"><a href="#cb1-283" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-284"><a href="#cb1-284" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_table_size</span><span class="kw">:</span><span class="co"> # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0</span></span>
|
|
<span id="cb1-285"><a href="#cb1-285" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_max_new_tokens</span><span class="kw">:</span><span class="co"> # Total number of tokens generated for predictions sent to wandb. Default is 128</span></span>
|
|
<span id="cb1-286"><a href="#cb1-286" aria-hidden="true" tabindex="-1"></a><span class="fu">eval_causal_lm_metrics</span><span class="kw">:</span><span class="co"> # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", chrf]</span></span>
|
|
<span id="cb1-287"><a href="#cb1-287" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-288"><a href="#cb1-288" aria-hidden="true" tabindex="-1"></a><span class="fu">loss_watchdog_threshold</span><span class="kw">:</span><span class="co"> # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)</span></span>
|
|
<span id="cb1-289"><a href="#cb1-289" aria-hidden="true" tabindex="-1"></a><span class="fu">loss_watchdog_patience</span><span class="kw">:</span><span class="co"> # Number of high-loss steps in a row before the trainer aborts (default: 3)</span></span>
|
|
<span id="cb1-290"><a href="#cb1-290" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-291"><a href="#cb1-291" aria-hidden="true" tabindex="-1"></a><span class="co"># Save model as safetensors (require safetensors package)</span></span>
|
|
<span id="cb1-292"><a href="#cb1-292" aria-hidden="true" tabindex="-1"></a><span class="fu">save_safetensors</span><span class="kw">:</span></span>
|
|
<span id="cb1-293"><a href="#cb1-293" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-294"><a href="#cb1-294" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to mask out or include the human's prompt from the training labels</span></span>
|
|
<span id="cb1-295"><a href="#cb1-295" aria-hidden="true" tabindex="-1"></a><span class="fu">train_on_inputs</span><span class="kw">:</span><span class="at"> </span><span class="ch">false</span></span>
|
|
<span id="cb1-296"><a href="#cb1-296" aria-hidden="true" tabindex="-1"></a><span class="co"># Group similarly sized data to minimize padding.</span></span>
|
|
<span id="cb1-297"><a href="#cb1-297" aria-hidden="true" tabindex="-1"></a><span class="co"># May be slower to start, as it must download and sort the entire dataset.</span></span>
|
|
<span id="cb1-298"><a href="#cb1-298" aria-hidden="true" tabindex="-1"></a><span class="co"># Note that training loss may have an oscillating pattern with this enabled.</span></span>
|
|
<span id="cb1-299"><a href="#cb1-299" aria-hidden="true" tabindex="-1"></a><span class="fu">group_by_length</span><span class="kw">:</span><span class="at"> </span><span class="ch">false</span></span>
|
|
<span id="cb1-300"><a href="#cb1-300" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-301"><a href="#cb1-301" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing</span></span>
|
|
<span id="cb1-302"><a href="#cb1-302" aria-hidden="true" tabindex="-1"></a><span class="fu">gradient_checkpointing</span><span class="kw">:</span><span class="at"> </span><span class="ch">false</span></span>
|
|
<span id="cb1-303"><a href="#cb1-303" aria-hidden="true" tabindex="-1"></a><span class="co"># additional kwargs to pass to the trainer for gradient checkpointing</span></span>
|
|
<span id="cb1-304"><a href="#cb1-304" aria-hidden="true" tabindex="-1"></a><span class="co"># gradient_checkpointing_kwargs:</span></span>
|
|
<span id="cb1-305"><a href="#cb1-305" aria-hidden="true" tabindex="-1"></a><span class="co"># use_reentrant: true</span></span>
|
|
<span id="cb1-306"><a href="#cb1-306" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-307"><a href="#cb1-307" aria-hidden="true" tabindex="-1"></a><span class="co"># Stop training after this many evaluation losses have increased in a row</span></span>
|
|
<span id="cb1-308"><a href="#cb1-308" aria-hidden="true" tabindex="-1"></a><span class="co"># https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback</span></span>
|
|
<span id="cb1-309"><a href="#cb1-309" aria-hidden="true" tabindex="-1"></a><span class="fu">early_stopping_patience</span><span class="kw">:</span><span class="at"> </span><span class="dv">3</span></span>
|
|
<span id="cb1-310"><a href="#cb1-310" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-311"><a href="#cb1-311" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify a scheduler and kwargs to use with the optimizer</span></span>
|
|
<span id="cb1-312"><a href="#cb1-312" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_scheduler</span><span class="kw">:</span><span class="co"> # 'one_cycle' | 'log_sweep' | empty for cosine</span></span>
|
|
<span id="cb1-313"><a href="#cb1-313" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_scheduler_kwargs</span><span class="kw">:</span></span>
|
|
<span id="cb1-314"><a href="#cb1-314" aria-hidden="true" tabindex="-1"></a><span class="fu">cosine_min_lr_ratio</span><span class="kw">:</span><span class="co"> # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr</span></span>
|
|
<span id="cb1-315"><a href="#cb1-315" aria-hidden="true" tabindex="-1"></a><span class="fu">cosine_constant_lr_ratio</span><span class="kw">:</span><span class="co"> # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)</span></span>
|
|
<span id="cb1-316"><a href="#cb1-316" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-317"><a href="#cb1-317" aria-hidden="true" tabindex="-1"></a><span class="co"># For one_cycle optim</span></span>
|
|
<span id="cb1-318"><a href="#cb1-318" aria-hidden="true" tabindex="-1"></a><span class="fu">lr_div_factor</span><span class="kw">:</span><span class="co"> # Learning rate div factor</span></span>
|
|
<span id="cb1-319"><a href="#cb1-319" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-320"><a href="#cb1-320" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify optimizer</span></span>
|
|
<span id="cb1-321"><a href="#cb1-321" aria-hidden="true" tabindex="-1"></a><span class="co"># Valid values are driven by the Transformers OptimizerNames class, see:</span></span>
|
|
<span id="cb1-322"><a href="#cb1-322" aria-hidden="true" tabindex="-1"></a><span class="co"># https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134</span></span>
|
|
<span id="cb1-323"><a href="#cb1-323" aria-hidden="true" tabindex="-1"></a><span class="co">#</span></span>
|
|
<span id="cb1-324"><a href="#cb1-324" aria-hidden="true" tabindex="-1"></a><span class="co"># Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of</span></span>
|
|
<span id="cb1-325"><a href="#cb1-325" aria-hidden="true" tabindex="-1"></a><span class="co"># torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used</span></span>
|
|
<span id="cb1-326"><a href="#cb1-326" aria-hidden="true" tabindex="-1"></a><span class="co"># in the examples/ for your model and fine-tuning use case.</span></span>
|
|
<span id="cb1-327"><a href="#cb1-327" aria-hidden="true" tabindex="-1"></a><span class="co">#</span></span>
|
|
<span id="cb1-328"><a href="#cb1-328" aria-hidden="true" tabindex="-1"></a><span class="co"># Valid values for 'optimizer' include:</span></span>
|
|
<span id="cb1-329"><a href="#cb1-329" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_hf</span></span>
|
|
<span id="cb1-330"><a href="#cb1-330" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_torch</span></span>
|
|
<span id="cb1-331"><a href="#cb1-331" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_torch_fused</span></span>
|
|
<span id="cb1-332"><a href="#cb1-332" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_torch_xla</span></span>
|
|
<span id="cb1-333"><a href="#cb1-333" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_apex_fused</span></span>
|
|
<span id="cb1-334"><a href="#cb1-334" aria-hidden="true" tabindex="-1"></a><span class="co"># - adafactor</span></span>
|
|
<span id="cb1-335"><a href="#cb1-335" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_anyprecision</span></span>
|
|
<span id="cb1-336"><a href="#cb1-336" aria-hidden="true" tabindex="-1"></a><span class="co"># - sgd</span></span>
|
|
<span id="cb1-337"><a href="#cb1-337" aria-hidden="true" tabindex="-1"></a><span class="co"># - adagrad</span></span>
|
|
<span id="cb1-338"><a href="#cb1-338" aria-hidden="true" tabindex="-1"></a><span class="co"># - adamw_bnb_8bit</span></span>
|
|
<span id="cb1-339"><a href="#cb1-339" aria-hidden="true" tabindex="-1"></a><span class="co"># - lion_8bit</span></span>
|
|
<span id="cb1-340"><a href="#cb1-340" aria-hidden="true" tabindex="-1"></a><span class="co"># - lion_32bit</span></span>
|
|
<span id="cb1-341"><a href="#cb1-341" aria-hidden="true" tabindex="-1"></a><span class="co"># - paged_adamw_32bit</span></span>
|
|
<span id="cb1-342"><a href="#cb1-342" aria-hidden="true" tabindex="-1"></a><span class="co"># - paged_adamw_8bit</span></span>
|
|
<span id="cb1-343"><a href="#cb1-343" aria-hidden="true" tabindex="-1"></a><span class="co"># - paged_lion_32bit</span></span>
|
|
<span id="cb1-344"><a href="#cb1-344" aria-hidden="true" tabindex="-1"></a><span class="co"># - paged_lion_8bit</span></span>
|
|
<span id="cb1-345"><a href="#cb1-345" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adamw</span></span>
|
|
<span id="cb1-346"><a href="#cb1-346" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adamw_8bit</span></span>
|
|
<span id="cb1-347"><a href="#cb1-347" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adafactor</span></span>
|
|
<span id="cb1-348"><a href="#cb1-348" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adamw_layerwise</span></span>
|
|
<span id="cb1-349"><a href="#cb1-349" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adamw_8bit_layerwise</span></span>
|
|
<span id="cb1-350"><a href="#cb1-350" aria-hidden="true" tabindex="-1"></a><span class="co"># - galore_adafactor_layerwise</span></span>
|
|
<span id="cb1-351"><a href="#cb1-351" aria-hidden="true" tabindex="-1"></a><span class="fu">optimizer</span><span class="kw">:</span></span>
|
|
<span id="cb1-352"><a href="#cb1-352" aria-hidden="true" tabindex="-1"></a><span class="co"># Dictionary of arguments to pass to the optimizer</span></span>
|
|
<span id="cb1-353"><a href="#cb1-353" aria-hidden="true" tabindex="-1"></a><span class="fu">optim_args</span><span class="kw">:</span></span>
|
|
<span id="cb1-354"><a href="#cb1-354" aria-hidden="true" tabindex="-1"></a><span class="co"># For Galore Optimizers the following optim_args are available</span></span>
|
|
<span id="cb1-355"><a href="#cb1-355" aria-hidden="true" tabindex="-1"></a><span class="co"># rank: # type: int</span></span>
|
|
<span id="cb1-356"><a href="#cb1-356" aria-hidden="true" tabindex="-1"></a><span class="co"># update_proj_gap # type: int</span></span>
|
|
<span id="cb1-357"><a href="#cb1-357" aria-hidden="true" tabindex="-1"></a><span class="co"># scale # type: float</span></span>
|
|
<span id="cb1-358"><a href="#cb1-358" aria-hidden="true" tabindex="-1"></a><span class="co"># proj_type: # type: str, default = std</span></span>
|
|
<span id="cb1-359"><a href="#cb1-359" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-360"><a href="#cb1-360" aria-hidden="true" tabindex="-1"></a><span class="co"># The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm</span></span>
|
|
<span id="cb1-361"><a href="#cb1-361" aria-hidden="true" tabindex="-1"></a><span class="fu">optim_target_modules</span><span class="kw">:</span></span>
|
|
<span id="cb1-362"><a href="#cb1-362" aria-hidden="true" tabindex="-1"></a><span class="co"># - self_attn # for llama</span></span>
|
|
<span id="cb1-363"><a href="#cb1-363" aria-hidden="true" tabindex="-1"></a><span class="co"># - mlp</span></span>
|
|
<span id="cb1-364"><a href="#cb1-364" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-365"><a href="#cb1-365" aria-hidden="true" tabindex="-1"></a><span class="co"># Specify weight decay</span></span>
|
|
<span id="cb1-366"><a href="#cb1-366" aria-hidden="true" tabindex="-1"></a><span class="fu">weight_decay</span><span class="kw">:</span></span>
|
|
<span id="cb1-367"><a href="#cb1-367" aria-hidden="true" tabindex="-1"></a><span class="co"># adamw hyperparams</span></span>
|
|
<span id="cb1-368"><a href="#cb1-368" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_beta1</span><span class="kw">:</span></span>
|
|
<span id="cb1-369"><a href="#cb1-369" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_beta2</span><span class="kw">:</span></span>
|
|
<span id="cb1-370"><a href="#cb1-370" aria-hidden="true" tabindex="-1"></a><span class="fu">adam_epsilon</span><span class="kw">:</span></span>
|
|
<span id="cb1-371"><a href="#cb1-371" aria-hidden="true" tabindex="-1"></a><span class="co"># Gradient clipping max norm</span></span>
|
|
<span id="cb1-372"><a href="#cb1-372" aria-hidden="true" tabindex="-1"></a><span class="fu">max_grad_norm</span><span class="kw">:</span></span>
|
|
<span id="cb1-373"><a href="#cb1-373" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-374"><a href="#cb1-374" aria-hidden="true" tabindex="-1"></a><span class="co"># Augmentation techniques</span></span>
|
|
<span id="cb1-375"><a href="#cb1-375" aria-hidden="true" tabindex="-1"></a><span class="co"># NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings</span></span>
|
|
<span id="cb1-376"><a href="#cb1-376" aria-hidden="true" tabindex="-1"></a><span class="co"># currently only supported on Llama and Mistral</span></span>
|
|
<span id="cb1-377"><a href="#cb1-377" aria-hidden="true" tabindex="-1"></a><span class="fu">neftune_noise_alpha</span><span class="kw">:</span></span>
|
|
<span id="cb1-378"><a href="#cb1-378" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-379"><a href="#cb1-379" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to bettertransformers</span></span>
|
|
<span id="cb1-380"><a href="#cb1-380" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_optimum</span><span class="kw">:</span></span>
|
|
<span id="cb1-381"><a href="#cb1-381" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use xformers attention patch https://github.com/facebookresearch/xformers:</span></span>
|
|
<span id="cb1-382"><a href="#cb1-382" aria-hidden="true" tabindex="-1"></a><span class="fu">xformers_attention</span><span class="kw">:</span></span>
|
|
<span id="cb1-383"><a href="#cb1-383" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:</span></span>
|
|
<span id="cb1-384"><a href="#cb1-384" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attention</span><span class="kw">:</span></span>
|
|
<span id="cb1-385"><a href="#cb1-385" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_cross_entropy</span><span class="kw">:</span><span class="co"> # Whether to use flash-attention cross entropy implementation - advanced use only</span></span>
|
|
<span id="cb1-386"><a href="#cb1-386" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_rms_norm</span><span class="kw">:</span><span class="co"> # Whether to use flash-attention rms norm implementation - advanced use only</span></span>
|
|
<span id="cb1-387"><a href="#cb1-387" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_fuse_qkv</span><span class="kw">:</span><span class="co"> # Whether to fuse QKV into a single operation</span></span>
|
|
<span id="cb1-388"><a href="#cb1-388" aria-hidden="true" tabindex="-1"></a><span class="fu">flash_attn_fuse_mlp</span><span class="kw">:</span><span class="co"> # Whether to fuse part of the MLP into a single operation</span></span>
|
|
<span id="cb1-389"><a href="#cb1-389" aria-hidden="true" tabindex="-1"></a><span class="co"># Whether to use scaled-dot-product attention</span></span>
|
|
<span id="cb1-390"><a href="#cb1-390" aria-hidden="true" tabindex="-1"></a><span class="co"># https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html</span></span>
|
|
<span id="cb1-391"><a href="#cb1-391" aria-hidden="true" tabindex="-1"></a><span class="fu">sdp_attention</span><span class="kw">:</span></span>
|
|
<span id="cb1-392"><a href="#cb1-392" aria-hidden="true" tabindex="-1"></a><span class="co"># Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf</span></span>
|
|
<span id="cb1-393"><a href="#cb1-393" aria-hidden="true" tabindex="-1"></a><span class="fu">s2_attention</span><span class="kw">:</span></span>
|
|
<span id="cb1-394"><a href="#cb1-394" aria-hidden="true" tabindex="-1"></a><span class="co"># Resume from a specific checkpoint dir</span></span>
|
|
<span id="cb1-395"><a href="#cb1-395" aria-hidden="true" tabindex="-1"></a><span class="fu">resume_from_checkpoint</span><span class="kw">:</span></span>
|
|
<span id="cb1-396"><a href="#cb1-396" aria-hidden="true" tabindex="-1"></a><span class="co"># If resume_from_checkpoint isn't set and you simply want it to start where it left off.</span></span>
|
|
<span id="cb1-397"><a href="#cb1-397" aria-hidden="true" tabindex="-1"></a><span class="co"># Be careful with this being turned on between different models.</span></span>
|
|
<span id="cb1-398"><a href="#cb1-398" aria-hidden="true" tabindex="-1"></a><span class="fu">auto_resume_from_checkpoints</span><span class="kw">:</span><span class="at"> </span><span class="ch">false</span></span>
|
|
<span id="cb1-399"><a href="#cb1-399" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-400"><a href="#cb1-400" aria-hidden="true" tabindex="-1"></a><span class="co"># Don't mess with this, it's here for accelerate and torchrun</span></span>
|
|
<span id="cb1-401"><a href="#cb1-401" aria-hidden="true" tabindex="-1"></a><span class="fu">local_rank</span><span class="kw">:</span></span>
|
|
<span id="cb1-402"><a href="#cb1-402" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-403"><a href="#cb1-403" aria-hidden="true" tabindex="-1"></a><span class="co"># Add or change special tokens.</span></span>
|
|
<span id="cb1-404"><a href="#cb1-404" aria-hidden="true" tabindex="-1"></a><span class="co"># If you add tokens here, you don't need to add them to the `tokens` list.</span></span>
|
|
<span id="cb1-405"><a href="#cb1-405" aria-hidden="true" tabindex="-1"></a><span class="fu">special_tokens</span><span class="kw">:</span></span>
|
|
<span id="cb1-406"><a href="#cb1-406" aria-hidden="true" tabindex="-1"></a><span class="co"> # bos_token: "<s>"</span></span>
|
|
<span id="cb1-407"><a href="#cb1-407" aria-hidden="true" tabindex="-1"></a><span class="co"> # eos_token: "</s>"</span></span>
|
|
<span id="cb1-408"><a href="#cb1-408" aria-hidden="true" tabindex="-1"></a><span class="co"> # unk_token: "<unk>"</span></span>
|
|
<span id="cb1-409"><a href="#cb1-409" aria-hidden="true" tabindex="-1"></a><span class="co"> # pad_token: "[PAD]"</span></span>
|
|
<span id="cb1-410"><a href="#cb1-410" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-411"><a href="#cb1-411" aria-hidden="true" tabindex="-1"></a><span class="co"># Add extra tokens.</span></span>
|
|
<span id="cb1-412"><a href="#cb1-412" aria-hidden="true" tabindex="-1"></a><span class="fu">tokens</span><span class="kw">:</span></span>
|
|
<span id="cb1-413"><a href="#cb1-413" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-414"><a href="#cb1-414" aria-hidden="true" tabindex="-1"></a><span class="co"># FSDP</span></span>
|
|
<span id="cb1-415"><a href="#cb1-415" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp</span><span class="kw">:</span></span>
|
|
<span id="cb1-416"><a href="#cb1-416" aria-hidden="true" tabindex="-1"></a><span class="fu">fsdp_config</span><span class="kw">:</span></span>
|
|
<span id="cb1-417"><a href="#cb1-417" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-418"><a href="#cb1-418" aria-hidden="true" tabindex="-1"></a><span class="co"># Deepspeed config path. e.g., deepspeed_configs/zero3.json</span></span>
|
|
<span id="cb1-419"><a href="#cb1-419" aria-hidden="true" tabindex="-1"></a><span class="fu">deepspeed</span><span class="kw">:</span></span>
|
|
<span id="cb1-420"><a href="#cb1-420" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-421"><a href="#cb1-421" aria-hidden="true" tabindex="-1"></a><span class="co"># Advanced DDP Arguments</span></span>
|
|
<span id="cb1-422"><a href="#cb1-422" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_timeout</span><span class="kw">:</span></span>
|
|
<span id="cb1-423"><a href="#cb1-423" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_bucket_cap_mb</span><span class="kw">:</span></span>
|
|
<span id="cb1-424"><a href="#cb1-424" aria-hidden="true" tabindex="-1"></a><span class="fu">ddp_broadcast_buffers</span><span class="kw">:</span></span>
|
|
<span id="cb1-425"><a href="#cb1-425" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-426"><a href="#cb1-426" aria-hidden="true" tabindex="-1"></a><span class="co"># Path to torch distx for optim 'adamw_anyprecision'</span></span>
|
|
<span id="cb1-427"><a href="#cb1-427" aria-hidden="true" tabindex="-1"></a><span class="fu">torchdistx_path</span><span class="kw">:</span></span>
|
|
<span id="cb1-428"><a href="#cb1-428" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-429"><a href="#cb1-429" aria-hidden="true" tabindex="-1"></a><span class="co"># Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize</span></span>
|
|
<span id="cb1-430"><a href="#cb1-430" aria-hidden="true" tabindex="-1"></a><span class="fu">pretraining_dataset</span><span class="kw">:</span></span>
|
|
<span id="cb1-431"><a href="#cb1-431" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-432"><a href="#cb1-432" aria-hidden="true" tabindex="-1"></a><span class="co"># Debug mode</span></span>
|
|
<span id="cb1-433"><a href="#cb1-433" aria-hidden="true" tabindex="-1"></a><span class="fu">debug</span><span class="kw">:</span></span>
|
|
<span id="cb1-434"><a href="#cb1-434" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-435"><a href="#cb1-435" aria-hidden="true" tabindex="-1"></a><span class="co"># Seed</span></span>
|
|
<span id="cb1-436"><a href="#cb1-436" aria-hidden="true" tabindex="-1"></a><span class="fu">seed</span><span class="kw">:</span></span>
|
|
<span id="cb1-437"><a href="#cb1-437" aria-hidden="true" tabindex="-1"></a></span>
|
|
<span id="cb1-438"><a href="#cb1-438" aria-hidden="true" tabindex="-1"></a><span class="co"># Allow overwrite yml config using from cli</span></span>
|
|
<span id="cb1-439"><a href="#cb1-439" aria-hidden="true" tabindex="-1"></a><span class="fu">strict</span><span class="kw">:</span></span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
|
|
|
|
|
|
|
</main> <!-- /main -->
|
|
<script id="quarto-html-after-body" type="application/javascript">
|
|
window.document.addEventListener("DOMContentLoaded", function (event) {
|
|
const toggleBodyColorMode = (bsSheetEl) => {
|
|
const mode = bsSheetEl.getAttribute("data-mode");
|
|
const bodyEl = window.document.querySelector("body");
|
|
if (mode === "dark") {
|
|
bodyEl.classList.add("quarto-dark");
|
|
bodyEl.classList.remove("quarto-light");
|
|
} else {
|
|
bodyEl.classList.add("quarto-light");
|
|
bodyEl.classList.remove("quarto-dark");
|
|
}
|
|
}
|
|
const toggleBodyColorPrimary = () => {
|
|
const bsSheetEl = window.document.querySelector("link#quarto-bootstrap");
|
|
if (bsSheetEl) {
|
|
toggleBodyColorMode(bsSheetEl);
|
|
}
|
|
}
|
|
toggleBodyColorPrimary();
|
|
const icon = "";
|
|
const anchorJS = new window.AnchorJS();
|
|
anchorJS.options = {
|
|
placement: 'right',
|
|
icon: icon
|
|
};
|
|
anchorJS.add('.anchored');
|
|
const isCodeAnnotation = (el) => {
|
|
for (const clz of el.classList) {
|
|
if (clz.startsWith('code-annotation-')) {
|
|
return true;
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
const clipboard = new window.ClipboardJS('.code-copy-button', {
|
|
text: function(trigger) {
|
|
const codeEl = trigger.previousElementSibling.cloneNode(true);
|
|
for (const childEl of codeEl.children) {
|
|
if (isCodeAnnotation(childEl)) {
|
|
childEl.remove();
|
|
}
|
|
}
|
|
return codeEl.innerText;
|
|
}
|
|
});
|
|
clipboard.on('success', function(e) {
|
|
// button target
|
|
const button = e.trigger;
|
|
// don't keep focus
|
|
button.blur();
|
|
// flash "checked"
|
|
button.classList.add('code-copy-button-checked');
|
|
var currentTitle = button.getAttribute("title");
|
|
button.setAttribute("title", "Copied!");
|
|
let tooltip;
|
|
if (window.bootstrap) {
|
|
button.setAttribute("data-bs-toggle", "tooltip");
|
|
button.setAttribute("data-bs-placement", "left");
|
|
button.setAttribute("data-bs-title", "Copied!");
|
|
tooltip = new bootstrap.Tooltip(button,
|
|
{ trigger: "manual",
|
|
customClass: "code-copy-button-tooltip",
|
|
offset: [0, -8]});
|
|
tooltip.show();
|
|
}
|
|
setTimeout(function() {
|
|
if (tooltip) {
|
|
tooltip.hide();
|
|
button.removeAttribute("data-bs-title");
|
|
button.removeAttribute("data-bs-toggle");
|
|
button.removeAttribute("data-bs-placement");
|
|
}
|
|
button.setAttribute("title", currentTitle);
|
|
button.classList.remove('code-copy-button-checked');
|
|
}, 1000);
|
|
// clear code selection
|
|
e.clearSelection();
|
|
});
|
|
var localhostRegex = new RegExp(/^(?:http|https):\/\/localhost\:?[0-9]*\//);
|
|
var mailtoRegex = new RegExp(/^mailto:/);
|
|
var filterRegex = new RegExp("https:\/\/OpenAccess-AI-Collective\.github\.io\/axolotl\/");
|
|
var isInternal = (href) => {
|
|
return filterRegex.test(href) || localhostRegex.test(href) || mailtoRegex.test(href);
|
|
}
|
|
// Inspect non-navigation links and adorn them if external
|
|
var links = window.document.querySelectorAll('a[href]:not(.nav-link):not(.navbar-brand):not(.toc-action):not(.sidebar-link):not(.sidebar-item-toggle):not(.pagination-link):not(.no-external):not([aria-hidden]):not(.dropdown-item):not(.quarto-navigation-tool)');
|
|
for (var i=0; i<links.length; i++) {
|
|
const link = links[i];
|
|
if (!isInternal(link.href)) {
|
|
// undo the damage that might have been done by quarto-nav.js in the case of
|
|
// links that we want to consider external
|
|
if (link.dataset.originalHref !== undefined) {
|
|
link.href = link.dataset.originalHref;
|
|
}
|
|
}
|
|
}
|
|
function tippyHover(el, contentFn, onTriggerFn, onUntriggerFn) {
|
|
const config = {
|
|
allowHTML: true,
|
|
maxWidth: 500,
|
|
delay: 100,
|
|
arrow: false,
|
|
appendTo: function(el) {
|
|
return el.parentElement;
|
|
},
|
|
interactive: true,
|
|
interactiveBorder: 10,
|
|
theme: 'quarto',
|
|
placement: 'bottom-start',
|
|
};
|
|
if (contentFn) {
|
|
config.content = contentFn;
|
|
}
|
|
if (onTriggerFn) {
|
|
config.onTrigger = onTriggerFn;
|
|
}
|
|
if (onUntriggerFn) {
|
|
config.onUntrigger = onUntriggerFn;
|
|
}
|
|
window.tippy(el, config);
|
|
}
|
|
const noterefs = window.document.querySelectorAll('a[role="doc-noteref"]');
|
|
for (var i=0; i<noterefs.length; i++) {
|
|
const ref = noterefs[i];
|
|
tippyHover(ref, function() {
|
|
// use id or data attribute instead here
|
|
let href = ref.getAttribute('data-footnote-href') || ref.getAttribute('href');
|
|
try { href = new URL(href).hash; } catch {}
|
|
const id = href.replace(/^#\/?/, "");
|
|
const note = window.document.getElementById(id);
|
|
if (note) {
|
|
return note.innerHTML;
|
|
} else {
|
|
return "";
|
|
}
|
|
});
|
|
}
|
|
const xrefs = window.document.querySelectorAll('a.quarto-xref');
|
|
const processXRef = (id, note) => {
|
|
// Strip column container classes
|
|
const stripColumnClz = (el) => {
|
|
el.classList.remove("page-full", "page-columns");
|
|
if (el.children) {
|
|
for (const child of el.children) {
|
|
stripColumnClz(child);
|
|
}
|
|
}
|
|
}
|
|
stripColumnClz(note)
|
|
if (id === null || id.startsWith('sec-')) {
|
|
// Special case sections, only their first couple elements
|
|
const container = document.createElement("div");
|
|
if (note.children && note.children.length > 2) {
|
|
container.appendChild(note.children[0].cloneNode(true));
|
|
for (let i = 1; i < note.children.length; i++) {
|
|
const child = note.children[i];
|
|
if (child.tagName === "P" && child.innerText === "") {
|
|
continue;
|
|
} else {
|
|
container.appendChild(child.cloneNode(true));
|
|
break;
|
|
}
|
|
}
|
|
if (window.Quarto?.typesetMath) {
|
|
window.Quarto.typesetMath(container);
|
|
}
|
|
return container.innerHTML
|
|
} else {
|
|
if (window.Quarto?.typesetMath) {
|
|
window.Quarto.typesetMath(note);
|
|
}
|
|
return note.innerHTML;
|
|
}
|
|
} else {
|
|
// Remove any anchor links if they are present
|
|
const anchorLink = note.querySelector('a.anchorjs-link');
|
|
if (anchorLink) {
|
|
anchorLink.remove();
|
|
}
|
|
if (window.Quarto?.typesetMath) {
|
|
window.Quarto.typesetMath(note);
|
|
}
|
|
// TODO in 1.5, we should make sure this works without a callout special case
|
|
if (note.classList.contains("callout")) {
|
|
return note.outerHTML;
|
|
} else {
|
|
return note.innerHTML;
|
|
}
|
|
}
|
|
}
|
|
for (var i=0; i<xrefs.length; i++) {
|
|
const xref = xrefs[i];
|
|
tippyHover(xref, undefined, function(instance) {
|
|
instance.disable();
|
|
let url = xref.getAttribute('href');
|
|
let hash = undefined;
|
|
if (url.startsWith('#')) {
|
|
hash = url;
|
|
} else {
|
|
try { hash = new URL(url).hash; } catch {}
|
|
}
|
|
if (hash) {
|
|
const id = hash.replace(/^#\/?/, "");
|
|
const note = window.document.getElementById(id);
|
|
if (note !== null) {
|
|
try {
|
|
const html = processXRef(id, note.cloneNode(true));
|
|
instance.setContent(html);
|
|
} finally {
|
|
instance.enable();
|
|
instance.show();
|
|
}
|
|
} else {
|
|
// See if we can fetch this
|
|
fetch(url.split('#')[0])
|
|
.then(res => res.text())
|
|
.then(html => {
|
|
const parser = new DOMParser();
|
|
const htmlDoc = parser.parseFromString(html, "text/html");
|
|
const note = htmlDoc.getElementById(id);
|
|
if (note !== null) {
|
|
const html = processXRef(id, note);
|
|
instance.setContent(html);
|
|
}
|
|
}).finally(() => {
|
|
instance.enable();
|
|
instance.show();
|
|
});
|
|
}
|
|
} else {
|
|
// See if we can fetch a full url (with no hash to target)
|
|
// This is a special case and we should probably do some content thinning / targeting
|
|
fetch(url)
|
|
.then(res => res.text())
|
|
.then(html => {
|
|
const parser = new DOMParser();
|
|
const htmlDoc = parser.parseFromString(html, "text/html");
|
|
const note = htmlDoc.querySelector('main.content');
|
|
if (note !== null) {
|
|
// This should only happen for chapter cross references
|
|
// (since there is no id in the URL)
|
|
// remove the first header
|
|
if (note.children.length > 0 && note.children[0].tagName === "HEADER") {
|
|
note.children[0].remove();
|
|
}
|
|
const html = processXRef(null, note);
|
|
instance.setContent(html);
|
|
}
|
|
}).finally(() => {
|
|
instance.enable();
|
|
instance.show();
|
|
});
|
|
}
|
|
}, function(instance) {
|
|
});
|
|
}
|
|
let selectedAnnoteEl;
|
|
const selectorForAnnotation = ( cell, annotation) => {
|
|
let cellAttr = 'data-code-cell="' + cell + '"';
|
|
let lineAttr = 'data-code-annotation="' + annotation + '"';
|
|
const selector = 'span[' + cellAttr + '][' + lineAttr + ']';
|
|
return selector;
|
|
}
|
|
const selectCodeLines = (annoteEl) => {
|
|
const doc = window.document;
|
|
const targetCell = annoteEl.getAttribute("data-target-cell");
|
|
const targetAnnotation = annoteEl.getAttribute("data-target-annotation");
|
|
const annoteSpan = window.document.querySelector(selectorForAnnotation(targetCell, targetAnnotation));
|
|
const lines = annoteSpan.getAttribute("data-code-lines").split(",");
|
|
const lineIds = lines.map((line) => {
|
|
return targetCell + "-" + line;
|
|
})
|
|
let top = null;
|
|
let height = null;
|
|
let parent = null;
|
|
if (lineIds.length > 0) {
|
|
//compute the position of the single el (top and bottom and make a div)
|
|
const el = window.document.getElementById(lineIds[0]);
|
|
top = el.offsetTop;
|
|
height = el.offsetHeight;
|
|
parent = el.parentElement.parentElement;
|
|
if (lineIds.length > 1) {
|
|
const lastEl = window.document.getElementById(lineIds[lineIds.length - 1]);
|
|
const bottom = lastEl.offsetTop + lastEl.offsetHeight;
|
|
height = bottom - top;
|
|
}
|
|
if (top !== null && height !== null && parent !== null) {
|
|
// cook up a div (if necessary) and position it
|
|
let div = window.document.getElementById("code-annotation-line-highlight");
|
|
if (div === null) {
|
|
div = window.document.createElement("div");
|
|
div.setAttribute("id", "code-annotation-line-highlight");
|
|
div.style.position = 'absolute';
|
|
parent.appendChild(div);
|
|
}
|
|
div.style.top = top - 2 + "px";
|
|
div.style.height = height + 4 + "px";
|
|
div.style.left = 0;
|
|
let gutterDiv = window.document.getElementById("code-annotation-line-highlight-gutter");
|
|
if (gutterDiv === null) {
|
|
gutterDiv = window.document.createElement("div");
|
|
gutterDiv.setAttribute("id", "code-annotation-line-highlight-gutter");
|
|
gutterDiv.style.position = 'absolute';
|
|
const codeCell = window.document.getElementById(targetCell);
|
|
const gutter = codeCell.querySelector('.code-annotation-gutter');
|
|
gutter.appendChild(gutterDiv);
|
|
}
|
|
gutterDiv.style.top = top - 2 + "px";
|
|
gutterDiv.style.height = height + 4 + "px";
|
|
}
|
|
selectedAnnoteEl = annoteEl;
|
|
}
|
|
};
|
|
const unselectCodeLines = () => {
|
|
const elementsIds = ["code-annotation-line-highlight", "code-annotation-line-highlight-gutter"];
|
|
elementsIds.forEach((elId) => {
|
|
const div = window.document.getElementById(elId);
|
|
if (div) {
|
|
div.remove();
|
|
}
|
|
});
|
|
selectedAnnoteEl = undefined;
|
|
};
|
|
// Handle positioning of the toggle
|
|
window.addEventListener(
|
|
"resize",
|
|
throttle(() => {
|
|
elRect = undefined;
|
|
if (selectedAnnoteEl) {
|
|
selectCodeLines(selectedAnnoteEl);
|
|
}
|
|
}, 10)
|
|
);
|
|
function throttle(fn, ms) {
|
|
let throttle = false;
|
|
let timer;
|
|
return (...args) => {
|
|
if(!throttle) { // first call gets through
|
|
fn.apply(this, args);
|
|
throttle = true;
|
|
} else { // all the others get throttled
|
|
if(timer) clearTimeout(timer); // cancel #2
|
|
timer = setTimeout(() => {
|
|
fn.apply(this, args);
|
|
timer = throttle = false;
|
|
}, ms);
|
|
}
|
|
};
|
|
}
|
|
// Attach click handler to the DT
|
|
const annoteDls = window.document.querySelectorAll('dt[data-target-cell]');
|
|
for (const annoteDlNode of annoteDls) {
|
|
annoteDlNode.addEventListener('click', (event) => {
|
|
const clickedEl = event.target;
|
|
if (clickedEl !== selectedAnnoteEl) {
|
|
unselectCodeLines();
|
|
const activeEl = window.document.querySelector('dt[data-target-cell].code-annotation-active');
|
|
if (activeEl) {
|
|
activeEl.classList.remove('code-annotation-active');
|
|
}
|
|
selectCodeLines(clickedEl);
|
|
clickedEl.classList.add('code-annotation-active');
|
|
} else {
|
|
// Unselect the line
|
|
unselectCodeLines();
|
|
clickedEl.classList.remove('code-annotation-active');
|
|
}
|
|
});
|
|
}
|
|
const findCites = (el) => {
|
|
const parentEl = el.parentElement;
|
|
if (parentEl) {
|
|
const cites = parentEl.dataset.cites;
|
|
if (cites) {
|
|
return {
|
|
el,
|
|
cites: cites.split(' ')
|
|
};
|
|
} else {
|
|
return findCites(el.parentElement)
|
|
}
|
|
} else {
|
|
return undefined;
|
|
}
|
|
};
|
|
var bibliorefs = window.document.querySelectorAll('a[role="doc-biblioref"]');
|
|
for (var i=0; i<bibliorefs.length; i++) {
|
|
const ref = bibliorefs[i];
|
|
const citeInfo = findCites(ref);
|
|
if (citeInfo) {
|
|
tippyHover(citeInfo.el, function() {
|
|
var popup = window.document.createElement('div');
|
|
citeInfo.cites.forEach(function(cite) {
|
|
var citeDiv = window.document.createElement('div');
|
|
citeDiv.classList.add('hanging-indent');
|
|
citeDiv.classList.add('csl-entry');
|
|
var biblioDiv = window.document.getElementById('ref-' + cite);
|
|
if (biblioDiv) {
|
|
citeDiv.innerHTML = biblioDiv.innerHTML;
|
|
}
|
|
popup.appendChild(citeDiv);
|
|
});
|
|
return popup.innerHTML;
|
|
});
|
|
}
|
|
}
|
|
});
|
|
</script>
|
|
</div> <!-- /content -->
|
|
|
|
|
|
|
|
|
|
</body></html> |