Update bf16 options

This commit is contained in:
NanoCode012
2023-05-25 17:28:03 +09:00
parent 9083910036
commit 5b712afbe4

View File

@@ -172,7 +172,7 @@ gptq_model_v1: false # v1 or v2
load_in_8bit: true
# Use CUDA bf16
-bf16: true
+bf16: true # bool or 'full' for `bf16_full_eval`
# Use CUDA fp16
fp16: true
# Use CUDA tf32