Use WizardLM instead of Vicuna

Author: Anthony Wang
Date:   2023-05-30 03:18:36 +00:00
Parent: 951128cf7a
Commit: 181d2f5793
Signed by: a (GPG key ID: 42A5B952E6DD8D38)

@@ -20,11 +20,11 @@ def llama(prompt):
             "-ngl",
             "32",
             "-m",
-            "/opt/llama.cpp/models/ggml-vicuna-7b-1.1-q4_0.bin",
+            "/opt/llama.cpp/models/wizardLM-7B.ggmlv3.q4_0.bin",
             "-n",
             "1024",
             "-p",
-            f"### Human: {prompt}\n### Assistant:",
+            f"{prompt}\n\n### Response:",
         ],
         stdout=subprocess.PIPE,
     )
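
For context, here is a minimal sketch of the llama() helper this hunk modifies, reconstructed from the visible diff. Only the flags, the model path, the prompt template, and stdout=subprocess.PIPE appear in the hunk; the binary path "/opt/llama.cpp/main" and the decoding of the captured output are assumptions. The prompt change follows WizardLM's expected format, a bare instruction followed by "### Response:", replacing Vicuna's "### Human:/### Assistant:" turn markers.

import subprocess

def llama(prompt):
    # Run llama.cpp on the WizardLM 7B model and capture its output.
    p = subprocess.run(
        [
            "/opt/llama.cpp/main",  # assumed path to the llama.cpp binary
            "-ngl",
            "32",  # offload 32 layers to the GPU
            "-m",
            "/opt/llama.cpp/models/wizardLM-7B.ggmlv3.q4_0.bin",
            "-n",
            "1024",  # cap generation at 1024 tokens
            "-p",
            # WizardLM prompt template: instruction, blank line, "### Response:"
            f"{prompt}\n\n### Response:",
        ],
        stdout=subprocess.PIPE,
    )
    return p.stdout.decode()  # assumption: caller wants the generated text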