Use WizardLM instead of Vicuna
commit 181d2f5793
parent 951128cf7a
main.py | 4 ++--
@@ -20,11 +20,11 @@ def llama(prompt):
             "-ngl",
             "32",
             "-m",
-            "/opt/llama.cpp/models/ggml-vicuna-7b-1.1-q4_0.bin",
+            "/opt/llama.cpp/models/wizardLM-7B.ggmlv3.q4_0.bin",
             "-n",
             "1024",
             "-p",
-            f"### Human: {prompt}\n### Assistant:",
+            f"{prompt}\n\n### Response:",
         ],
         stdout=subprocess.PIPE,
     )