Use WizardLM instead of Vicuna
This commit is contained in:
parent
951128cf7a
commit
181d2f5793
4
main.py
4
main.py
@@ -20,11 +20,11 @@ def llama(prompt):
             "-ngl",
             "32",
             "-m",
-            "/opt/llama.cpp/models/ggml-vicuna-7b-1.1-q4_0.bin",
+            "/opt/llama.cpp/models/wizardLM-7B.ggmlv3.q4_0.bin",
             "-n",
             "1024",
             "-p",
-            f"### Human: {prompt}\n### Assistant:",
+            f"{prompt}\n\n### Response:",
         ],
         stdout=subprocess.PIPE,
     )
|
Loading…
Reference in a new issue