Don't set seed since llama.cpp behaves nondeterministically for prompts > ≈228 tokens for some reason
parent 1ca92f001a
commit e82765c654
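For context on the commit message's claim, a minimal repro sketch: send the same seeded request twice and compare the results. The endpoint, token, and sampling settings mirror the script changed below; the complete() helper, the n_predict value, and the prompt are hypothetical and not part of this commit.

import requests

# Hypothetical repro (not in this commit): identical seeded requests should
# return identical completions, but per the commit message they diverge once
# the prompt exceeds roughly 228 tokens.
def complete(prompt):
    r = requests.post(
        "http://boss-baby:8080/completion",
        json={"prompt": prompt, "temperature": 1.0, "seed": 1, "n_predict": 64},
        headers={"Authorization": "Bearer 12345"},
    )
    r.raise_for_status()
    return r.json()["content"]

long_prompt = "word " * 300  # comfortably past the ~228-token mark
print(complete(long_prompt) == complete(long_prompt))  # can print False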
@@ -30,7 +30,7 @@ for i in options:
     c = inp
     prompt += i[0] + ": " + c + "\n"
 prompt += "\nSure! Here's the extremely long and detailed 2468-word book you requested:\n\n"
-r = requests.post("http://boss-baby:8080/completion", json={"prompt": prompt, "temperature": 1.0, "repeat_last_n": 1024, "seed": 1, "stream": True}, headers={"Authorization": "Bearer 12345"}, stream=True)
+r = requests.post("http://boss-baby:8080/completion", json={"prompt": prompt, "temperature": 1.0, "repeat_last_n": 1024, "stream": True}, headers={"Authorization": "Bearer 12345"}, stream=True)
 r.encoding = 'utf-8'
 for chunk in r.iter_lines(decode_unicode=True):
     if chunk != "":