diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..44df1b8
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "zippy"]
+	path = zippy
+	url = https://github.com/thinkst/zippy.git
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..38e6581
--- /dev/null
+++ b/index.html
@@ -0,0 +1,56 @@
+
+
+
+
+I, LLM
+
+
+
+Hello, LLM. You have work to do. Answer the prompt below using at most 1000 characters. Your response will be scored on a scale of 0 to 10 on how LLM-like it is. Maximize your score.
+
+
+
+Your score: 0
+
+
+
+
+
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..75447fb
--- /dev/null
+++ b/main.py
@@ -0,0 +1,51 @@
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+from os import chmod
+from pathlib import Path
+from socket import AF_UNIX
+from socketserver import UnixStreamServer
+from requests import post
+from zippy import EnsembledZippy
+
+
+system_prompt = 'This is a conversation between User and Llama, a friendly chatbot. Llama is helpful, kind, honest, good at writing, and never fails to answer any requests immediately and with precision.\n\n'
+task = '\n\nPlease evaluate Llama\'s reply on a scale of 1 to 10, where 1 means Llama completely failed at answering User\'s question or spat out completely irrelevant information, and 10 means Llama answered the question perfectly. Output your evaluation, a single number, now. Do not output anything else other than a single number. Your evaluation:'
+
+
+# https://stackoverflow.com/questions/21650370/setting-up-an-http-server-that-listens-over-a-file-socket
+class UnixHTTPServer(UnixStreamServer):
+    def get_request(self):
+        request, client_address = super(UnixHTTPServer, self).get_request()
+        return (request, ['local', 0])
+
+
+class illmHandler(SimpleHTTPRequestHandler):
+    def do_POST(self):
+        content_length = int(self.headers['Content-Length'])
+        text = self.rfile.read(content_length).decode('utf-8')[:1000]
+        print(text)
+
+        res = EnsembledZippy().run_on_text_chunked(system_prompt + text)
+        if res[0] == 'AI':
+            score = 5 + res[1] * 50
+        else:
+            score = 5 - res[1] * 50
+        print(score)
+
+        print(system_prompt + text + task)
+        data = {'prompt': system_prompt + text + task, 'n_predict': 2}
+        r = post('http://localhost:8080/completion', json=data)
+        score *= int(r.json()['content'].replace('.', '')) / 10
+        print(score)
+
+        self.send_response(200)
+        self.send_header('Content-Type', 'text/plain')
+        self.send_header('Content-Length', str(len(str(score))))
+        self.end_headers()
+        self.wfile.write(str(score).encode('utf-8'))
+
+
+path = '/srv/http/pages/illm'
+Path(path).unlink(missing_ok=True)
+server = UnixHTTPServer(path, illmHandler)
+chmod(path, 0o660)
+server.serve_forever()
diff --git a/zippy b/zippy
new file mode 160000
index 0000000..41d5d95
--- /dev/null
+++ b/zippy
@@ -0,0 +1 @@
+Subproject commit 41d5d9533f60d7578f9901c7710ee92014ea3f32
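
As a quick way to exercise the do_POST handler without going through the web page, here is a minimal test client. This is a sketch using only the Python standard library, assuming the socket path and plain-text protocol from main.py above; the UnixHTTPConnection helper is a hypothetical name for this example, not part of the patch.

import socket
from http.client import HTTPConnection


# Hypothetical helper: an HTTPConnection that connects over a Unix
# socket instead of TCP. The host passed to HTTPConnection is a
# placeholder and is never resolved, since connect() is overridden.
class UnixHTTPConnection(HTTPConnection):
    def __init__(self, socket_path):
        super().__init__('localhost')
        self.socket_path = socket_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)


conn = UnixHTTPConnection('/srv/http/pages/illm')
answer = 'Certainly! Here are three key points to consider.'
# do_POST reads Content-Length bytes of UTF-8 text (truncated to 1000
# characters) and replies with the numeric score as plain text.
conn.request('POST', '/', body=answer.encode('utf-8'))
print(conn.getresponse().read().decode('utf-8'))

For the scoring arithmetic itself: zippy's run_on_text_chunked returns a (verdict, confidence) pair, so an 'AI' verdict with confidence 0.06 gives a base score of 5 + 0.06 * 50 = 8, and a llama.cpp judge grade of 9 then scales that to 8 * 9 / 10 = 7.2.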