Cleanup and reformat with autopep8

This commit is contained in:
Anthony Wang 2022-02-21 15:03:28 -06:00
parent 9e84768780
commit 423c1d8304
Signed by: a
GPG key ID: BC96B00AEC5F2D76
4 changed files with 17 additions and 9 deletions

13
bot.py
View file

@@ -1,10 +1,17 @@
#!/usr/bin/python3
"""Post a pre-generated status to a Mastodon account.

Usage: bot.py -t <access-token>
"""
from argparse import ArgumentParser

from mastodon import Mastodon

# Read the Mastodon application access token from the command line.
parser = ArgumentParser()
parser.add_argument('-t', '--token', help='Mastodon application access token')
args = parser.parse_args()

# Authenticate against the instance using the supplied token.
mastodon = Mastodon(
    access_token=args.token,
    api_base_url='https://social.exozy.me/'
)

# Publish the generated status text.
mastodon.status_post('i am pretty horrible but i can keep reading the same dtb thing to be a really cool people there a notification in every fediverse in liberal democracies and games iaposd looks like to enjoy course at that goes and is it in case youaposre always welcome this time ago maybe you can start things that i think you want me to screenshot you use straw as an owl party platform in the algerian population for that have the list i just wanted to be a registry that you are out where the app actually the')

View file

@@ -1,6 +1,6 @@
from re import sub
from html import unescape
from argparse import ArgumentParser
from html import unescape
from re import sub
from psycopg2 import connect
@@ -29,6 +29,7 @@ words = [sub(r'[^a-z0-9]', '', word.lower())
# Drop the empty strings left behind by the punctuation-stripping step.
words = [word for word in words if word]

# Persist the cleaned word list, one word per line.
with open(args.output, 'w') as out:
    out.writelines(word + '\n' for word in words)

View file

@@ -25,4 +25,4 @@ class Dataset(torch.utils.data.Dataset):
def __getitem__(self, index):
return (torch.tensor(self.words_indexes[index:index+self.seq_size]),
torch.tensor(self.words_indexes[index+1:index+self.seq_size+1]))
torch.tensor(self.words_indexes[index+1:index+self.seq_size+1]))

View file

@@ -4,14 +4,14 @@ import torch.nn as nn
class Model(nn.Module):
def __init__(self, dataset, embedding_size, lstm_size, num_layers, dropout):
super(Model, self).__init__()
self.seq_size = dataset.seq_size
self.lstm_size = lstm_size
self.num_layers = num_layers
n_vocab = len(dataset.uniq_words)
self.embedding = nn.Embedding(n_vocab, embedding_size)
self.lstm = nn.LSTM(
input_size=embedding_size,
hidden_size=lstm_size,
@@ -30,4 +30,4 @@ class Model(nn.Module):
def zero_state(self, batch_size):
return (torch.zeros(self.num_layers, batch_size, self.lstm_size),
torch.zeros(self.num_layers, batch_size, self.lstm_size))
torch.zeros(self.num_layers, batch_size, self.lstm_size))