"""Generate a short text with a (distil)GPT-2 model and post it to Mastodon.

Usage: pass the Mastodon instance URL, an application access token, an
optional seed phrase, and the path of a saved model checkpoint.
"""
from argparse import ArgumentParser
from random import choice

from mastodon import Mastodon
from transformers import AutoTokenizer, AutoModelForCausalLM

parser = ArgumentParser()
parser.add_argument('-i', '--instance', help='Mastodon instance hosting the bot')
parser.add_argument('-t', '--token', help='Mastodon application access token')
parser.add_argument('-n', '--input', help='initial input text')
parser.add_argument('-m', '--model', default='model',
                    help='path to load saved model')
args = parser.parse_args()

# The tokenizer is always the stock distilgpt2 vocabulary, while the model
# weights come from --model.
# NOTE(review): this presumes --model is a distilgpt2 fine-tune sharing that
# vocabulary — confirm, otherwise token ids will not match the model.
tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
model = AutoModelForCausalLM.from_pretrained(args.model)

if args.input is None:
    # No seed given: pick a random opening phrase.
    args.input = choice([
        'I am', 'My life is', 'Computers are', 'This is', 'My', 'I\'ve',
        'No one', 'I love', 'I will die of', 'I', 'The', 'Anime'
    ])

# Run the input through the model (sampling, nucleus top-p).
inputs = tokenizer.encode(args.input, return_tensors="pt")
output = tokenizer.decode(model.generate(
    inputs, do_sample=True, max_length=100, top_p=0.9)[0])
print(output)

# Post it to Mastodon.
mastodon = Mastodon(
    access_token=args.token,
    api_base_url=args.instance
)

# Use the first generated line; if it is short and a second line exists,
# append it too. (Guarding on the line count fixes an IndexError the
# original code hit whenever the generation contained no newline.)
lines = output.split('\n')
post = lines[0]
if len(post) < 100 and len(lines) > 1:
    post = lines[0] + '\n' + lines[1]
# Mastodon's default status limit is 500 characters.
mastodon.status_post(post[:500])