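"""Mastodon bot that posts text generated by a fine-tuned DistilGPT-2 model.

Example invocation (illustrative only; the instance URL, token, and paths
below are placeholders, not values from this repository):

    python bot.py -i https://example.social -t ACCESS_TOKEN -m ./model -d ./data
"""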
from argparse import ArgumentParser
from random import randint, choice

from mastodon import Mastodon
from transformers import AutoTokenizer, AutoModelForCausalLM


parser = ArgumentParser()
parser.add_argument('-i', '--instance', required=True,
                    help='Mastodon instance hosting the bot')
parser.add_argument('-t', '--token', required=True,
                    help='Mastodon application access token')
parser.add_argument('-n', '--input', help='initial input text')
parser.add_argument('-d', '--data', default='data',
                    help='path to data file for automatic input generation')
parser.add_argument('-m', '--model', default='model',
                    help='path to the saved model')
args = parser.parse_args()


# The tokenizer is the stock DistilGPT-2 tokenizer; the model is a local
# fine-tuned checkpoint loaded from --model
tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
model = AutoModelForCausalLM.from_pretrained(args.model)


if args.input is None:
    # No input given: flip a coin between a canned opener and the data file
    if randint(0, 1) == 0:
        args.input = choice([
            'I am',
            'My life is',
            'Computers are',
            'This is',
            'My',
            'I\'ve',
            'No one',
            'I love',
            'I will die of',
            'I',
            'The',
            'Anime',
            'I\'m going to die',
            'Hello',
            '@ta180m@exozy.me',
            'Life',
            'My favorite',
            'I\'m not',
            'I hate',
            'I think',
            'In my opinion',
            'Breaking news:',
            'Have I ever told you that',
            'I read on the news that',
            'I never knew that',
            'My dream is',
            'It\'s terrible that'
        ])
    else:
        # Use the first two words of a random line from the data file
        with open(args.data, 'r') as f:
            lines = f.readlines()
        line = choice(lines).split()
        # Resample until the chosen line has at least two words
        while len(line) < 2:
            line = choice(lines).split()
        args.input = line[0] + ' ' + line[1]


# Run the input through the model
print(args.input)
inputs = tokenizer.encode(args.input, return_tensors='pt')
# pad_token_id is set to eos to silence the warning GPT-2 models emit when
# generating without a pad token; skip_special_tokens keeps <|endoftext|>
# out of the posted text
output = tokenizer.decode(model.generate(
    inputs, do_sample=True, max_length=150, top_p=0.9,
    pad_token_id=tokenizer.eos_token_id)[0], skip_special_tokens=True)
print(output)


# Post it to Mastodon
mastodon = Mastodon(
    access_token=args.token,
    api_base_url=args.instance
)
# Keep the first line of the output; if it is short and a second line
# exists, include that line too
out_lines = output.split('\n')
post = out_lines[0]
if len(post) < 200 and len(out_lines) > 1:
    post = out_lines[0] + '\n' + out_lines[1]
# Mastodon's default post limit is 500 characters
mastodon.status_post(post[:500])