Diffstat (limited to 'bot.py')
-rw-r--r--  bot.py  45
1 file changed, 30 insertions, 15 deletions
diff --git a/bot.py b/bot.py
index d9d0d93..52e29a8 100644
--- a/bot.py
+++ b/bot.py
@@ -1,5 +1,5 @@
 from argparse import ArgumentParser
-from random import choice
+from random import randint, choice
 from mastodon import Mastodon
 from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -20,23 +20,38 @@ model = AutoModelForCausalLM.from_pretrained(args.model)
 if args.input is None:
     # Create random input
-    args.input = choice([
-        'I am',
-        'My life is',
-        'Computers are',
-        'This is',
-        'My',
-        'I\'ve',
-        'No one',
-        'I love',
-        'I will die of',
-        'I',
-        'The',
-        'Anime'
-    ])
+    if randint(0, 1) == 0:
+        args.input = choice([
+            'I am',
+            'My life is',
+            'Computers are',
+            'This is',
+            'My',
+            'I\'ve',
+            'No one',
+            'I love',
+            'I will die of',
+            'I',
+            'The',
+            'Anime',
+            'I\'m going to die',
+            'Hello',
+            'My PinePhone',
+            '@ta180m@exozy.me',
+            'Life',
+            'My favorite',
+            'I\'m not',
+            'I hate',
+            'I think'
+        ])
+    else:
+        with open('data', 'r') as f:
+            line = choice(f.readlines()).split()
+            args.input = line[0] + ' ' + line[1]
 # Run the input through the model
+print(args.input)
 inputs = tokenizer.encode(args.input, return_tensors="pt")
 output = tokenizer.decode(model.generate(
     inputs, do_sample=True, max_length=100, top_p=0.9)[0])
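
For context, a minimal standalone sketch of the prompt-selection behaviour this commit introduces: half the time a canned prompt is chosen, otherwise the first two words of a random line from the 'data' file seed the model. The pick_prompt helper name and the shortened prompt list are illustrative; only the coin flip, the 'data' file name, and the two-word slice come from the diff, and the sketch assumes every line in 'data' has at least two words, as the committed code does.

    from random import choice, randint

    # Illustrative subset of the prompt list added in this commit.
    PROMPTS = ['I am', 'My life is', 'Computers are', 'Hello', 'I think']

    def pick_prompt(data_path='data'):
        # Coin flip: canned prompt half the time, otherwise seed from the corpus.
        if randint(0, 1) == 0:
            return choice(PROMPTS)
        with open(data_path, 'r') as f:
            words = choice(f.readlines()).split()
        # Take the first two words of a random line, as the committed code does.
        return words[0] + ' ' + words[1]

    if __name__ == '__main__':
        print(pick_prompt())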