diff options
author     Anthony Wang  2022-07-15 18:48:19 -0500
committer  Anthony Wang  2022-07-15 18:48:19 -0500
commit     47407b9fb644959b950f8d70cc33eea0bd08932e (patch)
tree       18e546d510f1a1346a11b19b023342f0b2ac867f /bot.py
parent     354ebba7892380d6935b9c9e0c72624e7fcced83 (diff)
Use gpt2-large instead of distilgpt2
Diffstat (limited to 'bot.py')
-rw-r--r--  bot.py  7
1 file changed, 4 insertions(+), 3 deletions(-)
@@ -1,6 +1,7 @@
 from argparse import ArgumentParser
 from random import randint, choice
+from torch import float16
 from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -17,8 +18,8 @@ parser.add_argument('-m', '--model', default='model',
 args = parser.parse_args()

-tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
-model = AutoModelForCausalLM.from_pretrained(args.model)
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
+model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=float16).to('cuda')

 if args.input is None:
@@ -71,7 +72,7 @@ if args.input is None:

 # Run the input through the model
 print(args.input)
-inputs = tokenizer.encode(args.input, return_tensors='pt')
+inputs = tokenizer.encode(args.input, return_tensors='pt').to('cuda')
 output = tokenizer.decode(model.generate(
     inputs, do_sample=True, max_length=150, top_p=0.9)[0])
 print(output)