 bot.py   |  7 ++++---
 train.py | 12 +++++++-----
 2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/bot.py b/bot.py
index f746e26..c3d5274 100644
--- a/bot.py
+++ b/bot.py
@@ -1,6 +1,7 @@
from argparse import ArgumentParser
from random import randint, choice
+from torch import float16
from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -17,8 +18,8 @@ parser.add_argument('-m', '--model', default='model',
args = parser.parse_args()
-tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
-model = AutoModelForCausalLM.from_pretrained(args.model)
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
+model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=float16).to('cuda')
if args.input is None:
@@ -71,7 +72,7 @@ if args.input is None:
# Run the input through the model
print(args.input)
-inputs = tokenizer.encode(args.input, return_tensors='pt')
+inputs = tokenizer.encode(args.input, return_tensors='pt').to('cuda')
output = tokenizer.decode(model.generate(
inputs, do_sample=True, max_length=150, top_p=0.9)[0])
print(output)
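
For context, the bot.py hunks above amount to loading the checkpoint in half precision on the GPU and moving the encoded prompt to the same device before sampling. A minimal standalone sketch of that inference path follows; the prompt string and the CPU/float32 fallback are illustrative assumptions, not part of the patch:

    # Sketch of the inference path bot.py now takes. Assumes the gpt2-large
    # tokenizer and a fine-tuned checkpoint in ./model; falls back to
    # CPU/float32 when no CUDA GPU is available.
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dtype = torch.float16 if device == 'cuda' else torch.float32

    tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
    model = AutoModelForCausalLM.from_pretrained('model', torch_dtype=dtype).to(device)

    # Encode a prompt, move it to the same device as the model, and sample.
    inputs = tokenizer.encode('Hello there,', return_tensors='pt').to(device)
    output = model.generate(inputs, do_sample=True, max_length=150, top_p=0.9)
    print(tokenizer.decode(output[0]))
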
diff --git a/train.py b/train.py
index ed6beb9..11819bf 100644
--- a/train.py
+++ b/train.py
@@ -1,6 +1,7 @@
from argparse import ArgumentParser
from itertools import chain
+from torch import float16
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator
@@ -15,13 +16,13 @@ args = parser.parse_args()
# Load and tokenize dataset
raw_dataset = load_dataset('text', data_files={'train': args.input}, keep_linebreaks=True)
-tokenizer = AutoTokenizer.from_pretrained('distilgpt2', use_fast=True)
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large', use_fast=True)
tokenized_dataset = raw_dataset.map(lambda examples: tokenizer(examples['text']),
batched=True, remove_columns='text')
# Generate chunks of block_size
-block_size = tokenizer.model_max_length
+block_size = 256 # tokenizer.model_max_length
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
@@ -44,8 +45,9 @@ lm_dataset = tokenized_dataset.map(group_texts, batched=True)
# Create and train the model
-model = AutoModelForCausalLM.from_pretrained('distilgpt2')
-trainer = Trainer(model, TrainingArguments(output_dir=args.output),
- default_data_collator, lm_dataset['train'])
+model = AutoModelForCausalLM.from_pretrained('gpt2-large',
+ torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
+trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1,
+ gradient_accumulation_steps=8), default_data_collator, lm_dataset['train'])
trainer.train()
trainer.save_model()
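
On the train.py side, per_device_train_batch_size=1 is paired with gradient_accumulation_steps=8, so gradients from eight 256-token micro-batches are accumulated before each optimizer step: only one chunk is resident on the GPU at a time, while the effective batch per update stays at 8 (the Trainer default per-device batch size). A small sketch of how those arguments compose; the output directory is a placeholder, everything else mirrors the patch:

    # Sketch of the new TrainingArguments; effective batch per optimizer step is
    # per_device_train_batch_size * gradient_accumulation_steps = 1 * 8 = 8.
    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir='model',             # placeholder; train.py takes this from args.output
        per_device_train_batch_size=1,  # one block_size=256 chunk on the GPU at a time
        gradient_accumulation_steps=8,  # accumulate eight micro-batches per update
    )
    print(training_args.per_device_train_batch_size
          * training_args.gradient_accumulation_steps)  # -> 8
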