author    Anthony Wang  2022-07-16 20:20:34 -0500
committer Anthony Wang  2022-07-16 20:20:34 -0500
commit    e61a793dd69a1759e58e4e87be27f516c47102c6 (patch)
tree      3cf0e113ef084e1eac6db4110ae008de7ac45770
parent    d47afd47a33d3fb41bb8912d42694cae93108ce9 (diff)
Adjust training parameters to train gpt2-large
-rw-r--r--  bot.py     6
-rw-r--r--  train.py  13
2 files changed, 9 insertions, 10 deletions
diff --git a/bot.py b/bot.py
index 5bcac18..d285471 100644
--- a/bot.py
+++ b/bot.py
@@ -18,8 +18,8 @@ parser.add_argument('-m', '--model', default='model',
args = parser.parse_args()
-tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
-model = AutoModelForCausalLM.from_pretrained(args.model).to('cuda')
+tokenizer = AutoTokenizer.from_pretrained('gpt2-medium')
+model = AutoModelForCausalLM.from_pretrained(args.model, low_cpu_mem_usage=True).to('cuda')
if args.input is None:
@@ -74,7 +74,7 @@ if args.input is None:
print(args.input)
inputs = tokenizer.encode(args.input, return_tensors='pt').to('cuda')
output = tokenizer.decode(model.generate(
- inputs, do_sample=True, max_length=150, top_p=0.9)[0])
+ inputs, max_length=150, do_sample=True, top_p=0.9)[0])
print(output)
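
For context, a minimal standalone sketch of the generation path after this change (not part of the commit). It substitutes the stock gpt2-medium weights for the repo's fine-tuned checkpoint so it runs anywhere with a GPU, and uses the same nucleus-sampling generate() call as the updated bot.py.

# Illustrative sketch, not part of this commit. Assumes a CUDA device and
# uses 'gpt2-medium' in place of the repo's local fine-tuned model directory.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('gpt2-medium')
# low_cpu_mem_usage=True lowers peak host RAM while the checkpoint is loaded
model = AutoModelForCausalLM.from_pretrained('gpt2-medium', low_cpu_mem_usage=True).to('cuda')

inputs = tokenizer.encode('Hello, world', return_tensors='pt').to('cuda')
# Nucleus sampling: sample only from the smallest token set whose
# cumulative probability reaches 0.9, capped at 150 tokens total.
output_ids = model.generate(inputs, max_length=150, do_sample=True, top_p=0.9)[0]
print(tokenizer.decode(output_ids))
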
diff --git a/train.py b/train.py
index 2e7d6df..cbf5372 100644
--- a/train.py
+++ b/train.py
@@ -1,7 +1,6 @@
from argparse import ArgumentParser
from itertools import chain
-from torch import float16
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator
@@ -22,7 +21,7 @@ tokenized_dataset = raw_dataset.map(lambda examples: tokenizer(examples['text'])
# Generate chunks of block_size
-block_size = 256 # tokenizer.model_max_length
+block_size = tokenizer.model_max_length
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
@@ -38,16 +37,16 @@ def group_texts(examples):
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
- result["labels"] = result["input_ids"].copy()
+ result['labels'] = result['input_ids'].copy()
return result
lm_dataset = tokenized_dataset.map(group_texts, batched=True)
# Create and train the model
-model = AutoModelForCausalLM.from_pretrained('gpt2-large',
- torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
-trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1),
- default_data_collator, lm_dataset['train'])
+model = AutoModelForCausalLM.from_pretrained('gpt2-large', low_cpu_mem_usage=True).to('cuda')
+trainer = Trainer(model, TrainingArguments(output_dir=args.output, save_strategy='no',
+ per_device_train_batch_size=1, gradient_checkpointing=True, optim='adafactor'),
+ default_data_collator, lm_dataset['train'])
trainer.train()
trainer.save_model()
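
For context, a hedged sketch of the memory-saving trainer configuration this commit switches to (not part of the repo). The output directory 'out' is a placeholder; the actual script takes it from its own CLI arguments. The flags mirror the new TrainingArguments in train.py.

# Standalone sketch of the training parameters adjusted for gpt2-large.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir='out',               # placeholder path, not from the repo
    save_strategy='no',             # skip intermediate checkpoints
    per_device_train_batch_size=1,  # smallest batch so gpt2-large activations fit in VRAM
    gradient_checkpointing=True,    # recompute activations in the backward pass to save memory
    optim='adafactor',              # Adafactor keeps far less optimizer state than AdamW
)
print(training_args.optim, training_args.gradient_checkpointing)
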