path: root/train.py
from argparse import ArgumentParser
from itertools import chain

from torch import float16
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator


parser = ArgumentParser()
parser.add_argument('-i', '--input', default='data',
                    help='training data input file')
parser.add_argument('-o', '--output', default='model',
                    help='output directory for trained model')
args = parser.parse_args()


# Load and tokenize dataset
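# The 'text' builder yields one example per input line; keep_linebreaks keeps the trailing newline characters in each example.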
raw_dataset = load_dataset('text', data_files={'train': args.input}, keep_linebreaks=True)
tokenizer = AutoTokenizer.from_pretrained('gpt2-large', use_fast=True)
tokenized_dataset = raw_dataset.map(lambda examples: tokenizer(examples['text']),
                                    batched=True, remove_columns='text')


# Generate chunks of block_size
block_size = 256  # could instead use tokenizer.model_max_length (1024 for GPT-2); a shorter block reduces memory use

# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
    # Concatenate all texts.
    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # We drop the small remainder; we could pad instead if the model supported it.
    # Customize this part to your needs.
    if total_length >= block_size:
        total_length = (total_length // block_size) * block_size
    # Split into chunks of block_size.
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated_examples.items()
    }
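    # For causal language modeling the labels are just the input ids; the model shifts them by one position internally when computing the loss.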
    result["labels"] = result["input_ids"].copy()
    return result

lm_dataset = tokenized_dataset.map(group_texts, batched=True)


# Create and train the model
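# Weights are loaded directly in float16 to roughly halve GPU memory use; note that training purely in fp16 (rather than mixed precision) can be numerically less stable.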
model = AutoModelForCausalLM.from_pretrained('gpt2-large',
    torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
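# default_data_collator simply stacks the fixed-length blocks into batches (no padding needed); a per-device batch size of 1 with 8 gradient accumulation steps gives an effective batch size of 8.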
trainer = Trainer(model=model,
                  args=TrainingArguments(output_dir=args.output, per_device_train_batch_size=1,
                                         gradient_accumulation_steps=8),
                  data_collator=default_data_collator, train_dataset=lm_dataset['train'])
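# Run training, then write the final model weights and config to the output directory.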
trainer.train()
trainer.save_model()