Use gpt2-large instead of distilgpt2
parent 354ebba789
commit 47407b9fb6
bot.py | 7 ++++---

--- a/bot.py
+++ b/bot.py
@@ -1,6 +1,7 @@
 from argparse import ArgumentParser
 from random import randint, choice
 
+from torch import float16
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
@@ -17,8 +18,8 @@ parser.add_argument('-m', '--model', default='model',
 args = parser.parse_args()
 
 
-tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
-model = AutoModelForCausalLM.from_pretrained(args.model)
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
+model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=float16).to('cuda')
 
 
 if args.input is None:
@@ -71,7 +72,7 @@ if args.input is None:
 
 # Run the input through the model
 print(args.input)
-inputs = tokenizer.encode(args.input, return_tensors='pt')
+inputs = tokenizer.encode(args.input, return_tensors='pt').to('cuda')
 output = tokenizer.decode(model.generate(
     inputs, do_sample=True, max_length=150, top_p=0.9)[0])
 print(output)
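Note on the bot.py change: with the model held in float16 on the GPU, the encoded prompt must be moved to the same device before generation, which is what the new .to('cuda') on the input ids does; a CPU tensor passed to model.generate would raise a device-mismatch error. A minimal standalone sketch of this inference pattern, using the base gpt2-large checkpoint as a stand-in for the fine-tuned args.model and a placeholder prompt:

from torch import float16
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
# Half precision roughly halves the ~3 GB of fp32 weights of the 774M-parameter gpt2-large.
model = AutoModelForCausalLM.from_pretrained('gpt2-large', torch_dtype=float16).to('cuda')

# Input ids must live on the same device as the model, hence .to('cuda').
inputs = tokenizer.encode('Hello there!', return_tensors='pt').to('cuda')
output = tokenizer.decode(model.generate(
    inputs, do_sample=True, max_length=150, top_p=0.9)[0])
print(output)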
train.py | 12 +++++++-----

--- a/train.py
+++ b/train.py
@@ -1,6 +1,7 @@
 from argparse import ArgumentParser
 from itertools import chain
 
+from torch import float16
 from datasets import load_dataset
 from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator
 
@@ -15,13 +16,13 @@ args = parser.parse_args()
 
 # Load and tokenize dataset
 raw_dataset = load_dataset('text', data_files={'train': args.input}, keep_linebreaks=True)
-tokenizer = AutoTokenizer.from_pretrained('distilgpt2', use_fast=True)
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large', use_fast=True)
 tokenized_dataset = raw_dataset.map(lambda examples: tokenizer(examples['text']),
     batched=True, remove_columns='text')
 
 
 # Generate chunks of block_size
-block_size = tokenizer.model_max_length
+block_size = 256 # tokenizer.model_max_length
 
 # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
 def group_texts(examples):
@@ -44,8 +45,9 @@ lm_dataset = tokenized_dataset.map(group_texts, batched=True)
 
 
 # Create and train the model
-model = AutoModelForCausalLM.from_pretrained('distilgpt2')
-trainer = Trainer(model, TrainingArguments(output_dir=args.output),
-    default_data_collator, lm_dataset['train'])
+model = AutoModelForCausalLM.from_pretrained('gpt2-large',
+    torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
+trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1,
+    gradient_accumulation_steps=8), default_data_collator, lm_dataset['train'])
 trainer.train()
 trainer.save_model()
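Note on the train.py changes: they all trade throughput for GPU memory. The weights load in float16, the blocks shrink from the model's 1024-token maximum to 256 tokens, and the per-device batch drops to 1 with gradients accumulated over 8 steps, so the effective batch size stays at 1 * 8 = 8 while only one block's activations are resident at a time. A sketch of the same Trainer setup with the positional arguments spelled out as keywords, assuming the model and lm_dataset objects built earlier in train.py and a placeholder output directory:

from transformers import Trainer, TrainingArguments, default_data_collator

training_args = TrainingArguments(
    output_dir='model',              # train.py passes args.output here
    per_device_train_batch_size=1,   # one 256-token block per forward/backward pass
    gradient_accumulation_steps=8,   # optimizer steps every 8 micro-batches: effective batch = 8
)
trainer = Trainer(
    model=model,                     # the fp16 gpt2-large loaded above in train.py
    args=training_args,
    data_collator=default_data_collator,
    train_dataset=lm_dataset['train'],
)
trainer.train()
trainer.save_model()                 # writes the fine-tuned weights to output_dir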