Adjust training parameters to train gpt2-large
parent d47afd47a3
commit e61a793dd6

2 changed files with 9 additions and 10 deletions
bot.py (6 changed lines)
@@ -18,8 +18,8 @@ parser.add_argument('-m', '--model', default='model',
 args = parser.parse_args()
 
 
-tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
-model = AutoModelForCausalLM.from_pretrained(args.model).to('cuda')
+tokenizer = AutoTokenizer.from_pretrained('gpt2-medium')
+model = AutoModelForCausalLM.from_pretrained(args.model, low_cpu_mem_usage=True).to('cuda')
 
 
 if args.input is None:
@@ -74,7 +74,7 @@ if args.input is None:
 print(args.input)
 inputs = tokenizer.encode(args.input, return_tensors='pt').to('cuda')
 output = tokenizer.decode(model.generate(
-    inputs, do_sample=True, max_length=150, top_p=0.9)[0])
+    inputs, max_length=150, do_sample=True, top_p=0.9)[0])
 print(output)
 
 
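For reference, the generation call that this hunk reorders uses nucleus (top-p) sampling. Below is a minimal standalone sketch of the same pattern; the prompt string is illustrative, it loads the pretrained checkpoint directly instead of bot.py's fine-tuned args.model, and low_cpu_mem_usage assumes the accelerate package is installed.

# Sketch only: mirrors the sampling setup in bot.py with a hard-coded prompt.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('gpt2-medium')
model = AutoModelForCausalLM.from_pretrained('gpt2-medium', low_cpu_mem_usage=True).to('cuda')

prompt = 'Hello there!'  # illustrative; bot.py takes this from args.input
inputs = tokenizer.encode(prompt, return_tensors='pt').to('cuda')

# do_sample=True with top_p=0.9 samples from the smallest set of tokens whose
# cumulative probability exceeds 0.9 (nucleus sampling); max_length caps the
# prompt plus generated continuation at 150 tokens.
output = tokenizer.decode(model.generate(
    inputs, max_length=150, do_sample=True, top_p=0.9)[0])
print(output)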
train.py (13 changed lines)
@@ -1,7 +1,6 @@
 from argparse import ArgumentParser
 from itertools import chain
 
-from torch import float16
 from datasets import load_dataset
 from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator
 
@@ -22,7 +21,7 @@ tokenized_dataset = raw_dataset.map(lambda examples: tokenizer(examples['text'])
 
 
 # Generate chunks of block_size
-block_size = 256 # tokenizer.model_max_length
+block_size = tokenizer.model_max_length
 
 # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
 def group_texts(examples):
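The block_size change above feeds into group_texts, which concatenates the tokenized examples and slices them into fixed-length chunks (GPT-2's model_max_length is 1024, so the chunks grow from 256 to 1024 tokens). The next hunk shows only part of that function, so here is a standalone sketch with toy token IDs; the concatenation and total_length lines are filled in from the standard Transformers language-modeling recipe and are an assumption about the unchanged parts of train.py.

from itertools import chain

block_size = 4  # toy value; train.py now uses tokenizer.model_max_length

def group_texts(examples):
    # Concatenate each field (input_ids, attention_mask, ...) across the whole batch.
    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # Drop the tail so every chunk is exactly block_size tokens long.
    total_length = (total_length // block_size) * block_size
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated_examples.items()
    }
    # Causal LM training reuses the inputs as labels; the shift happens inside the model.
    result['labels'] = result['input_ids'].copy()
    return result

batch = {'input_ids': [[1, 2, 3], [4, 5, 6, 7], [8, 9]]}
print(group_texts(batch)['input_ids'])  # [[1, 2, 3, 4], [5, 6, 7, 8]]; the leftover 9 is dropped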
@@ -38,16 +37,16 @@ def group_texts(examples):
         k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
         for k, t in concatenated_examples.items()
     }
-    result["labels"] = result["input_ids"].copy()
+    result['labels'] = result['input_ids'].copy()
     return result
 
 lm_dataset = tokenized_dataset.map(group_texts, batched=True)
 
 
 # Create and train the model
-model = AutoModelForCausalLM.from_pretrained('gpt2-large',
-    torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
-trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1),
-    default_data_collator, lm_dataset['train'])
+model = AutoModelForCausalLM.from_pretrained('gpt2-large', low_cpu_mem_usage=True).to('cuda')
+trainer = Trainer(model, TrainingArguments(output_dir=args.output, save_strategy='no',
+    per_device_train_batch_size=1, gradient_checkpointing=True, optim='adafactor'),
+    default_data_collator, lm_dataset['train'])
 trainer.train()
 trainer.save_model()
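The rewritten Trainer setup trades compute for memory so gpt2-large fits on a single GPU: gradient checkpointing recomputes activations during the backward pass instead of storing them, Adafactor keeps far less optimizer state than the default AdamW, the batch size stays at 1, and save_strategy='no' skips intermediate checkpoints. A minimal sketch of just that configuration follows; the output directory and the tiny dummy dataset are placeholders (train.py uses args.output and the chunked lm_dataset['train']), and it assumes a CUDA device with enough memory for gpt2-large.

from datasets import Dataset
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments, default_data_collator

model = AutoModelForCausalLM.from_pretrained('gpt2-large', low_cpu_mem_usage=True).to('cuda')

training_args = TrainingArguments(
    output_dir='output',            # placeholder; train.py passes args.output
    save_strategy='no',             # no intermediate checkpoints
    per_device_train_batch_size=1,  # smallest batch to limit activation memory
    gradient_checkpointing=True,    # recompute activations in the backward pass
    optim='adafactor',              # lighter optimizer state than AdamW
)

# Tiny dummy dataset so the sketch runs end to end; 50256 is GPT-2's EOS token id.
dummy_train = Dataset.from_dict({
    'input_ids': [[50256] * 32] * 4,
    'attention_mask': [[1] * 32] * 4,
    'labels': [[50256] * 32] * 4,
})

trainer = Trainer(model, training_args, default_data_collator, dummy_train)
trainer.train()
trainer.save_model()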