Adjust training parameters

Anthony Wang 2022-07-16 11:17:56 -05:00
parent 47407b9fb6
commit d47afd47a3
Signed by: a
GPG key ID: BC96B00AEC5F2D76
2 changed files with 3 additions and 3 deletions

bot.py

@@ -19,7 +19,7 @@ args = parser.parse_args()
 tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
-model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=float16).to('cuda')
+model = AutoModelForCausalLM.from_pretrained(args.model).to('cuda')
 if args.input is None:
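
For context, the dropped torch_dtype argument controls load-time precision: with it, the weights are loaded as 16-bit floats (roughly half the GPU memory); without it, Transformers defaults to 32-bit. Below is a minimal sketch of both loading modes, assuming float16 in bot.py refers to torch.float16 and using model_id as a hypothetical stand-in for args.model:

import torch
from transformers import AutoModelForCausalLM

model_id = 'gpt2-large'  # assumption: placeholder for args.model

# Before this commit: half-precision weights, smaller GPU footprint
model_fp16 = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16).to('cuda')

# After this commit: default full-precision (fp32) weights
model_fp32 = AutoModelForCausalLM.from_pretrained(model_id).to('cuda')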


@@ -47,7 +47,7 @@ lm_dataset = tokenized_dataset.map(group_texts, batched=True)
 # Create and train the model
 model = AutoModelForCausalLM.from_pretrained('gpt2-large',
     torch_dtype=float16, low_cpu_mem_usage=True).to('cuda')
-trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1,
-    gradient_accumulation_steps=8), default_data_collator, lm_dataset['train'])
+trainer = Trainer(model, TrainingArguments(output_dir=args.output, per_device_train_batch_size=1),
+    default_data_collator, lm_dataset['train'])
 trainer.train()
 trainer.save_model()
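
For context, gradient_accumulation_steps multiplies the effective batch size: the removed setting accumulated gradients over 8 forward passes before each optimizer step (effective batch size 1 x 8 = 8), while the new configuration steps after every example. A minimal sketch of the two configurations, with output_dir hard-coded as a hypothetical stand-in for args.output:

from transformers import TrainingArguments

# Before this commit: one optimizer step per 8 accumulated examples
args_before = TrainingArguments(output_dir='model', per_device_train_batch_size=1,
                                gradient_accumulation_steps=8)

# After this commit: one optimizer step per example
args_after = TrainingArguments(output_dir='model', per_device_train_batch_size=1)

# Either set of arguments plugs into the same Trainer call:
# trainer = Trainer(model, args_after, default_data_collator, lm_dataset['train'])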