Modified model to use SGD for less optimal training

There's no point having a loss-function testing sandbox where the baseline trains perfectly in 2 epochs. I've de-optimised the training in this commit to ensure the testing sandbox is actually useful.
2024-05-15 11:12:28 +01:00
parent 389c47ef28
commit 62649a39da


@@ -19,7 +19,7 @@ def get_singleton_dataset():
     return get_dataset(dataset=QMNIST)
-def main(loss_func=nn.functional.cross_entropy, logger=None):
+def main(loss_func=nn.functional.cross_entropy, logger=None, **kwargs):
     import lightning as L
     from .train import TrainingWrapper
@@ -31,7 +31,8 @@ def main(loss_func=nn.functional.cross_entropy, logger=None):
     train, val, test = get_singleton_dataset()
     lmodel = TrainingWrapper(model, loss_func=loss_func)
-    trainer = L.Trainer(max_epochs=5, logger=logger)
+    lmodel.configure_optimizers(**kwargs)
+    trainer = L.Trainer(max_epochs=20, logger=logger)
     trainer.fit(model=lmodel, train_dataloaders=train, val_dataloaders=val)
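
The forwarded **kwargs only make sense if TrainingWrapper.configure_optimizers accepts them and builds the deliberately plain SGD optimizer from them. train.py is not part of this diff, so the class below is only a minimal sketch of what that wrapper could look like, assuming vanilla torch.optim.SGD with no momentum; apart from the configure_optimizers and loss_func names taken from the diff, every name and default here is a guess for illustration.

# Hypothetical sketch; the real TrainingWrapper lives in .train and is not shown in this commit.
import torch
import lightning as L

class TrainingWrapper(L.LightningModule):
    def __init__(self, model, loss_func):
        super().__init__()
        self.model = model
        self.loss_func = loss_func
        self.optim_kwargs = {}

    def configure_optimizers(self, **kwargs):
        # Called once from main() with the sandbox's kwargs, then again by the
        # Trainer during fit() with no arguments; remember whatever was passed.
        if kwargs:
            self.optim_kwargs = kwargs
        # Plain SGD (no momentum by default) keeps the baseline from converging
        # in a couple of epochs, which is the stated point of this commit.
        return torch.optim.SGD(
            self.model.parameters(),
            lr=self.optim_kwargs.get("lr", 0.01),
            momentum=self.optim_kwargs.get("momentum", 0.0),
        )

    def training_step(self, batch, batch_idx):
        x, y = batch
        return self.loss_func(self.model(x), y)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        self.log("val_loss", self.loss_func(self.model(x), y))

Under that shape, a sandbox call like main(loss_func=my_loss, lr=0.05) would flow through **kwargs straight into the SGD constructor.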