From 62649a39da266dcff2e25dd01e323727c7a06e73 Mon Sep 17 00:00:00 2001
From: Cian-H
Date: Wed, 15 May 2024 11:12:28 +0100
Subject: [PATCH] Modified model to use SGD for less optimal training

There's no point having a loss-function testing sandbox where the
baseline trains perfectly in 2 epochs. I've de-optimised the training in
this commit to ensure the testing sandbox is actually useful.
---
 symbolic_nn_tests/model.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/symbolic_nn_tests/model.py b/symbolic_nn_tests/model.py
index 2b88952..51abc69 100644
--- a/symbolic_nn_tests/model.py
+++ b/symbolic_nn_tests/model.py
@@ -19,7 +19,7 @@ def get_singleton_dataset():
     return get_dataset(dataset=QMNIST)
 
 
-def main(loss_func=nn.functional.cross_entropy, logger=None):
+def main(loss_func=nn.functional.cross_entropy, logger=None, **kwargs):
     import lightning as L
 
     from .train import TrainingWrapper
@@ -31,7 +31,8 @@ def main(loss_func=nn.functional.cross_entropy, logger=None):
     train, val, test = get_singleton_dataset()
 
     lmodel = TrainingWrapper(model, loss_func=loss_func)
-    trainer = L.Trainer(max_epochs=5, logger=logger)
+    lmodel.configure_optimizers(**kwargs)
+    trainer = L.Trainer(max_epochs=20, logger=logger)
     trainer.fit(model=lmodel, train_dataloaders=train, val_dataloaders=val)
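
Note: TrainingWrapper lives in symbolic_nn_tests/train.py, which this patch
does not touch, so the SGD switch itself is not visible in the diff. Below is
a minimal sketch of how a configure_optimizers hook could consume the kwargs
that the patched main() forwards; the class body, attribute names, and
defaults here are assumptions for illustration, not the repo's actual code.

import lightning as L
import torch
from torch import nn


class TrainingWrapper(L.LightningModule):
    def __init__(self, model, loss_func=nn.functional.cross_entropy):
        super().__init__()
        self.model = model
        self.loss_func = loss_func
        self._opt_kwargs = {"lr": 1e-2}  # assumed defaults

    def configure_optimizers(self, **kwargs):
        # The manual call in main() stashes any overrides (e.g. lr,
        # momentum); Lightning's own call, made with no arguments during
        # fit(), then builds the optimizer from the stored settings.
        if kwargs:
            self._opt_kwargs.update(kwargs)
        # Plain SGD converges more slowly than adaptive optimizers, which
        # is the point: it leaves headroom for the loss-function
        # experiments to show measurable differences.
        return torch.optim.SGD(self.parameters(), **self._opt_kwargs)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = self.loss_func(self.model(x), y)
        self.log("train_loss", loss)
        return loss

Under that assumption, a call like main(lr=5e-3, momentum=0.9) would tune the
de-optimised baseline without touching model.py again.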