changes to logging sync frequency

2024-06-11 19:57:48 +01:00
parent 6d0682bbed
commit dd4effc690
3 changed files with 17 additions and 10 deletions

@@ -70,7 +70,7 @@ def run(tensorboard: bool = True, wandb: bool = True):
     wandb_logger = wandb
     test(
-        train_loss=semantic_loss.positive_slope_linear_loss(wandb_logger, version),
+        train_loss=semantic_loss.PositiveSlopeLinearLoss(wandb_logger, version),
         val_loss=unpacking_smooth_l1_loss,
         test_loss=unpacking_smooth_l1_loss,
         version=version,
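The call site changes only in name: the loss is still handed around as a plain callable. A minimal sketch of why the two forms are interchangeable (`out` and `y` stand in for whatever the training loop produces):

# Old style: a factory function returning a closure.
loss_fn = semantic_loss.positive_slope_linear_loss(wandb_logger, version)

# New style: a class whose instances are callable via __call__, and which
# can additionally carry state (the log-frequency counter added here).
loss_fn = semantic_loss.PositiveSlopeLinearLoss(wandb_logger, version)

# Either way, downstream code invokes it like a function:
loss = loss_fn(out, y)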

@@ -82,7 +82,7 @@ def main(
         test_loss=test_loss,
     )
     lmodel.configure_optimizers(optimizer=torch.optim.NAdam, **kwargs)
-    trainer = L.Trainer(max_epochs=10, logger=logger)
+    trainer = L.Trainer(max_epochs=5, logger=logger)
     trainer.fit(model=lmodel, train_dataloaders=train, val_dataloaders=val)
     trainer.test(dataloaders=test)
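For context, `configure_optimizers` is normally a hook that Lightning calls for you during `fit`; calling it by hand with an optimizer class, as above, implies a wrapper that caches the choice. One way that could be wired up, as a sketch only (the class and attribute names here are illustrative, not from this repo):

import lightning as L
import torch

class LitWrapper(L.LightningModule):
    def __init__(self):
        super().__init__()
        self._opt_cls, self._opt_kwargs = torch.optim.Adam, {}

    def configure_optimizers(self, optimizer=None, **kwargs):
        # Called manually with an optimizer class: just record the choice.
        if optimizer is not None:
            self._opt_cls, self._opt_kwargs = optimizer, kwargs
            return None
        # Called by Lightning with no arguments: build the real optimizer.
        return self._opt_cls(self.parameters(), **self._opt_kwargs)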

@@ -18,10 +18,16 @@ import torch
 # proportionality.
-def positive_slope_linear_loss(wandb_logger=None, version="", device="cuda"):
-    a = nn.Parameter(data=torch.randn(1), requires_grad=True).to(device)
+class PositiveSlopeLinearLoss:
+    def __init__(self, wandb_logger=None, version="", device="cuda", log_freq=50):
+        self.a = nn.Parameter(data=torch.randn(1), requires_grad=True).to(device)
+        self.wandb_logger = wandb_logger
+        self.version = version
+        self.device = device
+        self.log_freq = log_freq
+        self.steps_since_log = 0
 
-    def f(out, y):
+    def __call__(self, out, y):
         x, y_pred = out
         x0, x1 = x
@@ -56,14 +62,15 @@ def positive_slope_linear_loss(wandb_logger=None, version="", device="cuda"):
         # We also need to calculate a penalty that incentivizes a positive slope. For this, I'm using
         # relu to scale the slope, as it penalises negative slopes without just creating a reward hack
         # for maximizing the slope.
-        slope_penalty = (nn.functional.relu(a * (-m)) + 1).mean()
-        if wandb_logger:
-            wandb_logger.log_metrics({f"{version}-a": a})
+        slope_penalty = (nn.functional.relu(self.a * (-m)) + 1).mean()
+        if self.wandb_logger and (self.steps_since_log >= self.log_freq):
+            self.wandb_logger.log_metrics({f"{self.version}-a": self.a})
+            self.steps_since_log = 0
+        else:
+            self.steps_since_log += 1
         # Finally, let's get a smooth L1 loss and scale it based on these penalty functions
         return (
             nn.functional.smooth_l1_loss(y_pred, y) * residual_penalty * slope_penalty
         )
-
-    return f
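The net effect of the commit is that the learned slope parameter `a` is synced to W&B once every `log_freq` steps rather than on every call. A self-contained sketch of the same throttling pattern, with an illustrative stub standing in for the W&B logger:

class StubLogger:
    # Illustrative stand-in for wandb_logger; just counts syncs.
    def __init__(self):
        self.calls = 0

    def log_metrics(self, metrics):
        self.calls += 1

class ThrottledLog:
    def __init__(self, logger, log_freq=50):
        self.logger = logger
        self.log_freq = log_freq
        self.steps_since_log = 0

    def step(self, metrics):
        # Same counter logic as the diff: sync, then reset the counter.
        if self.logger and self.steps_since_log >= self.log_freq:
            self.logger.log_metrics(metrics)
            self.steps_since_log = 0
        else:
            self.steps_since_log += 1

logger = StubLogger()
throttled = ThrottledLog(logger, log_freq=50)
for _ in range(1000):
    throttled.step({"a": 0.0})
print(logger.calls)  # 19

Note that the period is actually log_freq + 1 calls, since the counter resets to 0 on the step that logs: 1000 steps with log_freq=50 produce 19 syncs, not 20.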