diff --git a/symbolic_nn_tests/__main__.py b/symbolic_nn_tests/__main__.py
index ac6d79b..3be6a0b 100644
--- a/symbolic_nn_tests/__main__.py
+++ b/symbolic_nn_tests/__main__.py
@@ -31,6 +31,7 @@ def main():
     run_test(semantic_loss.hasline_cross_entropy, "hasline_cross_entropy")
     run_test(semantic_loss.hasloop_cross_entropy, "hasloop_cross_entropy")
     run_test(semantic_loss.multisemantic_cross_entropy, "multisemantic_cross_entropy")
+    run_test(semantic_loss.garbage_cross_entropy, "garbage_cross_entropy")
 
 
 if __name__ == "__main__":
diff --git a/symbolic_nn_tests/semantic_loss.py b/symbolic_nn_tests/semantic_loss.py
index 7634e4b..ade67ed 100644
--- a/symbolic_nn_tests/semantic_loss.py
+++ b/symbolic_nn_tests/semantic_loss.py
@@ -73,3 +73,11 @@ MULTISEMANTIC_MATRIX = SIMILARITY_MATRIX * HASLINE_MATRIX * HASLOOP_MATRIX
 MULTISEMANTIC_MATRIX /= MULTISEMANTIC_MATRIX.sum()
 
 multisemantic_cross_entropy = create_semantic_cross_entropy(MULTISEMANTIC_MATRIX)
+
+# NOTE: As a final test, let's make something similar to these but with no knowledge,
+# just random data. This creates a benchmark for the effect of this process without
+# the "knowledge" component.
+GARBAGE_MATRIX = torch.rand(10, 10).to("cuda")
+GARBAGE_MATRIX /= GARBAGE_MATRIX.sum()
+
+garbage_cross_entropy = create_semantic_cross_entropy(GARBAGE_MATRIX)
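
For context, here is a minimal sketch of what the garbage baseline is meant to probe. The body of `create_semantic_cross_entropy` is not part of this diff, so the sketch assumes (hypothetically) that each row of the matrix acts as a soft label distribution inside a standard cross entropy; the rows are renormalized here so each sums to 1, and the tensors stay on CPU so the snippet runs anywhere.

```python
import torch
import torch.nn.functional as F


def semantic_cross_entropy_sketch(matrix: torch.Tensor):
    """Hypothetical stand-in for create_semantic_cross_entropy (assumed, not the repo's code)."""
    # Treat each row of the matrix as a soft label distribution over the 10 classes.
    soft_labels = matrix / matrix.sum(dim=1, keepdim=True)

    def loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        # Soft-label cross entropy: -sum(p * log q) per sample, averaged over the batch.
        log_probs = F.log_softmax(logits, dim=1)
        return -(soft_labels[targets] * log_probs).sum(dim=1).mean()

    return loss


# The "garbage" baseline: random similarities carry no class knowledge, so any
# gain over plain cross entropy would come from the mechanism, not the prior.
garbage_matrix = torch.rand(10, 10)
garbage_loss = semantic_cross_entropy_sketch(garbage_matrix)
print(garbage_loss(torch.randn(4, 10), torch.tensor([0, 3, 7, 9])))
```

Note that the diff itself normalizes GARBAGE_MATRIX by its global sum, matching how MULTISEMANTIC_MATRIX is normalized above, rather than per row as in this sketch.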