From 7cce362fc84c9dd303ea78c1179ef0ed74e9cfec Mon Sep 17 00:00:00 2001 From: Colton Date: Sat, 21 Feb 2026 18:19:05 -0500 Subject: [PATCH 01/10] Detailed atom trace --- docs/inconsistency_example.py | 140 +++++++++++++++ ...anced_rule_trace_edges_20241119-012153.csv | 67 -------- ...anced_rule_trace_nodes_20241119-012153.csv | 32 ---- ...basic_rule_trace_nodes_20241119-012005.csv | 7 - ...basic_rule_trace_nodes_20241125-114246.csv | 7 - ...edges_rule_trace_edges_20241119-140955.csv | 10 -- ...edges_rule_trace_nodes_20241119-140955.csv | 11 -- .../scripts/interpretation/interpretation.py | 159 +++++++++++++----- .../interpretation/interpretation_fp.py | 159 +++++++++++++----- .../interpretation/interpretation_parallel.py | 159 +++++++++++++----- pyreason/scripts/utils/output.py | 26 ++- tests/functional/test_inconsistency_trace.py | 68 ++++++++ .../test_interpretation_common.py | 52 +++++- .../interpretations/test_reason_core.py | 22 ++- .../interpretations/test_reason_misc.py | 44 ++++- 15 files changed, 677 insertions(+), 286 deletions(-) create mode 100644 docs/inconsistency_example.py delete mode 100644 examples/csv outputs/advanced_rule_trace_edges_20241119-012153.csv delete mode 100644 examples/csv outputs/advanced_rule_trace_nodes_20241119-012153.csv delete mode 100644 examples/csv outputs/basic_rule_trace_nodes_20241119-012005.csv delete mode 100644 examples/csv outputs/basic_rule_trace_nodes_20241125-114246.csv delete mode 100644 examples/csv outputs/infer_edges_rule_trace_edges_20241119-140955.csv delete mode 100644 examples/csv outputs/infer_edges_rule_trace_nodes_20241119-140955.csv create mode 100644 tests/functional/test_inconsistency_trace.py diff --git a/docs/inconsistency_example.py b/docs/inconsistency_example.py new file mode 100644 index 00000000..bbe2e8b0 --- /dev/null +++ b/docs/inconsistency_example.py @@ -0,0 +1,140 @@ +""" +Example: Atom Trace with Inconsistencies +========================================= +This example demonstrates how 
PyReason detects and resolves inconsistencies +using the Inconsistent Predicate List (IPL) and how atom_trace provides +full explainability of what happened. + +Scenario: + - We have a small health network: Alice, Bob, and Carol. + - "sick" and "healthy" are declared as inconsistent node predicates. + - "close_contact" and "no_contact" are declared as inconsistent edge predicates. + - Node inconsistencies: + 1. sick(Alice) vs healthy(Alice) -- IPL-based conflict + 2. tired(Bob) with non-overlapping bounds -- same-predicate conflict + - Edge inconsistencies (triggered by rules firing): + 3. quarantine_rule infers no_contact(Alice,Bob) which conflicts with + the close_contact(Alice,Bob) fact via the IPL + 4. distrust_rule infers trust(Bob,Carol):[0.0,0.2] which conflicts with + the trust(Bob,Carol):[0.9,1.0] fact -- same-predicate non-overlapping bounds + - All inconsistencies are resolved to [0, 1] (complete uncertainty). + - A rule propagates sickness through edges, showing normal (non-conflicting) + reasoning alongside the inconsistency resolution. 
+""" + +import pyreason as pr +import networkx as nx + +# Reset PyReason to a clean state +pr.reset() +pr.reset_rules() + +# ================================ CREATE GRAPH ================================ +g = nx.DiGraph() + +# People in our health network +g.add_nodes_from(['Alice', 'Bob', 'Carol', 'Dave']) + +# Contact edges (who has been in contact with whom) +g.add_edge('Alice', 'Bob', contact=1) +g.add_edge('Bob', 'Carol', contact=1) +g.add_edge('Bob', 'Dave', contact=1) + +# ================================ CONFIGURE =================================== +pr.settings.verbose = True +pr.settings.atom_trace = True # Enable atom trace for full explainability +pr.settings.inconsistency_check = True # Enable inconsistency detection (default) + +# ================================ LOAD GRAPH ================================== +pr.load_graph(g) + +# Declare sick and healthy as inconsistent predicates +# When one is set, PyReason automatically gives the other the negated bound +pr.add_inconsistent_predicate('sick', 'healthy') + +# Declare close_contact and no_contact as inconsistent edge predicates +pr.add_inconsistent_predicate('close_contact', 'no_contact') + +# ================================ ADD RULES =================================== +# If someone is sick and in contact with another person, that person may get sick +pr.add_rule(pr.Rule('sick(y):[0.5,0.7] <- sick(x):[0.5,1.0], contact(x,y)', 'spread_rule')) + +# Rule that infers no_contact on an edge when both people are sick -- will conflict +# with the close_contact fact on (Alice, Bob) via the IPL +pr.add_rule(pr.Rule('no_contact(x,y):[0.8,1.0] <- sick(x):[0.5,1.0], sick(y):[0.5,1.0], contact(x,y)', 'quarantine_rule')) + +# Rule that infers low trust on an edge when someone is sick -- will conflict +# with the high trust fact on (Bob, Carol) via same-predicate conflicting bounds +pr.add_rule(pr.Rule('trust(x,y):[0.0,0.2] <- sick(x):[0.5,1.0], contact(x,y)', 'distrust_rule')) + +# Rule that infers risk on an 
edge (no conflict -- clean edge rule for comparison) +pr.add_rule(pr.Rule('risk(x,y):[0.6,0.8] <- sick(x):[0.5,1.0], contact(x,y)', 'risk_rule')) + +# ================================ ADD FACTS =================================== +# Fact 1: Alice is sick with high confidence +pr.add_fact(pr.Fact('sick(Alice):[0.8,1.0]', 'alice_sick_fact', 0, 0)) + +# Fact 2: Alice is also healthy with high confidence -- this CONTRADICTS Fact 1 +# Since sick and healthy are in the IPL, this creates an inconsistency +pr.add_fact(pr.Fact('healthy(Alice):[0.9,1.0]', 'alice_healthy_fact', 0, 0)) + +# Fact 3: Bob is sick (no contradiction here, normal reasoning) +pr.add_fact(pr.Fact('sick(Bob):[0.6,0.8]', 'bob_sick_fact', 0, 0)) +# Fact 3.5 : Carol is Healthy (will trigger contradiction later) +pr.add_fact(pr.Fact('healthy(Carol):[0.9,1.0]', 'carol_healthy_fact', 0, 0)) + +# Fact 4 & 5: Bob is tired with two conflicting, non-overlapping bounds +# Since "tired" is NOT in the IPL, this triggers a same-predicate inconsistency +pr.add_fact(pr.Fact('tired(Bob):[0.8,1.0]', 'bob_tired_fact_1', 0, 0)) +pr.add_fact(pr.Fact('tired(Bob):[0.0,0.1]', 'bob_tired_fact_2', 0, 0)) + +# Dave has no conflicting predicates -- spread_rule will cleanly set sick(Dave) +# This provides a normal node rule trace entry for comparison + +# ---- Edge inconsistencies (set up initial state for rule-triggered conflicts) ---- +# Fact 6: Set close_contact on (Alice, Bob) -- quarantine_rule will later infer +# no_contact on the same edge, triggering an IPL-based edge inconsistency +pr.add_fact(pr.Fact('close_contact(Alice,Bob):[0.8,1.0]', 'alice_bob_close_fact', 0, 0)) + +# Fact 7: Set high trust on (Bob, Carol) -- distrust_rule will later infer +# trust:[0.0,0.2] on the same edge, triggering a same-predicate edge inconsistency +pr.add_fact(pr.Fact('trust(Bob,Carol):[0.9,1.0]', 'bob_carol_trust_high', 0, 0)) + +# ================================ REASON ====================================== +print('=' * 60) 
+print('Running PyReason with inconsistency detection...') +print('=' * 60) +interpretation = pr.reason(timesteps=2) + +# ================================ VIEW RESULTS ================================ +print('\n' + '=' * 60) +print('Node Interpretation Changes (sick)') +print('=' * 60) +dataframes = pr.filter_and_sort_nodes(interpretation, ['sick']) +for t, df in enumerate(dataframes): + print(f'\nTIMESTEP {t}:') + print(df) + +print('\n' + '=' * 60) +print('Node Interpretation Changes (healthy)') +print('=' * 60) +dataframes = pr.filter_and_sort_nodes(interpretation, ['healthy']) +for t, df in enumerate(dataframes): + print(f'\nTIMESTEP {t}:') + print(df) + +# ================================ VIEW TRACE ================================== +print('\n' + '=' * 60) +print('Rule Trace (shows inconsistency resolution details)') +print('=' * 60) +node_trace, edge_trace = pr.get_rule_trace(interpretation) +print('\nNode trace:') +print(node_trace.to_string()) + +if not edge_trace.empty: + print('\nEdge trace:') + print(edge_trace.to_string()) + +# Save the rule trace to a file for further inspection +pr.save_rule_trace(interpretation) +print('\nRule trace saved to current directory.') diff --git a/examples/csv outputs/advanced_rule_trace_edges_20241119-012153.csv b/examples/csv outputs/advanced_rule_trace_edges_20241119-012153.csv deleted file mode 100644 index 1f937539..00000000 --- a/examples/csv outputs/advanced_rule_trace_edges_20241119-012153.csv +++ /dev/null @@ -1,67 +0,0 @@ -Time,Fixed-Point-Operation,Edge,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2 -0,1,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -0,1,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -0,1,"('customer_0', 
'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -0,1,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -0,1,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -0,1,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -0,1,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -0,1,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -0,1,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -0,1,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -0,1,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -1,3,"('customer_3', 
'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -1,3,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -1,3,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -1,3,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -1,3,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -1,3,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -1,3,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -1,3,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -1,3,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -1,3,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 
'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -1,3,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -2,5,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -2,5,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -2,5,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -2,5,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -2,5,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -2,5,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -2,5,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -2,5,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -2,5,"('customer_5', 
'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -2,5,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -2,5,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -3,7,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -3,7,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -3,7,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -3,7,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -3,7,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -3,7,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -3,7,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 
'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -3,7,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -3,7,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -3,7,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -3,7,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -4,9,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -4,9,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -4,9,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -4,9,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -4,9,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -4,9,"('customer_3', 
'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -4,9,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -4,9,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -4,9,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -4,9,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -4,9,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -5,11,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -5,11,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -5,11,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" -5,11,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 
'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -5,11,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" -5,11,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" -5,11,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" -5,11,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" -5,11,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" -5,11,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" -5,11,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" diff --git a/examples/csv outputs/advanced_rule_trace_nodes_20241119-012153.csv b/examples/csv outputs/advanced_rule_trace_nodes_20241119-012153.csv deleted file mode 100644 index f648b7e7..00000000 --- a/examples/csv outputs/advanced_rule_trace_nodes_20241119-012153.csv +++ /dev/null @@ -1,32 +0,0 @@ -Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2 
-0,0,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -1,2,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -1,2,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] -1,2,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] -1,2,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] -1,2,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] -1,3,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] -2,4,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -2,4,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] -2,4,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] -2,4,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] -2,4,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] -2,5,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] -3,6,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -3,6,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] -3,6,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] -3,6,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] -3,6,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] -3,7,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] -4,8,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -4,8,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] 
-4,8,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] -4,8,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] -4,8,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] -4,9,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] -5,10,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, -5,10,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] -5,10,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] -5,10,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] -5,10,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] -5,11,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] diff --git a/examples/csv outputs/basic_rule_trace_nodes_20241119-012005.csv b/examples/csv outputs/basic_rule_trace_nodes_20241119-012005.csv deleted file mode 100644 index 02ece211..00000000 --- a/examples/csv outputs/basic_rule_trace_nodes_20241119-012005.csv +++ /dev/null @@ -1,7 +0,0 @@ -Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To -0,0,Mary,popular,-,"[1.0,1.0]",- -1,1,Mary,popular,-,"[1.0,1.0]",- -1,1,Justin,popular,-,"[1.0,1.0]",- -2,2,Mary,popular,-,"[1.0,1.0]",- -2,2,John,popular,-,"[1.0,1.0]",- -2,2,Justin,popular,-,"[1.0,1.0]",- diff --git a/examples/csv outputs/basic_rule_trace_nodes_20241125-114246.csv b/examples/csv outputs/basic_rule_trace_nodes_20241125-114246.csv deleted file mode 100644 index 02ece211..00000000 --- a/examples/csv outputs/basic_rule_trace_nodes_20241125-114246.csv +++ /dev/null @@ -1,7 +0,0 @@ -Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To -0,0,Mary,popular,-,"[1.0,1.0]",- -1,1,Mary,popular,-,"[1.0,1.0]",- 
-1,1,Justin,popular,-,"[1.0,1.0]",- -2,2,Mary,popular,-,"[1.0,1.0]",- -2,2,John,popular,-,"[1.0,1.0]",- -2,2,Justin,popular,-,"[1.0,1.0]",- diff --git a/examples/csv outputs/infer_edges_rule_trace_edges_20241119-140955.csv b/examples/csv outputs/infer_edges_rule_trace_edges_20241119-140955.csv deleted file mode 100644 index d540071b..00000000 --- a/examples/csv outputs/infer_edges_rule_trace_edges_20241119-140955.csv +++ /dev/null @@ -1,10 +0,0 @@ -Time,Fixed-Point-Operation,Edge,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2,Clause-3 -0,0,"('Amsterdam_Airport_Schiphol', 'Yali')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Riga_International_Airport', 'Amsterdam_Airport_Schiphol')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Riga_International_Airport', 'Düsseldorf_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Chișinău_International_Airport', 'Riga_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Düsseldorf_Airport', 'Dubrovnik_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Pobedilovo_Airport', 'Vnukovo_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Dubrovnik_Airport', 'Athens_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -0,0,"('Vnukovo_International_Airport', 'Hévíz-Balaton_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, -1,1,"('Vnukovo_International_Airport', 'Riga_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",connected_rule_1,"[('Riga_International_Airport', 'Amsterdam_Airport_Schiphol')]",['Amsterdam_Airport_Schiphol'],['Vnukovo_International_Airport'] diff --git a/examples/csv outputs/infer_edges_rule_trace_nodes_20241119-140955.csv b/examples/csv outputs/infer_edges_rule_trace_nodes_20241119-140955.csv deleted file mode 100644 index 17adc715..00000000 
--- a/examples/csv outputs/infer_edges_rule_trace_nodes_20241119-140955.csv +++ /dev/null @@ -1,11 +0,0 @@ -Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To -0,0,Amsterdam_Airport_Schiphol,Amsterdam_Airport_Schiphol,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Riga_International_Airport,Riga_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Chișinău_International_Airport,Chișinău_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Yali,Yali,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Düsseldorf_Airport,Düsseldorf_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Pobedilovo_Airport,Pobedilovo_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Dubrovnik_Airport,Dubrovnik_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Hévíz-Balaton_Airport,Hévíz-Balaton_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Athens_International_Airport,Athens_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact -0,0,Vnukovo_International_Airport,Vnukovo_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact diff --git a/pyreason/scripts/interpretation/interpretation.py b/pyreason/scripts/interpretation/interpretation.py index 13f11fa9..515a555e 100755 --- a/pyreason/scripts/interpretation/interpretation.py +++ b/pyreason/scripts/interpretation/interpretation.py @@ -92,8 +92,8 @@ def __init__(self, graph, ipl, annotation_functions, head_functions, reverse_gra # Keep track of all the rules that have affected each node/edge at each timestep/fp operation, and all ground atoms that have affected the rules as well. 
Keep track of previous bounds and name of the rule/fact here self.rule_trace_node_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) self.rule_trace_edge_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) - self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type))) - self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type))) + self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) + self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) # Nodes and edges of the graph self.nodes = numba.typed.List.empty_list(node_type) @@ -281,16 +281,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi # Check if we should even store any of the changes to the rule trace etc. 
# Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd)) + meta_name = facts_to_be_applied_node_trace[i] if atom_trace else '' + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd, True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_node_trace[i]) for p1, p2 in ipl: if p1==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[comp].world[p2])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[comp].world[p2], facts_to_be_applied_node_trace[i]) elif p2==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[comp].world[p1])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[comp].world[p1], facts_to_be_applied_node_trace[i]) @@ -356,16 +357,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi if l in 
interpretations_edge[comp].world and interpretations_edge[comp].world[l].is_static(): # Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[comp].world[l])) + meta_name = facts_to_be_applied_edge_trace[i] if atom_trace else '' + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[comp].world[l], True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_edge_trace[i]) for p1, p2 in ipl: if p1==l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[comp].world[p2])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_edge[comp].world[p2], facts_to_be_applied_edge_trace[i]) elif p2==l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[comp].world[p1])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_edge[comp].world[p1], 
facts_to_be_applied_edge_trace[i]) else: @@ -1480,7 +1482,18 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1512,7 +1525,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1529,7 +1542,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), 
numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1588,7 +1601,18 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1620,7 +1644,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1637,7 +1661,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), 
numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1809,70 +1833,127 @@ def check_consistent_edge(interpretations, comp, na): @numba.njit(cache=True) def resolve_inconsistency_node(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): world = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = _p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp}) conflicts with grounding {comp_label_value}({comp}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp}). 
Update from [{float_to_str(world.world[na[0]].lower)}, {float_to_str(world.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, world.world[na[0]], actual_name) + # Resolve inconsistency and set static world.world[na[0]].set_lower_upper(0, 1) world.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], actual_name) world.world[p2].set_lower_upper(0, 1) world.world[p2].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, 
numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], actual_name) world.world[p1].set_lower_upper(0, 1) world.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) - # Add inconsistent predicates to a list + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) def resolve_inconsistency_edge(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): w = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = 
_p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp[0]},{comp[1]}) conflicts with grounding {comp_label_value}({comp[0]},{comp[1]}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp[0]},{comp[1]}). Update from [{float_to_str(w.world[na[0]].lower)}, {float_to_str(w.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, w.world[na[0]], actual_name) + # Resolve inconsistency and set static w.world[na[0]].set_lower_upper(0, 1) w.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], actual_name) w.world[p2].set_lower_upper(0, 1) w.world[p2].set_static(True) if 
store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], actual_name) w.world[p1].set_lower_upper(0, 1) w.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) diff --git a/pyreason/scripts/interpretation/interpretation_fp.py b/pyreason/scripts/interpretation/interpretation_fp.py index 339b002b..5d733db3 100755 --- a/pyreason/scripts/interpretation/interpretation_fp.py +++ b/pyreason/scripts/interpretation/interpretation_fp.py @@ -96,8 +96,8 @@ def __init__(self, graph, ipl, annotation_functions, head_functions, reverse_gra # Keep track of all the rules that have affected each node/edge at each timestep/fp operation, and all ground atoms that have affected the rules as well. 
Keep track of previous bounds and name of the rule/fact here self.rule_trace_node_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) self.rule_trace_edge_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) - self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type))) - self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type))) + self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) + self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) # Nodes and edges of the graph self.nodes = numba.typed.List.empty_list(node_type) @@ -361,16 +361,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi # Check if we should even store any of the changes to the rule trace etc. 
# Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd)) + meta_name = facts_to_be_applied_node_trace[i] if atom_trace else '' + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd, True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_node_trace[i]) for p1, p2 in ipl: if p1==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[t][comp].world[p2])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[t][comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[t][comp].world[p2], facts_to_be_applied_node_trace[i]) elif p2==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[t][comp].world[p1])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[t][comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[t][comp].world[p1], facts_to_be_applied_node_trace[i]) @@ -442,16 +443,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi if l in 
interpretations_edge[t][comp].world and interpretations_edge[t][comp].world[l].is_static(): # Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[t][comp].world[l])) + meta_name = facts_to_be_applied_edge_trace[i] if atom_trace else '' + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[t][comp].world[l], True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_edge_trace[i]) for p1, p2 in ipl: if p1 == l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[t][comp].world[p2])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[t][comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_edge[t][comp].world[p2], facts_to_be_applied_edge_trace[i]) elif p2 == l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[t][comp].world[p1])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[t][comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), 
interpretations_edge[t][comp].world[p1], facts_to_be_applied_edge_trace[i]) else: @@ -1598,7 +1600,18 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1630,7 +1643,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1647,7 +1660,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + 
rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1700,7 +1713,18 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1732,7 +1756,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1749,7 +1773,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - 
rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1919,70 +1943,127 @@ def check_consistent_edge(interpretations, comp, na): @numba.njit(cache=True) def resolve_inconsistency_node(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): world = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = _p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp}) conflicts with grounding {comp_label_value}({comp}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp}). 
Update from [{float_to_str(world.world[na[0]].lower)}, {float_to_str(world.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, world.world[na[0]], actual_name) + # Resolve inconsistency and set static world.world[na[0]].set_lower_upper(0, 1) world.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], actual_name) world.world[p2].set_lower_upper(0, 1) world.world[p2].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, 
numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], actual_name) world.world[p1].set_lower_upper(0, 1) world.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) - # Add inconsistent predicates to a list + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) def resolve_inconsistency_edge(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): w = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = 
_p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp[0]},{comp[1]}) conflicts with grounding {comp_label_value}({comp[0]},{comp[1]}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp[0]},{comp[1]}). Update from [{float_to_str(w.world[na[0]].lower)}, {float_to_str(w.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, w.world[na[0]], actual_name) + # Resolve inconsistency and set static w.world[na[0]].set_lower_upper(0, 1) w.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], actual_name) w.world[p2].set_lower_upper(0, 1) w.world[p2].set_static(True) if 
store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], actual_name) w.world[p1].set_lower_upper(0, 1) w.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) diff --git a/pyreason/scripts/interpretation/interpretation_parallel.py b/pyreason/scripts/interpretation/interpretation_parallel.py index 2e12e697..ac61763b 100644 --- a/pyreason/scripts/interpretation/interpretation_parallel.py +++ b/pyreason/scripts/interpretation/interpretation_parallel.py @@ -92,8 +92,8 @@ def __init__(self, graph, ipl, annotation_functions, head_functions, reverse_gra # Keep track of all the rules that have affected each node/edge at each timestep/fp operation, and all ground atoms that have affected the rules as well. 
Keep track of previous bounds and name of the rule/fact here self.rule_trace_node_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) self.rule_trace_edge_atoms = numba.typed.List.empty_list(numba.types.Tuple((numba.types.ListType(numba.types.ListType(node_type)), numba.types.ListType(numba.types.ListType(edge_type)), interval.interval_type, numba.types.string))) - self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type))) - self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type))) + self.rule_trace_node = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, node_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) + self.rule_trace_edge = numba.typed.List.empty_list(numba.types.Tuple((numba.types.uint16, numba.types.uint16, edge_type, label.label_type, interval.interval_type, numba.types.boolean, numba.types.string, numba.types.string, numba.types.string))) # Nodes and edges of the graph self.nodes = numba.typed.List.empty_list(node_type) @@ -281,16 +281,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi # Check if we should even store any of the changes to the rule trace etc. 
# Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd)) + meta_name = facts_to_be_applied_node_trace[i] if atom_trace else '' + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, bnd, True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_node_trace[i]) for p1, p2 in ipl: if p1==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[comp].world[p2])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_node[comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[comp].world[p2], facts_to_be_applied_node_trace[i]) elif p2==l: - rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[comp].world[p1])) + rule_trace_node.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_node[comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_node_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_node[comp].world[p1], facts_to_be_applied_node_trace[i]) @@ -356,16 +357,17 @@ def reason(interpretations_node, interpretations_edge, predicate_map_node, predi if l in 
interpretations_edge[comp].world and interpretations_edge[comp].world[l].is_static(): # Inverse of this is: if not save_graph_attributes_to_rule_trace and graph_attribute if (save_graph_attributes_to_rule_trace or not graph_attribute) and store_interpretation_changes: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[comp].world[l])) + meta_name = facts_to_be_applied_edge_trace[i] if atom_trace else '' + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, l, interpretations_edge[comp].world[l], True, 'Fact', meta_name, '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), bnd, facts_to_be_applied_edge_trace[i]) for p1, p2 in ipl: if p1==l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[comp].world[p2])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p2, interpretations_edge[comp].world[p2], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_edge[comp].world[p2], facts_to_be_applied_edge_trace[i]) elif p2==l: - rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[comp].world[p1])) + rule_trace_edge.append((numba.types.uint16(t), numba.types.uint16(fp_cnt), comp, p1, interpretations_edge[comp].world[p1], True, 'IPL', f'IPL: {l.get_value()}', '')) if atom_trace: _update_rule_trace(rule_trace_edge_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), interpretations_edge[comp].world[p1], 
facts_to_be_applied_edge_trace[i]) else: @@ -1480,7 +1482,18 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1512,7 +1525,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1529,7 +1542,7 @@ def _update_node(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), 
numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1588,7 +1601,18 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c # Add to rule trace if update happened and add to atom trace if necessary if (save_graph_attributes_to_rule_trace or not mode=='graph-attribute-fact') and store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy())) + # Determine triggered_by and meta_name for the trace tuple + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + meta_name = '' + if atom_trace: + if mode=='fact' or mode=='graph-attribute-fact': + meta_name = facts_to_be_applied_trace[idx] + elif mode=='rule': + meta_name = rules_to_be_applied_trace[idx][2] + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, l, world.world[l].copy(), True, triggered_by, meta_name, '')) if atom_trace: # Mode can be fact or rule, updation of trace will happen accordingly if mode=='fact' or mode=='graph-attribute-fact': @@ -1620,7 +1644,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p2]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) if p2 == l: if p1 not in world.world: world.world[p1] = interval.closed(0, 1) @@ -1637,7 +1661,7 @@ def _update_edge(interpretations, predicate_map, comp, na, ipl, rule_trace, fp_c ip_update_cnt += 1 updated_bnds.append(world.world[p1]) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), 
numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(lower, upper), True, 'IPL', f'IPL: {l.get_value()}', '')) # Gather convergence data change = 0 @@ -1809,70 +1833,127 @@ def check_consistent_edge(interpretations, comp, na): @numba.njit(cache=True) def resolve_inconsistency_node(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): world = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = _p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp}) conflicts with grounding {comp_label_value}({comp}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp}). 
Update from [{float_to_str(world.world[na[0]].lower)}, {float_to_str(world.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, world.world[na[0]], actual_name) + # Resolve inconsistency and set static world.world[na[0]].set_lower_upper(0, 1) world.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p2], actual_name) world.world[p2].set_lower_upper(0, 1) world.world[p2].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, 
numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), world.world[p1], actual_name) world.world[p1].set_lower_upper(0, 1) world.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) - # Add inconsistent predicates to a list + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) def resolve_inconsistency_edge(interpretations, comp, na, ipl, t_cnt, fp_cnt, idx, atom_trace, rule_trace, rule_trace_atoms, rules_to_be_applied_trace, facts_to_be_applied_trace, store_interpretation_changes, mode): w = interpretations[comp] - if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1))) - if mode == 'fact' or mode == 'graph-attribute-fact' and atom_trace: - name = facts_to_be_applied_trace[idx] - elif mode == 'rule' and atom_trace: - name = rules_to_be_applied_trace[idx][2] + + # Determine triggered_by and actual_name + if mode == 'fact' or mode == 'graph-attribute-fact': + triggered_by = 'Fact' + else: + triggered_by = 'Rule' + actual_name = '' + if atom_trace: + if mode == 'fact' or mode == 'graph-attribute-fact': + actual_name = facts_to_be_applied_trace[idx] + elif mode == 'rule': + actual_name = rules_to_be_applied_trace[idx][2] + + # Build descriptive inconsistency message + msg = '' + if atom_trace: + comp_label_value = '' + for _p1, _p2 in ipl: + if _p1 == na[0]: + comp_label_value = _p2.get_value() + break + if _p2 == na[0]: + comp_label_value = 
_p1.get_value() + break + if comp_label_value != '': + msg = f'Inconsistency occurred. Grounding {na[0].get_value()}({comp[0]},{comp[1]}) conflicts with grounding {comp_label_value}({comp[0]},{comp[1]}). Setting bounds to [0,1] and static=True for this timestep.' else: - name = '-' + msg = f'Inconsistency occurred. Conflicting bounds for {na[0].get_value()}({comp[0]},{comp[1]}). Update from [{float_to_str(w.world[na[0]].lower)}, {float_to_str(w.world[na[0]].upper)}] to [{float_to_str(na[1].lower)}, {float_to_str(na[1].upper)}] is not allowed. Setting bounds to [0,1] and static=True for this timestep.' + + if store_interpretation_changes: + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, na[0], interval.closed(0,1), False, triggered_by, actual_name, msg)) if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[na[0]], f'Inconsistency due to {name}') + if mode == 'rule': + qn, qe, _ = rules_to_be_applied_trace[idx] + else: + qn = numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)) + qe = numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)) + _update_rule_trace(rule_trace_atoms, qn, qe, w.world[na[0]], actual_name) + # Resolve inconsistency and set static w.world[na[0]].set_lower_upper(0, 1) w.world[na[0]].set_static(True) for p1, p2 in ipl: if p1==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p2], actual_name) w.world[p2].set_lower_upper(0, 1) w.world[p2].set_static(True) if 
store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p2, interval.closed(0,1), False, 'IPL', actual_name, msg)) if p2==na[0]: if atom_trace: - _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], f'Inconsistency due to {name}') + _update_rule_trace(rule_trace_atoms, numba.typed.List.empty_list(numba.typed.List.empty_list(node_type)), numba.typed.List.empty_list(numba.typed.List.empty_list(edge_type)), w.world[p1], actual_name) w.world[p1].set_lower_upper(0, 1) w.world[p1].set_static(True) if store_interpretation_changes: - rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1))) + rule_trace.append((numba.types.uint16(t_cnt), numba.types.uint16(fp_cnt), comp, p1, interval.closed(0,1), False, 'IPL', actual_name, msg)) @numba.njit(cache=True) diff --git a/pyreason/scripts/utils/output.py b/pyreason/scripts/utils/output.py index 614ade1d..110fafa4 100755 --- a/pyreason/scripts/utils/output.py +++ b/pyreason/scripts/utils/output.py @@ -10,19 +10,23 @@ def __init__(self, timestamp, clause_map=None): self.rule_trace_edge = None def _parse_internal_rule_trace(self, interpretation): - header_node = ['Time', 'Fixed-Point-Operation', 'Node', 'Label', 'Old Bound', 'New Bound', 'Occurred Due To'] + header_node = ['Time', 'Fixed-Point-Operation', 'Node', 'Label', 'Old Bound', 'New Bound', 'Occurred Due To', 'Consistent', 'Triggered By', 'Inconsistency Message'] # Nodes rule trace data = [] max_j = -1 for i, r in enumerate(interpretation.rule_trace_node): - row = [r[0], r[1], r[2], r[3]._value, '-', r[4].to_str(), '-'] + # r[5] = consistent, r[6] = triggered_by, r[7] = name, r[8] = inconsistency_msg + row = [r[0], r[1], r[2], 
r[3]._value, '-', r[4].to_str(), '-', r[5], r[6], r[8]] + + # Use r[7] (name) for Occurred Due To when atom_trace is off + if not interpretation.atom_trace and r[7] != '': + row[6] = r[7] + if interpretation.atom_trace: qn, qe, old_bnd, name = interpretation.rule_trace_node_atoms[i] row[4] = old_bnd.to_str() - # Go through all the changes in the rule trace - # len(qn) = len(qe) = num of clauses in rule that was used row[6] = name # Go through each clause @@ -45,19 +49,23 @@ def _parse_internal_rule_trace(self, interpretation): # Store the trace in a DataFrame self.rule_trace_node = pd.DataFrame(data, columns=header_node) - header_edge = ['Time', 'Fixed-Point-Operation', 'Edge', 'Label', 'Old Bound', 'New Bound', 'Occurred Due To'] + header_edge = ['Time', 'Fixed-Point-Operation', 'Edge', 'Label', 'Old Bound', 'New Bound', 'Occurred Due To', 'Consistent', 'Triggered By', 'Inconsistency Message'] # Edges rule trace data = [] max_j = -1 for i, r in enumerate(interpretation.rule_trace_edge): - row = [r[0], r[1], r[2], r[3]._value, '-', r[4].to_str(), '-'] + # r[5] = consistent, r[6] = triggered_by, r[7] = name, r[8] = inconsistency_msg + row = [r[0], r[1], r[2], r[3]._value, '-', r[4].to_str(), '-', r[5], r[6], r[8]] + + # Use r[7] (name) for Occurred Due To when atom_trace is off + if not interpretation.atom_trace and r[7] != '': + row[6] = r[7] + if interpretation.atom_trace: qn, qe, old_bnd, name = interpretation.rule_trace_edge_atoms[i] row[4] = old_bnd.to_str() - # Go through all the changes in the rule trace - # len(qn) = num of clauses in rule that was used row[6] = name # Go through each clause @@ -82,7 +90,7 @@ def _parse_internal_rule_trace(self, interpretation): # Now do the reordering if self.clause_map is not None: - offset = 7 + offset = 10 columns_to_reorder_node = header_node[offset:] columns_to_reorder_edge = header_edge[offset:] self.rule_trace_node = self.rule_trace_node.apply(self._reorder_row, axis=1, map_dict=self.clause_map, 
columns_to_reorder=columns_to_reorder_node) diff --git a/tests/functional/test_inconsistency_trace.py b/tests/functional/test_inconsistency_trace.py new file mode 100644 index 00000000..1a2d8387 --- /dev/null +++ b/tests/functional/test_inconsistency_trace.py @@ -0,0 +1,68 @@ +import pyreason as pr +import networkx as nx +import pytest + + +def setup_mode(mode): + """Configure PyReason settings for the specified mode.""" + pr.reset() + pr.reset_rules() + pr.settings.verbose = True + + if mode == "fp": + pr.settings.fp_version = True + elif mode == "parallel": + pr.settings.parallel_computing = True + + +@pytest.mark.slow +@pytest.mark.parametrize("mode", ["regular", "fp", "parallel"]) +def test_inconsistency_trace_message_format(mode): + """Test that inconsistency trace messages include descriptive grounding info.""" + setup_mode(mode) + pr.settings.atom_trace = True + pr.settings.inconsistency_check = True + + g = nx.DiGraph() + g.add_nodes_from(['Alice', 'Bob']) + g.add_edge('Alice', 'Bob', contact=1) + pr.load_graph(g) + pr.add_inconsistent_predicate('sick', 'healthy') + pr.add_rule(pr.Rule('sick(y):[0.5,0.7] <- sick(x):[0.5,1.0], contact(x,y)', 'spread_rule')) + pr.add_fact(pr.Fact('sick(Alice):[0.8,1.0]', 'alice_sick_fact', 0, 0)) + pr.add_fact(pr.Fact('healthy(Alice):[0.9,1.0]', 'alice_healthy_fact', 0, 0)) + + interpretation = pr.reason(timesteps=1) + node_trace, _ = pr.get_rule_trace(interpretation) + + # Check new columns exist + assert 'Consistent' in node_trace.columns + assert 'Triggered By' in node_trace.columns + assert 'Inconsistency Message' in node_trace.columns + + # Filter to inconsistency rows using the new Consistent column + incon_rows = node_trace[node_trace['Consistent'] == False] + assert len(incon_rows) >= 2, f'Expected at least 2 inconsistency trace rows, got {len(incon_rows)}' + + for _, row in incon_rows.iterrows(): + # Occurred Due To should now contain the actual fact/rule name, not the message + assert not row['Occurred Due 
To'].startswith('Inconsistency'), \ + f'Occurred Due To should contain fact/rule name, not message: {row["Occurred Due To"]}' + + # Inconsistency Message should contain the descriptive message + msg = row['Inconsistency Message'] + assert 'Inconsistency occurred.' in msg, f'Expected "Inconsistency occurred." in message: {msg}' + assert 'Setting bounds to [0,1] and static=True for this timestep.' in msg, f'Expected bounds/static info in message: {msg}' + + # Triggered By should be Fact or IPL (these are fact-triggered inconsistencies) + assert row['Triggered By'] in ('Fact', 'IPL'), \ + f'Expected Triggered By to be Fact or IPL, got: {row["Triggered By"]}' + + # Also check that consistent rows have the right metadata + consistent_rows = node_trace[node_trace['Consistent'] == True] + assert len(consistent_rows) > 0, 'Expected some consistent rows' + for _, row in consistent_rows.iterrows(): + assert row['Inconsistency Message'] == '', \ + f'Consistent rows should have empty Inconsistency Message, got: {row["Inconsistency Message"]}' + assert row['Triggered By'] in ('Fact', 'Rule', 'IPL'), \ + f'Expected Triggered By to be Fact/Rule/IPL, got: {row["Triggered By"]}' diff --git a/tests/unit/disable_jit/interpretations/test_interpretation_common.py b/tests/unit/disable_jit/interpretations/test_interpretation_common.py index 9a0c176e..d163ec71 100644 --- a/tests/unit/disable_jit/interpretations/test_interpretation_common.py +++ b/tests/unit/disable_jit/interpretations/test_interpretation_common.py @@ -328,6 +328,28 @@ def test_get_qualified_groundings_filters(monkeypatch, interpretations): # ---- check_consistent_node / check_consistent_edge tests ---- +class _Label: + """Label-like object with .get_value() for resolve_inconsistency tests.""" + def __init__(self, value): + self._value = value + + @property + def value(self): + return self._value + + def get_value(self): + return self._value + + def __hash__(self): + return hash(self._value) + + def __eq__(self, other): + 
return isinstance(other, _Label) and self._value == other._value + + def __repr__(self): + return f"_Label({self._value!r})" + + class _Interval: def __init__(self, lower, upper): self.lower = lower @@ -376,18 +398,32 @@ def test_check_consistent_functions(monkeypatch, check_fn_name): def test_resolve_inconsistency_updates_world_and_trace(monkeypatch, resolver_name, comp_key): resolver = globals()[resolver_name] monkeypatch.setattr(interpretation.interval, "closed", lambda lo, up: _Interval(lo, up)) + monkeypatch.setattr(interpretation.numba.types, "uint16", lambda x: x) + + class _ListShim: + def __call__(self, iterable=()): + return list(iterable) + def empty_list(self, *args, **kwargs): + return [] + + monkeypatch.setattr(interpretation.numba.typed, "List", _ListShim()) + calls = [] monkeypatch.setattr(interpretation, "_update_rule_trace", lambda *a: calls.append(a)) - world = _World({"p": _Interval(0, 0.5), "q": _Interval(0, 0.5), "r": _Interval(0, 0.5)}) + + p = _Label("p") + q = _Label("q") + r = _Label("r") + world = _World({p: _Interval(0, 0.5), q: _Interval(0, 0.5), r: _Interval(0, 0.5)}) interpretations = {comp_key: world} - ipl = [("p", "q"), ("r", "p")] + ipl = [(p, q), (r, p)] rule_trace = [] rule_trace_atoms = [] facts = ["fact"] resolver( interpretations, comp_key, - ("p", _Interval(0.9, 1.0)), + (p, _Interval(0.9, 1.0)), ipl, 1, 2, @@ -400,10 +436,14 @@ def test_resolve_inconsistency_updates_world_and_trace(monkeypatch, resolver_nam True, "fact", ) - assert world.world["p"].lower == 0 and world.world["p"].upper == 1 and world.world["p"].static - assert world.world["q"].lower == 0 and world.world["q"].upper == 1 and world.world["q"].static - assert world.world["r"].lower == 0 and world.world["r"].upper == 1 and world.world["r"].static + assert world.world[p].lower == 0 and world.world[p].upper == 1 and world.world[p].static + assert world.world[q].lower == 0 and world.world[q].upper == 1 and world.world[q].static + assert world.world[r].lower == 0 
and world.world[r].upper == 1 and world.world[r].static assert len(rule_trace) == 3 + # Verify metadata fields in the 9-tuples + for entry in rule_trace: + assert entry[5] == False # consistent + assert entry[6] in ('Fact', 'IPL') # triggered_by assert len(calls) == 3 diff --git a/tests/unit/disable_jit/interpretations/test_reason_core.py b/tests/unit/disable_jit/interpretations/test_reason_core.py index 1c657e2c..64187e9f 100644 --- a/tests/unit/disable_jit/interpretations/test_reason_core.py +++ b/tests/unit/disable_jit/interpretations/test_reason_core.py @@ -311,7 +311,7 @@ def test_reason_logs_static_fact(monkeypatch, reason_env): prev_reasoning_data=[0, 1], ) - assert rule_trace == [(0, 1, node, label_, new_bnd)] + assert len(rule_trace) == 1 and rule_trace[0][:5] == (0, 1, node, label_, new_bnd) assert reason_env["interpretations_node"][0][node].world[label_] is static_bnd @@ -343,7 +343,7 @@ def test_reason_static_fact_traces_and_requeues(reason_env): assert facts == [(1, node, lbl, new_bnd, True, False)] assert trace == [["x"]] - assert rule_trace == [(0, 1, node, lbl, new_bnd), (0, 1, node, other, other_bnd)] + assert len(rule_trace) == 2 and rule_trace[0][:5] == (0, 1, node, lbl, new_bnd) and rule_trace[1][:5] == (0, 1, node, other, other_bnd) assert len(rule_trace_atoms) == 2 assert reason_env["interpretations_node"][0][node].world[lbl] is static_bnd @@ -376,7 +376,7 @@ def test_reason_static_fact_traces_complement_second(reason_env): assert facts == [(1, node, lbl, new_bnd, True, False)] assert trace == [["z"]] - assert rule_trace == [(0, 1, node, lbl, new_bnd), (0, 1, node, other, other_bnd)] + assert len(rule_trace) == 2 and rule_trace[0][:5] == (0, 1, node, lbl, new_bnd) and rule_trace[1][:5] == (0, 1, node, other, other_bnd) assert len(rule_trace_atoms) == 2 assert reason_env["interpretations_node"][0][node].world[lbl] is static_bnd @@ -614,10 +614,9 @@ def test_reason_static_edge_rule_trace_branches( ) if expect_trace: - assert rule_trace == [ - 
(0, 1, edge, lbl, static_bnd), - (0, 1, edge, other, other_bnd), - ] + assert len(rule_trace) == 2 + assert rule_trace[0][:5] == (0, 1, edge, lbl, static_bnd) + assert rule_trace[1][:5] == (0, 1, edge, other, other_bnd) else: assert rule_trace == [] assert facts == [(1, edge, lbl, reason_env["bnd"], True, graph_attr)] @@ -659,11 +658,10 @@ def test_reason_static_edge_atom_trace_complements(monkeypatch, reason_env): prev_reasoning_data=[0, 1], ) - assert rule_trace == [ - (0, 1, edge, lbl, static_bnd), - (0, 1, edge, other1, o1_bnd), - (0, 1, edge, other2, o2_bnd), - ] + assert len(rule_trace) == 3 + assert rule_trace[0][:5] == (0, 1, edge, lbl, static_bnd) + assert rule_trace[1][:5] == (0, 1, edge, other1, o1_bnd) + assert rule_trace[2][:5] == (0, 1, edge, other2, o2_bnd) assert facts == [(1, edge, lbl, reason_env["bnd"], True, False)] assert facts_trace == ["t"] assert mock_update.call_count == 3 diff --git a/tests/unit/disable_jit/interpretations/test_reason_misc.py b/tests/unit/disable_jit/interpretations/test_reason_misc.py index 662182c8..1a609c5a 100644 --- a/tests/unit/disable_jit/interpretations/test_reason_misc.py +++ b/tests/unit/disable_jit/interpretations/test_reason_misc.py @@ -26,8 +26,9 @@ def test_is_satisfied_edge_comparison_missing_bounds(): def test_resolve_inconsistency_node_rule_trace(monkeypatch): class SimpleInterval: - def __init__(self): - self.lower = self.upper = None + def __init__(self, lower=0.0, upper=1.0): + self.lower = lower + self.upper = upper self.static = False def set_lower_upper(self, l, u): @@ -47,11 +48,12 @@ def __call__(self, iterable=()): def empty_list(self, *args, **kwargs): return [] - monkeypatch.setattr(interpretation.interval, "closed", lambda l, u: SimpleInterval()) + monkeypatch.setattr(interpretation.interval, "closed", lambda l, u: SimpleInterval(l, u)) monkeypatch.setattr(interpretation.numba.typed, "List", _ListShim()) monkeypatch.setattr(interpretation.numba.types, "uint16", lambda x: x) l = label.Label("L") 
+ l.value = l.get_value() world = SimpleWorld() world.world[l] = SimpleInterval() interpretations = {"n1": world} @@ -80,8 +82,16 @@ def empty_list(self, *args, **kwargs): "rule", ) - assert mock_update.call_args[0][-1].endswith("r") + # _update_rule_trace now receives the actual name, not the message + name = mock_update.call_args[0][-1] + assert name == "r" + # Metadata is now embedded in the rule_trace tuple assert len(rule_trace) == 1 + assert rule_trace[0][5] == False # consistent + assert rule_trace[0][6] == 'Rule' # triggered_by + assert rule_trace[0][7] == 'r' # actual_name + assert rule_trace[0][8].startswith("Inconsistency occurred.") + assert "Conflicting bounds for L(n1)" in rule_trace[0][8] def test_resolve_inconsistency_node_rule_trace_no_atom_trace(monkeypatch): @@ -140,12 +150,17 @@ def empty_list(self, *args, **kwargs): mock_update.assert_not_called() assert len(rule_trace) == 1 + # With atom_trace=False, msg is empty, but tuple still has 9 fields + assert rule_trace[0][5] == False # consistent + assert rule_trace[0][6] == 'Rule' # triggered_by + assert rule_trace[0][8] == '' # no message when atom_trace is off def test_resolve_inconsistency_edge_rule_trace(monkeypatch): class SimpleInterval: - def __init__(self): - self.lower = self.upper = None + def __init__(self, lower=0.0, upper=1.0): + self.lower = lower + self.upper = upper self.static = False def set_lower_upper(self, l, u): @@ -165,11 +180,12 @@ def __call__(self, iterable=()): def empty_list(self, *args, **kwargs): return [] - monkeypatch.setattr(interpretation.interval, "closed", lambda l, u: SimpleInterval()) + monkeypatch.setattr(interpretation.interval, "closed", lambda l, u: SimpleInterval(l, u)) monkeypatch.setattr(interpretation.numba.typed, "List", _ListShim()) monkeypatch.setattr(interpretation.numba.types, "uint16", lambda x: x) l = label.Label("L") + l.value = l.get_value() world = SimpleWorld() world.world[l] = SimpleInterval() interpretations = {("a", "b"): world} @@ -198,8 
+214,16 @@ def empty_list(self, *args, **kwargs): "rule", ) - assert mock_update.call_args[0][-1].endswith("r") + # _update_rule_trace now receives the actual name, not the message + name = mock_update.call_args[0][-1] + assert name == "r" + # Metadata is now embedded in the rule_trace tuple assert len(rule_trace) == 1 + assert rule_trace[0][5] == False # consistent + assert rule_trace[0][6] == 'Rule' # triggered_by + assert rule_trace[0][7] == 'r' # actual_name + assert rule_trace[0][8].startswith("Inconsistency occurred.") + assert "Conflicting bounds for L(a,b)" in rule_trace[0][8] def test_resolve_inconsistency_edge_rule_trace_no_atom_trace(monkeypatch): @@ -258,3 +282,7 @@ def empty_list(self, *args, **kwargs): mock_update.assert_not_called() assert len(rule_trace) == 1 + # With atom_trace=False, msg is empty, but tuple still has 9 fields + assert rule_trace[0][5] == False # consistent + assert rule_trace[0][6] == 'Rule' # triggered_by + assert rule_trace[0][8] == '' # no message when atom_trace is off From 901aad9621e489255c0fe964a33553a463c584cf Mon Sep 17 00:00:00 2001 From: Colton Date: Sat, 21 Feb 2026 18:50:43 -0500 Subject: [PATCH 02/10] Fix functional test --- pyreason/scripts/utils/filter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyreason/scripts/utils/filter.py b/pyreason/scripts/utils/filter.py index c39a6b13..541f49e1 100755 --- a/pyreason/scripts/utils/filter.py +++ b/pyreason/scripts/utils/filter.py @@ -20,9 +20,9 @@ def filter_and_sort_nodes(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. Since list is sequencial, whatever was earlier will be overwritten for change in interpretation.rule_trace_node: - t, fp, comp, label, bnd = change + t, fp, comp, label, bnd = change[:5] latest_changes[t][(comp, label)] = bnd - + # Create a list that needs to be sorted. 
This contains only the latest changes list_to_be_sorted = [] for t, d in latest_changes.items(): @@ -74,7 +74,7 @@ def filter_and_sort_edges(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. Since list is sequential, whatever was earlier will be overwritten for change in interpretation.rule_trace_edge: - t, fp, comp, label, bnd = change + t, fp, comp, label, bnd = change[:5] latest_changes[t][(comp, label)] = bnd # Create a list that needs to be sorted. This contains only the latest changes From 98263f2d19e06ccfb91446759f40293e3cd8b5a5 Mon Sep 17 00:00:00 2001 From: Colton Date: Sun, 22 Feb 2026 09:19:41 -0500 Subject: [PATCH 03/10] Revert "Fix functional test" This reverts commit 901aad9621e489255c0fe964a33553a463c584cf. --- pyreason/scripts/utils/filter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyreason/scripts/utils/filter.py b/pyreason/scripts/utils/filter.py index 541f49e1..c39a6b13 100755 --- a/pyreason/scripts/utils/filter.py +++ b/pyreason/scripts/utils/filter.py @@ -20,9 +20,9 @@ def filter_and_sort_nodes(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. Since list is sequencial, whatever was earlier will be overwritten for change in interpretation.rule_trace_node: - t, fp, comp, label, bnd = change[:5] + t, fp, comp, label, bnd = change latest_changes[t][(comp, label)] = bnd - + # Create a list that needs to be sorted. This contains only the latest changes list_to_be_sorted = [] for t, d in latest_changes.items(): @@ -74,7 +74,7 @@ def filter_and_sort_edges(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. 
Since list is sequential, whatever was earlier will be overwritten for change in interpretation.rule_trace_edge: - t, fp, comp, label, bnd = change[:5] + t, fp, comp, label, bnd = change latest_changes[t][(comp, label)] = bnd # Create a list that needs to be sorted. This contains only the latest changes From 2e607fc4c32f36b2c8a7a5a61acdd697a9ace4e2 Mon Sep 17 00:00:00 2001 From: ColtonPayne <72282946+ColtonPayne@users.noreply.github.com> Date: Sun, 8 Mar 2026 10:09:15 -0400 Subject: [PATCH 04/10] fix too few values error --- pyreason/.cache_status.yaml | 2 +- pyreason/scripts/interpretation/interpretation.py | 4 ++-- pyreason/scripts/interpretation/interpretation_fp.py | 4 ++-- pyreason/scripts/interpretation/interpretation_parallel.py | 4 ++-- pyreason/scripts/utils/filter.py | 4 ++-- tests/api_tests/test_pyreason_file_loading.py | 2 +- .../disable_jit/interpretations/test_interpretation_common.py | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pyreason/.cache_status.yaml b/pyreason/.cache_status.yaml index 32458f5d..71173842 100644 --- a/pyreason/.cache_status.yaml +++ b/pyreason/.cache_status.yaml @@ -1 +1 @@ -initialized: false +initialized: true diff --git a/pyreason/scripts/interpretation/interpretation.py b/pyreason/scripts/interpretation/interpretation.py index 515a555e..b6a53e0f 100755 --- a/pyreason/scripts/interpretation/interpretation.py +++ b/pyreason/scripts/interpretation/interpretation.py @@ -695,7 +695,7 @@ def get_dict(self): # Update interpretation nodes for change in self.rule_trace_node: - time, _, node, l, bnd = change + time, _, node, l, bnd, consistent, triggered_by, name, inconsistency_msg = change interpretations[time][node][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well @@ -705,7 +705,7 @@ def get_dict(self): # Update interpretation edges for change in self.rule_trace_edge: - time, _, edge, l, bnd, = change + time, _, edge, l, bnd, consistent, triggered_by, name, 
inconsistency_msg = change interpretations[time][edge][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well diff --git a/pyreason/scripts/interpretation/interpretation_fp.py b/pyreason/scripts/interpretation/interpretation_fp.py index 5d733db3..289cc337 100755 --- a/pyreason/scripts/interpretation/interpretation_fp.py +++ b/pyreason/scripts/interpretation/interpretation_fp.py @@ -816,7 +816,7 @@ def get_dict(self): # Update interpretation nodes for change in self.rule_trace_node: - time, _, node, l, bnd = change + time, _, node, l, bnd, consistent, triggered_by, name, inconsistency_msg = change interpretations[time][node][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well @@ -826,7 +826,7 @@ def get_dict(self): # Update interpretation edges for change in self.rule_trace_edge: - time, _, edge, l, bnd, = change + time, _, edge, l, bnd, consistent, triggered_by, name, inconsistency_msg = change interpretations[time][edge][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well diff --git a/pyreason/scripts/interpretation/interpretation_parallel.py b/pyreason/scripts/interpretation/interpretation_parallel.py index ac61763b..8471ed76 100644 --- a/pyreason/scripts/interpretation/interpretation_parallel.py +++ b/pyreason/scripts/interpretation/interpretation_parallel.py @@ -695,7 +695,7 @@ def get_dict(self): # Update interpretation nodes for change in self.rule_trace_node: - time, _, node, l, bnd = change + time, _, node, l, bnd, consistent, triggered_by, name, inconsistency_msg = change interpretations[time][node][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well @@ -705,7 +705,7 @@ def get_dict(self): # Update interpretation edges for change in self.rule_trace_edge: - time, _, edge, l, bnd, = change + time, _, edge, l, bnd, consistent, triggered_by, name, inconsistency_msg = change
interpretations[time][edge][l._value] = (bnd.lower, bnd.upper) # If persistent, update all following timesteps as well diff --git a/pyreason/scripts/utils/filter.py b/pyreason/scripts/utils/filter.py index c39a6b13..9a58f737 100755 --- a/pyreason/scripts/utils/filter.py +++ b/pyreason/scripts/utils/filter.py @@ -20,7 +20,7 @@ def filter_and_sort_nodes(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. Since list is sequencial, whatever was earlier will be overwritten for change in interpretation.rule_trace_node: - t, fp, comp, label, bnd = change + t, fp, comp, label, bnd, consistent, triggered_by, name, inconsistency_msg = change latest_changes[t][(comp, label)] = bnd # Create a list that needs to be sorted. This contains only the latest changes @@ -74,7 +74,7 @@ def filter_and_sort_edges(self, interpretation, labels, bound, sort_by='lower', # change contains the timestep, fp operation, component, label and interval # Keep only the latest/most recent changes. Since list is sequential, whatever was earlier will be overwritten for change in interpretation.rule_trace_edge: - t, fp, comp, label, bnd = change + t, fp, comp, label, bnd, consistent, triggered_by, name, inconsistency_msg = change latest_changes[t][(comp, label)] = bnd # Create a list that needs to be sorted. 
This contains only the latest changes diff --git a/tests/api_tests/test_pyreason_file_loading.py b/tests/api_tests/test_pyreason_file_loading.py index 392cf97d..0710fa47 100644 --- a/tests/api_tests/test_pyreason_file_loading.py +++ b/tests/api_tests/test_pyreason_file_loading.py @@ -1715,4 +1715,4 @@ def test_add_rule_from_json_with_thresholds_and_weights(self): rules = pr.get_rules() assert len(rules) == 1 finally: - os.unlink(tmp_path) \ No newline at end of file + os.unlink(tmp_path) diff --git a/tests/unit/disable_jit/interpretations/test_interpretation_common.py b/tests/unit/disable_jit/interpretations/test_interpretation_common.py index d163ec71..af1a41a6 100644 --- a/tests/unit/disable_jit/interpretations/test_interpretation_common.py +++ b/tests/unit/disable_jit/interpretations/test_interpretation_common.py @@ -704,8 +704,8 @@ def build_dummy(persistent): time=1, nodes=["n1"], edges=[("n1", "n2")], - rule_trace_node=[(0, 0, "n1", DummyLabel("L1"), DummyBound(0.1, 0.2))], - rule_trace_edge=[(0, 0, ("n1", "n2"), DummyLabel("L2"), DummyBound(0.3, 0.4))], + rule_trace_node=[(0, 0, "n1", DummyLabel("L1"), DummyBound(0.1, 0.2), True, "Rule", "rule_name", "")], + rule_trace_edge=[(0, 0, ("n1", "n2"), DummyLabel("L2"), DummyBound(0.3, 0.4), True, "Rule", "rule_name", "")], persistent=persistent, ) From 5c5eb78944f2ea66e302398101fd8b1b7eff0de7 Mon Sep 17 00:00:00 2001 From: Colton Date: Tue, 10 Mar 2026 14:56:21 -0400 Subject: [PATCH 05/10] Count graph attributes --- pyreason/scripts/utils/graphml_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyreason/scripts/utils/graphml_parser.py b/pyreason/scripts/utils/graphml_parser.py index 2eb2c7ae..4f44b4ec 100755 --- a/pyreason/scripts/utils/graphml_parser.py +++ b/pyreason/scripts/utils/graphml_parser.py @@ -89,5 +89,6 @@ def parse_graph_attributes(self, static_facts): specific_edge_labels[label.Label(label_str)].append((e[0], e[1])) f = fact_edge.Fact('graph-attribute-fact', (e[0], e[1]), 
label.Label(label_str), interval.closed(lower_bnd, upper_bnd), 0, 0, static=static_facts) facts_edge.append(f) - + + print("Added ", len(fact_node), "graph-attribute node facts and ", len(fact_edge), "graph_attribute edge facts.") return facts_node, facts_edge, specific_node_labels, specific_edge_labels From d9aadfb67bcfdda8893d499b31db8db35dbc59c1 Mon Sep 17 00:00:00 2001 From: Colton Date: Tue, 10 Mar 2026 15:02:00 -0400 Subject: [PATCH 06/10] Fix typo --- pyreason/scripts/utils/graphml_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyreason/scripts/utils/graphml_parser.py b/pyreason/scripts/utils/graphml_parser.py index 4f44b4ec..0f909c09 100755 --- a/pyreason/scripts/utils/graphml_parser.py +++ b/pyreason/scripts/utils/graphml_parser.py @@ -90,5 +90,5 @@ def parse_graph_attributes(self, static_facts): f = fact_edge.Fact('graph-attribute-fact', (e[0], e[1]), label.Label(label_str), interval.closed(lower_bnd, upper_bnd), 0, 0, static=static_facts) facts_edge.append(f) - print("Added ", len(fact_node), "graph-attribute node facts and ", len(fact_edge), "graph_attribute edge facts.") + print("Added ", len(facts_node), "graph-attribute node facts and ", len(facts_edge), "graph_attribute edge facts.") return facts_node, facts_edge, specific_node_labels, specific_edge_labels From f8b92c26b0aac86de09c4f7bec1ea3d09812ff51 Mon Sep 17 00:00:00 2001 From: Colton Date: Wed, 11 Mar 2026 15:14:50 -0400 Subject: [PATCH 07/10] Test partial edge grounding --- pyreason/scripts/interpretation/interpretation.py | 6 ++++++ pyreason/scripts/interpretation/interpretation_fp.py | 6 ++++++ pyreason/scripts/interpretation/interpretation_parallel.py | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/pyreason/scripts/interpretation/interpretation.py b/pyreason/scripts/interpretation/interpretation.py index b6a53e0f..4b45a3a6 100755 --- a/pyreason/scripts/interpretation/interpretation.py +++ b/pyreason/scripts/interpretation/interpretation.py @@ -864,6 
+864,12 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: + # Pre-populate groundings for any variable that matches an existing node (partial grounding) + if allow_ground_rules: + if clause_var_1 in nodes_set and clause_var_1 not in groundings: + groundings[clause_var_1] = numba.typed.List([clause_var_1]) + if clause_var_2 in nodes_set and clause_var_2 not in groundings: + groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the edges that are qualified to use for finding future groundings faster) diff --git a/pyreason/scripts/interpretation/interpretation_fp.py b/pyreason/scripts/interpretation/interpretation_fp.py index 289cc337..b62f1e60 100755 --- a/pyreason/scripts/interpretation/interpretation_fp.py +++ b/pyreason/scripts/interpretation/interpretation_fp.py @@ -986,6 +986,12 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: + # Pre-populate groundings for any variable that matches an existing node (partial grounding) + if allow_ground_rules: + if clause_var_1 in nodes_set and clause_var_1 not in groundings: + groundings[clause_var_1] = numba.typed.List([clause_var_1]) + if clause_var_2 in nodes_set and clause_var_2 not in groundings: + groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the 
edges that are qualified to use for finding future groundings faster) diff --git a/pyreason/scripts/interpretation/interpretation_parallel.py b/pyreason/scripts/interpretation/interpretation_parallel.py index 8471ed76..2fead984 100644 --- a/pyreason/scripts/interpretation/interpretation_parallel.py +++ b/pyreason/scripts/interpretation/interpretation_parallel.py @@ -864,6 +864,12 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: + # Pre-populate groundings for any variable that matches an existing node (partial grounding) + if allow_ground_rules: + if clause_var_1 in nodes_set and clause_var_1 not in groundings: + groundings[clause_var_1] = numba.typed.List([clause_var_1]) + if clause_var_2 in nodes_set and clause_var_2 not in groundings: + groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the edges that are qualified to use for finding future groundings faster) From 08e8d023f4aaf9cd57652fe8a07d38e044a68b66 Mon Sep 17 00:00:00 2001 From: Colton Date: Mon, 16 Mar 2026 13:59:31 -0400 Subject: [PATCH 08/10] Revert "Test partial edge grounding" This reverts commit f8b92c26b0aac86de09c4f7bec1ea3d09812ff51. 
--- pyreason/scripts/interpretation/interpretation.py | 6 ------ pyreason/scripts/interpretation/interpretation_fp.py | 6 ------ pyreason/scripts/interpretation/interpretation_parallel.py | 6 ------ 3 files changed, 18 deletions(-) diff --git a/pyreason/scripts/interpretation/interpretation.py b/pyreason/scripts/interpretation/interpretation.py index 4b45a3a6..b6a53e0f 100755 --- a/pyreason/scripts/interpretation/interpretation.py +++ b/pyreason/scripts/interpretation/interpretation.py @@ -864,12 +864,6 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: - # Pre-populate groundings for any variable that matches an existing node (partial grounding) - if allow_ground_rules: - if clause_var_1 in nodes_set and clause_var_1 not in groundings: - groundings[clause_var_1] = numba.typed.List([clause_var_1]) - if clause_var_2 in nodes_set and clause_var_2 not in groundings: - groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the edges that are qualified to use for finding future groundings faster) diff --git a/pyreason/scripts/interpretation/interpretation_fp.py b/pyreason/scripts/interpretation/interpretation_fp.py index b62f1e60..289cc337 100755 --- a/pyreason/scripts/interpretation/interpretation_fp.py +++ b/pyreason/scripts/interpretation/interpretation_fp.py @@ -986,12 +986,6 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: - # Pre-populate groundings for any variable that matches an existing node (partial grounding) - 
if allow_ground_rules: - if clause_var_1 in nodes_set and clause_var_1 not in groundings: - groundings[clause_var_1] = numba.typed.List([clause_var_1]) - if clause_var_2 in nodes_set and clause_var_2 not in groundings: - groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the edges that are qualified to use for finding future groundings faster) diff --git a/pyreason/scripts/interpretation/interpretation_parallel.py b/pyreason/scripts/interpretation/interpretation_parallel.py index 2fead984..8471ed76 100644 --- a/pyreason/scripts/interpretation/interpretation_parallel.py +++ b/pyreason/scripts/interpretation/interpretation_parallel.py @@ -864,12 +864,6 @@ def _ground_rule(rule, interpretations_node, interpretations_edge, predicate_map if allow_ground_rules and (clause_var_1, clause_var_2) in edges_set: grounding = numba.typed.List([(clause_var_1, clause_var_2)]) else: - # Pre-populate groundings for any variable that matches an existing node (partial grounding) - if allow_ground_rules: - if clause_var_1 in nodes_set and clause_var_1 not in groundings: - groundings[clause_var_1] = numba.typed.List([clause_var_1]) - if clause_var_2 in nodes_set and clause_var_2 not in groundings: - groundings[clause_var_2] = numba.typed.List([clause_var_2]) grounding = get_rule_edge_clause_grounding(clause_var_1, clause_var_2, groundings, groundings_edges, neighbors, reverse_neighbors, predicate_map_edge, clause_label, edges) # Narrow subset based on predicate (save the edges that are qualified to use for finding future groundings faster) From c47f3848726a6a270e1a5bfdf94b6a24c97f74ff Mon Sep 17 00:00:00 2001 From: Colton Date: Wed, 18 Mar 2026 08:17:59 -0400 Subject: [PATCH 09/10] Cache Status --- pyreason/.cache_status.yaml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/pyreason/.cache_status.yaml b/pyreason/.cache_status.yaml index 71173842..32458f5d 100644 --- a/pyreason/.cache_status.yaml +++ b/pyreason/.cache_status.yaml @@ -1 +1 @@ -initialized: true +initialized: false From f415eca6c38004c573da3e24bebd06b53295ecc3 Mon Sep 17 00:00:00 2001 From: Colton Date: Wed, 18 Mar 2026 08:48:47 -0400 Subject: [PATCH 10/10] Update traces to be consistent with previous examples --- ...anced_rule_trace_edges_20241119-012153.csv | 67 +++++++++++++++++++ ...anced_rule_trace_nodes_20241119-012153.csv | 32 +++++++++ ...basic_rule_trace_nodes_20241119-012005.csv | 7 ++ ...basic_rule_trace_nodes_20241125-114246.csv | 7 ++ ...sistency_example_edges_20260318-083700.csv | 10 +++ ...sistency_example_nodes_20260318-083700.csv | 15 +++++ ...edges_rule_trace_edges_20241119-140955.csv | 10 +++ ...edges_rule_trace_nodes_20241119-140955.csv | 11 +++ {docs => examples}/inconsistency_example.py | 0 9 files changed, 159 insertions(+) create mode 100644 examples/csv_outputs/advanced_rule_trace_edges_20241119-012153.csv create mode 100644 examples/csv_outputs/advanced_rule_trace_nodes_20241119-012153.csv create mode 100644 examples/csv_outputs/basic_rule_trace_nodes_20241119-012005.csv create mode 100644 examples/csv_outputs/basic_rule_trace_nodes_20241125-114246.csv create mode 100644 examples/csv_outputs/inconsistency_example_edges_20260318-083700.csv create mode 100644 examples/csv_outputs/inconsistency_example_nodes_20260318-083700.csv create mode 100644 examples/csv_outputs/infer_edges_rule_trace_edges_20241119-140955.csv create mode 100644 examples/csv_outputs/infer_edges_rule_trace_nodes_20241119-140955.csv rename {docs => examples}/inconsistency_example.py (100%) diff --git a/examples/csv_outputs/advanced_rule_trace_edges_20241119-012153.csv b/examples/csv_outputs/advanced_rule_trace_edges_20241119-012153.csv new file mode 100644 index 00000000..1f937539 --- +++ 
b/examples/csv_outputs/advanced_rule_trace_edges_20241119-012153.csv @@ -0,0 +1,67 @@ +Time,Fixed-Point-Operation,Edge,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2 +0,1,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +0,1,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +0,1,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +0,1,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +0,1,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +0,1,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +0,1,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +0,1,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +0,1,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 
'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +0,1,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +0,1,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +1,3,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +1,3,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +1,3,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +1,3,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +1,3,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +1,3,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +1,3,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +1,3,"('customer_4', 
'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +1,3,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +1,3,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +1,3,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +2,5,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +2,5,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +2,5,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +2,5,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +2,5,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +2,5,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 
'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +2,5,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +2,5,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +2,5,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +2,5,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +2,5,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +3,7,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +3,7,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +3,7,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +3,7,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +3,7,"('customer_3', 
'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +3,7,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +3,7,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +3,7,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +3,7,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +3,7,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +3,7,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +4,9,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +4,9,"('customer_0', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +4,9,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 
'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +4,9,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +4,9,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +4,9,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +4,9,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +4,9,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +4,9,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +4,9,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +4,9,"('customer_6', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +5,11,"('customer_3', 'customer_1')",car_friend,"[0.0,1.0]","[1.0,1.0]",car_friend_rule,"[('customer_3', 'Car_0')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +5,11,"('customer_0', 
'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +5,11,"('customer_0', 'customer_2')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]","[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]" +5,11,"('customer_2', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_2', 'Car_1'), ('customer_2', 'Car_3'), ('customer_2', 'Car_11')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +5,11,"('customer_3', 'customer_1')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_1', 'Car_0'), ('customer_1', 'Car_8')]" +5,11,"('customer_3', 'customer_4')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]","[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]" +5,11,"('customer_4', 'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" +5,11,"('customer_4', 'customer_5')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_4', 'Car_4'), ('customer_4', 'Car_9')]","[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]" +5,11,"('customer_5', 'customer_3')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_3', 'Car_3'), ('customer_3', 'Car_0'), ('customer_3', 'Car_10')]" +5,11,"('customer_5', 'customer_6')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_5', 'Car_5'), ('customer_5', 'Car_2')]","[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]" +5,11,"('customer_6', 
'customer_0')",same_color_car,"[0.0,1.0]","[1.0,1.0]",same_car_color_rule,"[('customer_6', 'Car_6'), ('customer_6', 'Car_4')]","[('customer_0', 'Car_2'), ('customer_0', 'Car_7')]" diff --git a/examples/csv_outputs/advanced_rule_trace_nodes_20241119-012153.csv b/examples/csv_outputs/advanced_rule_trace_nodes_20241119-012153.csv new file mode 100644 index 00000000..f648b7e7 --- /dev/null +++ b/examples/csv_outputs/advanced_rule_trace_nodes_20241119-012153.csv @@ -0,0 +1,32 @@ +Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2 +0,0,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +1,2,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +1,2,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] +1,2,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] +1,2,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] +1,2,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] +1,3,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] +2,4,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +2,4,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] +2,4,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] +2,4,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] +2,4,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] +2,5,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] +3,6,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +3,6,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] 
+3,6,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] +3,6,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] +3,6,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] +3,7,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] +4,8,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +4,8,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] +4,8,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] +4,8,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] +4,8,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] +4,9,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] +5,10,popular-fac,popular-fac,"[0.0,1.0]","[1.0,1.0]",popular(customer_0),, +5,10,customer_4,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_4', 'Car_4')]",['Car_4'] +5,10,customer_6,cool_car,"[0.0,1.0]","[1.0,1.0]",cool_car_rule,"[('customer_6', 'Car_4')]",['Car_4'] +5,10,customer_3,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_3', 'Pet_2')]",['Pet_2'] +5,10,customer_4,cool_pet,"[0.0,1.0]","[1.0,1.0]",cool_pet_rule,"[('customer_4', 'Pet_2')]",['Pet_2'] +5,11,customer_4,trendy,"[0.0,1.0]","[1.0,1.0]",trendy_rule,['customer_4'],['customer_4'] diff --git a/examples/csv_outputs/basic_rule_trace_nodes_20241119-012005.csv b/examples/csv_outputs/basic_rule_trace_nodes_20241119-012005.csv new file mode 100644 index 00000000..02ece211 --- /dev/null +++ b/examples/csv_outputs/basic_rule_trace_nodes_20241119-012005.csv @@ -0,0 +1,7 @@ +Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To +0,0,Mary,popular,-,"[1.0,1.0]",- +1,1,Mary,popular,-,"[1.0,1.0]",- 
+1,1,Justin,popular,-,"[1.0,1.0]",- +2,2,Mary,popular,-,"[1.0,1.0]",- +2,2,John,popular,-,"[1.0,1.0]",- +2,2,Justin,popular,-,"[1.0,1.0]",- diff --git a/examples/csv_outputs/basic_rule_trace_nodes_20241125-114246.csv b/examples/csv_outputs/basic_rule_trace_nodes_20241125-114246.csv new file mode 100644 index 00000000..02ece211 --- /dev/null +++ b/examples/csv_outputs/basic_rule_trace_nodes_20241125-114246.csv @@ -0,0 +1,7 @@ +Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To +0,0,Mary,popular,-,"[1.0,1.0]",- +1,1,Mary,popular,-,"[1.0,1.0]",- +1,1,Justin,popular,-,"[1.0,1.0]",- +2,2,Mary,popular,-,"[1.0,1.0]",- +2,2,John,popular,-,"[1.0,1.0]",- +2,2,Justin,popular,-,"[1.0,1.0]",- diff --git a/examples/csv_outputs/inconsistency_example_edges_20260318-083700.csv b/examples/csv_outputs/inconsistency_example_edges_20260318-083700.csv new file mode 100644 index 00000000..5a27c76d --- /dev/null +++ b/examples/csv_outputs/inconsistency_example_edges_20260318-083700.csv @@ -0,0 +1,10 @@ +Time,Fixed-Point-Operation,Edge,Label,Old Bound,New Bound,Occurred Due To,Consistent,Triggered By,Inconsistency Message,Clause-1,Clause-2,Clause-3 +0,0,"('Alice', 'Bob')",close_contact,"[0.0,1.0]","[0.8,1.0]",alice_bob_close_fact,True,Fact,,,, +0,0,"('Alice', 'Bob')",no_contact,"[0.0,1.0]","[0.0,0.19999999999999996]",IPL: close_contact,True,IPL,,,, +0,0,"('Bob', 'Carol')",trust,"[0.0,1.0]","[0.9,1.0]",bob_carol_trust_high,True,Fact,,,, +0,1,"('Bob', 'Carol')",trust,"[0.9,1.0]","[0.0,1.0]",distrust_rule,False,Rule,"Inconsistency occurred. Conflicting bounds for trust(Bob,Carol). Update from [0.900, 1.000] to [0.000, 0.200] is not allowed. 
Setting bounds to [0,1] and static=True for this timestep.",['Bob'],"[('Bob', 'Carol')]", +0,1,"('Bob', 'Dave')",trust,"[0.0,1.0]","[0.0,0.2]",distrust_rule,True,Rule,,['Bob'],"[('Bob', 'Dave')]", +0,1,"('Bob', 'Carol')",risk,"[0.0,1.0]","[0.6,0.8]",risk_rule,True,Rule,,['Bob'],"[('Bob', 'Carol')]", +0,1,"('Bob', 'Dave')",risk,"[0.0,1.0]","[0.6,0.8]",risk_rule,True,Rule,,['Bob'],"[('Bob', 'Dave')]", +0,2,"('Bob', 'Dave')",no_contact,"[0.0,1.0]","[0.8,1.0]",quarantine_rule,True,Rule,,['Bob'],['Dave'],"[('Bob', 'Dave')]" +0,2,"('Bob', 'Dave')",close_contact,"[0.0,1.0]","[0.0,0.19999999999999996]",IPL: no_contact,True,IPL,,,, diff --git a/examples/csv_outputs/inconsistency_example_nodes_20260318-083700.csv b/examples/csv_outputs/inconsistency_example_nodes_20260318-083700.csv new file mode 100644 index 00000000..228e19cc --- /dev/null +++ b/examples/csv_outputs/inconsistency_example_nodes_20260318-083700.csv @@ -0,0 +1,15 @@ +Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To,Consistent,Triggered By,Inconsistency Message,Clause-1,Clause-2 +0,0,Alice,sick,"[0.0,1.0]","[0.8,1.0]",alice_sick_fact,True,Fact,,, +0,0,Alice,healthy,"[0.0,1.0]","[0.0,0.19999999999999996]",IPL: sick,True,IPL,,, +0,0,Alice,healthy,"[0.0,0.19999999999999996]","[0.0,1.0]",alice_healthy_fact,False,Fact,"Inconsistency occurred. Grounding healthy(Alice) conflicts with grounding sick(Alice). Setting bounds to [0,1] and static=True for this timestep.",, +0,0,Alice,sick,"[0.8,1.0]","[0.0,1.0]",alice_healthy_fact,False,IPL,"Inconsistency occurred. Grounding healthy(Alice) conflicts with grounding sick(Alice). 
Setting bounds to [0,1] and static=True for this timestep.",, +0,0,Bob,sick,"[0.0,1.0]","[0.6,0.8]",bob_sick_fact,True,Fact,,, +0,0,Bob,healthy,"[0.0,1.0]","[0.19999999999999996,0.4]",IPL: sick,True,IPL,,, +0,0,Carol,healthy,"[0.0,1.0]","[0.9,1.0]",carol_healthy_fact,True,Fact,,, +0,0,Carol,sick,"[0.0,1.0]","[0.0,0.09999999999999998]",IPL: healthy,True,IPL,,, +0,0,Bob,tired,"[0.0,1.0]","[0.8,1.0]",bob_tired_fact_1,True,Fact,,, +0,0,Bob,tired,"[0.8,1.0]","[0.0,1.0]",bob_tired_fact_2,False,Fact,"Inconsistency occurred. Conflicting bounds for tired(Bob). Update from [0.800, 1.000] to [0.000, 0.100] is not allowed. Setting bounds to [0,1] and static=True for this timestep.",, +0,1,Carol,sick,"[0.0,0.09999999999999998]","[0.0,1.0]",spread_rule,False,Rule,"Inconsistency occurred. Grounding sick(Carol) conflicts with grounding healthy(Carol). Setting bounds to [0,1] and static=True for this timestep.",['Bob'],"[('Bob', 'Carol')]" +0,1,Carol,healthy,"[0.9,1.0]","[0.0,1.0]",spread_rule,False,IPL,"Inconsistency occurred. Grounding sick(Carol) conflicts with grounding healthy(Carol). 
Setting bounds to [0,1] and static=True for this timestep.",, +0,1,Dave,sick,"[0.0,1.0]","[0.5,0.7]",spread_rule,True,Rule,,['Bob'],"[('Bob', 'Dave')]" +0,1,Dave,healthy,"[0.0,1.0]","[0.30000000000000004,0.5]",IPL: sick,True,IPL,,, diff --git a/examples/csv_outputs/infer_edges_rule_trace_edges_20241119-140955.csv b/examples/csv_outputs/infer_edges_rule_trace_edges_20241119-140955.csv new file mode 100644 index 00000000..d540071b --- /dev/null +++ b/examples/csv_outputs/infer_edges_rule_trace_edges_20241119-140955.csv @@ -0,0 +1,10 @@ +Time,Fixed-Point-Operation,Edge,Label,Old Bound,New Bound,Occurred Due To,Clause-1,Clause-2,Clause-3 +0,0,"('Amsterdam_Airport_Schiphol', 'Yali')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Riga_International_Airport', 'Amsterdam_Airport_Schiphol')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Riga_International_Airport', 'Düsseldorf_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Chișinău_International_Airport', 'Riga_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Düsseldorf_Airport', 'Dubrovnik_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Pobedilovo_Airport', 'Vnukovo_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Dubrovnik_Airport', 'Athens_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +0,0,"('Vnukovo_International_Airport', 'Hévíz-Balaton_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact,,, +1,1,"('Vnukovo_International_Airport', 'Riga_International_Airport')",isConnectedTo,"[0.0,1.0]","[1.0,1.0]",connected_rule_1,"[('Riga_International_Airport', 'Amsterdam_Airport_Schiphol')]",['Amsterdam_Airport_Schiphol'],['Vnukovo_International_Airport'] diff --git a/examples/csv_outputs/infer_edges_rule_trace_nodes_20241119-140955.csv 
b/examples/csv_outputs/infer_edges_rule_trace_nodes_20241119-140955.csv new file mode 100644 index 00000000..17adc715 --- /dev/null +++ b/examples/csv_outputs/infer_edges_rule_trace_nodes_20241119-140955.csv @@ -0,0 +1,11 @@ +Time,Fixed-Point-Operation,Node,Label,Old Bound,New Bound,Occurred Due To +0,0,Amsterdam_Airport_Schiphol,Amsterdam_Airport_Schiphol,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Riga_International_Airport,Riga_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Chișinău_International_Airport,Chișinău_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Yali,Yali,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Düsseldorf_Airport,Düsseldorf_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Pobedilovo_Airport,Pobedilovo_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Dubrovnik_Airport,Dubrovnik_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Hévíz-Balaton_Airport,Hévíz-Balaton_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Athens_International_Airport,Athens_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact +0,0,Vnukovo_International_Airport,Vnukovo_International_Airport,"[0.0,1.0]","[1.0,1.0]",graph-attribute-fact diff --git a/docs/inconsistency_example.py b/examples/inconsistency_example.py similarity index 100% rename from docs/inconsistency_example.py rename to examples/inconsistency_example.py