diff --git a/.github/workflows/bench-graphql.yml b/.github/workflows/bench-graphql.yml index 22332a2b5e..8dd6a85dc3 100644 --- a/.github/workflows/bench-graphql.yml +++ b/.github/workflows/bench-graphql.yml @@ -40,8 +40,10 @@ jobs: k6-version: '1.0.0' - name: Run GraphQL benchmarks run: cd graphql-bench && make bench-local - - name: Restore metadata file - run: git restore graphql-bench/data/apache/master # otherwise github-action-benchmark fails to create the commit + - name: Restore modified files + run: | + git restore Cargo.lock # modified by build; github-action-benchmark can't switch to gh-pages with dirty working tree + git restore graphql-bench/data/apache/master # otherwise github-action-benchmark fails to create the commit - name: Print bench results run: cat graphql-bench/output.json - name: Store benchmark results from master branch diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c8925c5684..fb8a561e63 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -49,8 +49,8 @@ jobs: run: | set -o pipefail cargo bench --bench base --bench algobench -p raphtory-benchmark -- --output-format=bencher | tee benchmark-result.txt - - name: Delete cargo.lock if it exists - run: rm -f Cargo.lock + - name: Restore Cargo.lock to avoid dirty working tree + run: git checkout -- Cargo.lock - name: Store benchmark results from master branch if: github.ref == 'refs/heads/master' uses: benchmark-action/github-action-benchmark@v1 diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index 450c335aa2..bf8a52417b 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -37,8 +37,8 @@ jobs: env: RUST_BACKTRACE: 1 run: | - cargo build --package raphtory-graphql --bin raphtory-graphql --profile=build-fast - ./target/build-fast/raphtory-graphql server --work-dir graphs & + cargo build --package raphtory-server --bin raphtory-server --profile=build-fast + 
./target/build-fast/raphtory-server server --work-dir graphs & cd graphql-bench make stress-test - name: Upload k6 report diff --git a/Cargo.lock b/Cargo.lock index 8e09a82198..b98ec4a4a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5024,6 +5024,10 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "raphtory-auth-noop" +version = "0.17.0" + [[package]] name = "raphtory-benchmark" version = "0.17.0" @@ -5145,7 +5149,17 @@ dependencies = [ "pyo3", "pyo3-build-config", "raphtory", + "raphtory-auth-noop", + "raphtory-graphql", +] + +[[package]] +name = "raphtory-server" +version = "0.17.0" +dependencies = [ + "raphtory-auth-noop", "raphtory-graphql", + "tokio", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c8aae7e8ea..aa4c77d321 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,14 +7,17 @@ members = [ "examples/custom-gql-apis", "python", "raphtory-graphql", + "raphtory-auth-noop", + "raphtory-server", "raphtory-api", "raphtory-core", "raphtory-storage", "raphtory-api-macros", "raphtory-itertools", "clam-core", - "clam-core/snb" - , "raphtory-itertools"] + "clam-core/snb", + "raphtory-itertools" +] default-members = ["raphtory"] exclude = ["optd"] resolver = "2" @@ -193,3 +196,7 @@ disjoint-sets = "0.4.2" [workspace.dependencies.storage] package = "db4-storage" path = "db4-storage" + +[workspace.dependencies.auth] +package = "raphtory-auth-noop" +path = "raphtory-auth-noop" diff --git a/docs/reference/graphql/graphql_API.md b/docs/reference/graphql/graphql_API.md index fb1f57bdce..2c0049945a 100644 --- a/docs/reference/graphql/graphql_API.md +++ b/docs/reference/graphql/graphql_API.md @@ -30,11 +30,26 @@ Hello world demo graph -Graph! +Graph Returns a graph + + + +path +String! + + + +graphMetadata +MetaGraph + + +Returns lightweight metadata for a graph (node/edge counts, timestamps) without loading it. +Requires at least INTROSPECT permission. + @@ -126,7 +141,8 @@ Returns a plugin. String! 
-Encodes graph and returns as string +Encodes graph and returns as string. +If the caller has filtered access, the returned graph is a materialized view of the filter. Returns:: Base64 url safe encoded string diff --git a/python/Cargo.toml b/python/Cargo.toml index 1767936c68..bbdb6b79ea 100644 --- a/python/Cargo.toml +++ b/python/Cargo.toml @@ -26,8 +26,8 @@ raphtory = { workspace = true, features = [ raphtory-graphql = { workspace = true, features = [ "python", ] } -clam-core = { path = "../clam-core", version = "0.17.0", features = ["python"] } - +auth = { workspace = true } +clam-core = { workspace = true, features = ["python"] } [features] extension-module = ["pyo3/extension-module"] diff --git a/python/python/raphtory/graphql/__init__.pyi b/python/python/raphtory/graphql/__init__.pyi index 22047a51d6..b2dd837a1a 100644 --- a/python/python/raphtory/graphql/__init__.pyi +++ b/python/python/raphtory/graphql/__init__.pyi @@ -44,6 +44,7 @@ __all__ = [ "decode_graph", "schema", "cli", + "has_permissions_extension", ] class GraphServer(object): @@ -61,7 +62,7 @@ class GraphServer(object): otlp_tracing_service_name (str, optional): The OTLP tracing service name config_path (str | PathLike, optional): Path to the config file auth_public_key: - auth_enabled_for_reads: + require_auth_for_reads: create_index: """ @@ -77,9 +78,10 @@ class GraphServer(object): otlp_agent_port: Optional[str] = None, otlp_tracing_service_name: Optional[str] = None, auth_public_key: Any = None, - auth_enabled_for_reads: Any = None, + require_auth_for_reads: Any = None, config_path: Optional[str | PathLike] = None, create_index: Any = None, + permissions_store_path=None, ) -> GraphServer: """Create and return a new object. See help(type) for accurate signature.""" @@ -780,3 +782,5 @@ def schema(): """ def cli(): ... 
+def has_permissions_extension(): + """Returns True if the permissions extension (raphtory-auth) is compiled in.""" diff --git a/python/src/lib.rs b/python/src/lib.rs index 2b2d92569d..b1ca6c95d9 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -13,7 +13,8 @@ use raphtory_graphql::python::pymodule::base_graphql_module; /// Raphtory graph analytics library #[pymodule] fn _raphtory(py: Python<'_>, m: &Bound) -> PyResult<()> { - let _ = add_raphtory_classes(m); + auth::init(); + add_raphtory_classes(m)?; let graphql_module = base_graphql_module(py)?; let algorithm_module = base_algorithm_module(py)?; diff --git a/python/tests/test_auth.py b/python/tests/test_auth.py index a9c733c0b8..9c6fe3d52a 100644 --- a/python/tests/test_auth.py +++ b/python/tests/test_auth.py @@ -19,16 +19,48 @@ RAPHTORY = "http://localhost:1736" -READ_JWT = jwt.encode({"a": "ro"}, PRIVATE_KEY, algorithm="EdDSA") +READ_JWT = jwt.encode({"access": "ro"}, PRIVATE_KEY, algorithm="EdDSA") READ_HEADERS = { "Authorization": f"Bearer {READ_JWT}", } -WRITE_JWT = jwt.encode({"a": "rw"}, PRIVATE_KEY, algorithm="EdDSA") +WRITE_JWT = jwt.encode({"access": "rw"}, PRIVATE_KEY, algorithm="EdDSA") WRITE_HEADERS = { "Authorization": f"Bearer {WRITE_JWT}", } +# openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -out rsa-key.pem +# openssl pkey -in rsa-key.pem -pubout -outform DER | base64 | tr -d '\n' +RSA_PUB_KEY = "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4sqe3DlHB/DaSm8Ab99yKj0KDc/WZGFPwXeTbPwCMKKSEc8zuSuIZc/fHXLSORn1apMnDq3aLryfPwyNTbpvhGiYVyp76XQGwSlN+EF2TsJZVAzp4/EI+bnHeHyv2Yc5q6AkFtoBPNtAz2P/18g7Yv/eZqNNSd7FOeuRFRs9y0LkswvMelQmoMOK7UKdC00AyiGksvFvljNC70VT9b0uVHggJwUYT0hdCbdaDj2fCJZBEmTqBBr97u3fIHo5T41sIEEPgE2j368mI+uk6V1saEU1BU+hkcq56TabgVqUYZTln5Rdm1MuBsNz+NQwOmVxgPNo45H2cNwTfsPDAAESlwIDAQAB" +RSA_PRIVATE_KEY = """-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDiyp7cOUcH8NpK +bwBv33IqPQoNz9ZkYU/Bd5Ns/AIwopIRzzO5K4hlz98dctI5GfVqkycOrdouvJ8/ 
+DI1Num+EaJhXKnvpdAbBKU34QXZOwllUDOnj8Qj5ucd4fK/ZhzmroCQW2gE820DP +Y//XyDti/95mo01J3sU565EVGz3LQuSzC8x6VCagw4rtQp0LTQDKIaSy8W+WM0Lv +RVP1vS5UeCAnBRhPSF0Jt1oOPZ8IlkESZOoEGv3u7d8gejlPjWwgQQ+ATaPfryYj +66TpXWxoRTUFT6GRyrnpNpuBWpRhlOWflF2bUy4Gw3P41DA6ZXGA82jjkfZw3BN+ +w8MAARKXAgMBAAECggEAWIH78nU2B97Syja8xGw/KUXODSreACnMDvRkKCXHkwR3 +HhUvmeXn4tf3uo3rhhZf5TpNhViK7C93tIrpAHswd0u8nFP7rNW3px3ADJE7oywM +4ZTymJ8iQhdjRd3fYPT5qEWkn/hvgDkO94EOwT8nEhFKUeMMUDZs4RhSdBrACHk0 +CrOC2S9xbgYb5OWGV6vkSqNB0k0Kv+LxU8sS46BLE7DxfpzSXDyeYaCAkk+wbwfb +hX7lysczbSl5l5Bulcf/LHL4Oa/5t+NcBZqyN6ylRXyqQ8LEdK4+TOJfvnePX1go +3rG4rtyaBCuW5JD1ytxUsyfh8WE4GinUbHWzxvaYQQKBgQD5PxF2CmqMY6yiaxU3 +0LFtRS9DtwIPnPX3Wdchq7ivSU1W6sHJjNfyEggi10DSOOINalRM/ZnVlDo8hJ3A +SybESWWzLuDZNAAAWkmoir0UpnURz847tKd8hJUivhsbdQBeKwaCuepcW6Hdwzh1 +JsJjXPovrzVGQe5FSRfBy7gswQKBgQDo78p/jEVHzuxHqSn3AsOdBdMZvPavpHb2 +Bx7tRhZOOp2QiGUHZLfjI++sQQyTu1PJqmmxOOF+eD/zkqCkLLeZsmRYOQVDOQDM +Z+u+zKYRj7KaWBeGB2Oy/WEU0pGnhyMB/T5iHmroO0Hn4gDHqkEDvwFI7SUjLNAK +1RjTxVgdVwKBgCRHNMBspbOHcoI1eeIk4x5Xepitk4Q4QWjeT7zb5MbGsZYcF1bB +xFC8pSiFEi9HDkgLmPeX1gNLTuquFtP9XEgnssDQ6vNSaUmj2qLIhtrxm4qbJ5Zz +JgmutpJW/1UQw5vxQUJX0y/cOoQvvRD4MkUKLHQyWVu/jvHQwL95anZBAoGBAIrZ +9aGWYe3uINaOth8yHJzLTgz3oS0OIoOBtyPFNaKoOihfxalklmDlmQbbN74QWl/K +H3qu52vWDnkJHI0Awujxd/NG+iYaIqm2AMcZgpzRRavPeyY/3WRiua4J3x035txW +swsWCrAoMp8hD0n16Q9smj14bzzKh7ENWeFSr7W9AoGBAMOSyRdVQxVHXagh3fAa ++FNbR8pFmQC6bQGCO74DzGe6uKYpgu+XD1yinufwwsXxjieDXCHkKTGR92Kzp5VY +Hp6HhhhCcXICRRnbxhvdpyaDbCQrT522bqRJ4rNmSVYOQQiD2vng/HVB2oWMVwa+ +fEtYNjbxjhX9qInHjHxeaNOp +-----END PRIVATE KEY-----""" + NEW_TEST_GRAPH = """mutation { newGraph(path:"test", graphType:EVENT) }""" QUERY_NAMESPACES = """query { namespaces { list{ path} } }""" @@ -54,7 +86,7 @@ def test_expired_token(): work_dir = tempfile.mkdtemp() with GraphServer(work_dir, auth_public_key=PUB_KEY).start(): exp = time() - 100 - token = jwt.encode({"a": "ro", "exp": exp}, PRIVATE_KEY, algorithm="EdDSA") + token = jwt.encode({"access": "ro", "exp": exp}, 
PRIVATE_KEY, algorithm="EdDSA") headers = { "Authorization": f"Bearer {token}", } @@ -63,7 +95,7 @@ def test_expired_token(): ) assert response.status_code == 401 - token = jwt.encode({"a": "rw", "exp": exp}, PRIVATE_KEY, algorithm="EdDSA") + token = jwt.encode({"access": "rw", "exp": exp}, PRIVATE_KEY, algorithm="EdDSA") headers = { "Authorization": f"Bearer {token}", } @@ -94,7 +126,7 @@ def test_default_read_access(query): def test_disabled_read_access(query): work_dir = tempfile.mkdtemp() with GraphServer( - work_dir, auth_public_key=PUB_KEY, auth_enabled_for_reads=False + work_dir, auth_public_key=PUB_KEY, require_auth_for_reads=False ).start(): add_test_graph() data = json.dumps({"query": query}) @@ -206,6 +238,70 @@ def test_raphtory_client(): assert g.node("test") is not None +def test_raphtory_client_write_denied_for_read_jwt(): + """RaphtoryClient initialized with a read JWT is denied write operations.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, auth_public_key=PUB_KEY).start(): + client = RaphtoryClient(url=RAPHTORY, token=READ_JWT) + with pytest.raises(Exception, match="requires write access"): + client.new_graph("test", "EVENT") + + +# --- RSA JWT support --- + + +def test_rsa_signed_jwt_rs256_accepted(): + """Server configured with an RSA public key accepts RS256-signed JWTs.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, auth_public_key=RSA_PUB_KEY).start(): + token = jwt.encode({"access": "ro"}, RSA_PRIVATE_KEY, algorithm="RS256") + response = requests.post( + RAPHTORY, + headers={"Authorization": f"Bearer {token}"}, + data=json.dumps({"query": QUERY_ROOT}), + ) + assert_successful_response(response) + + +def test_rsa_signed_jwt_rs512_accepted(): + """RS512 JWT is also accepted for the same RSA key (different hash, same key material).""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, auth_public_key=RSA_PUB_KEY).start(): + token = jwt.encode({"access": "ro"}, RSA_PRIVATE_KEY, algorithm="RS512") + 
response = requests.post( + RAPHTORY, + headers={"Authorization": f"Bearer {token}"}, + data=json.dumps({"query": QUERY_ROOT}), + ) + assert_successful_response(response) + + +def test_eddsa_jwt_rejected_against_rsa_key(): + """EdDSA JWT is rejected when the server is configured with an RSA public key.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, auth_public_key=RSA_PUB_KEY).start(): + token = jwt.encode({"access": "ro"}, PRIVATE_KEY, algorithm="EdDSA") + response = requests.post( + RAPHTORY, + headers={"Authorization": f"Bearer {token}"}, + data=json.dumps({"query": QUERY_ROOT}), + ) + assert response.status_code == 401 + + +def test_raphtory_client_read_jwt_can_receive_graph(): + """RaphtoryClient initialized with a read JWT can download graphs.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, auth_public_key=PUB_KEY).start(): + client = RaphtoryClient(url=RAPHTORY, token=WRITE_JWT) + client.new_graph("test", "EVENT") + client.remote_graph("test").add_node(0, "mynode") + + client2 = RaphtoryClient(url=RAPHTORY, token=READ_JWT) + g = client2.receive_graph("test") + assert g.node("mynode") is not None + + def test_upload_graph(): work_dir = tempfile.mkdtemp() with GraphServer(work_dir, auth_public_key=PUB_KEY).start(): diff --git a/python/tests/test_permissions.py b/python/tests/test_permissions.py new file mode 100644 index 0000000000..43488acc9d --- /dev/null +++ b/python/tests/test_permissions.py @@ -0,0 +1,1735 @@ +import json +import os +import tempfile +import requests +import jwt +import pytest +from raphtory.graphql import GraphServer, RaphtoryClient, has_permissions_extension + +pytestmark = pytest.mark.skipif( + not has_permissions_extension(), + reason="raphtory-auth not compiled in (open-source build)", +) + +# Reuse the same key pair as test_auth.py +PUB_KEY = "MCowBQYDK2VwAyEADdrWr1kTLj+wSHlr45eneXmOjlHo3N1DjLIvDa2ozno=" +PRIVATE_KEY = """-----BEGIN PRIVATE KEY----- 
+MC4CAQAwBQYDK2VwBCIEIFzEcSO/duEjjX4qKxDVy4uLqfmiEIA6bEw1qiPyzTQg +-----END PRIVATE KEY-----""" + +RAPHTORY = "http://localhost:1736" + +ANALYST_JWT = jwt.encode( + {"access": "ro", "role": "analyst"}, PRIVATE_KEY, algorithm="EdDSA" +) +ANALYST_HEADERS = {"Authorization": f"Bearer {ANALYST_JWT}"} + +ADMIN_JWT = jwt.encode( + {"access": "rw", "role": "admin"}, PRIVATE_KEY, algorithm="EdDSA" +) +ADMIN_HEADERS = {"Authorization": f"Bearer {ADMIN_JWT}"} + +NO_ROLE_JWT = jwt.encode({"access": "ro"}, PRIVATE_KEY, algorithm="EdDSA") +NO_ROLE_HEADERS = {"Authorization": f"Bearer {NO_ROLE_JWT}"} + +QUERY_JIRA = """query { graph(path: "jira") { path } }""" +QUERY_ADMIN = """query { graph(path: "admin") { path } }""" +QUERY_NS_GRAPHS = """query { root { graphs { list { path } } } }""" +QUERY_NS_CHILDREN = """query { root { children { list { path } } } }""" +QUERY_META_JIRA = """query { graphMetadata(path: "jira") { path nodeCount } }""" +CREATE_JIRA = """mutation { newGraph(path:"jira", graphType:EVENT) }""" +CREATE_ADMIN = """mutation { newGraph(path:"admin", graphType:EVENT) }""" +CREATE_TEAM_JIRA = """mutation { newGraph(path:"team/jira", graphType:EVENT) }""" +CREATE_TEAM_CONFLUENCE = ( + """mutation { newGraph(path:"team/confluence", graphType:EVENT) }""" +) +CREATE_DEEP = """mutation { newGraph(path:"a/b/c", graphType:EVENT) }""" +QUERY_TEAM_JIRA = """query { graph(path: "team/jira") { path } }""" +QUERY_TEAM_GRAPHS = """query { namespace(path: "team") { graphs { list { path } } } }""" +QUERY_A_CHILDREN = """query { namespace(path: "a") { children { list { path } } } }""" + + +def gql(query: str, headers=None) -> dict: + h = headers if headers is not None else ADMIN_HEADERS + return requests.post(RAPHTORY, headers=h, data=json.dumps({"query": query})).json() + + +def create_role(role: str) -> None: + gql(f'mutation {{ permissions {{ createRole(name: "{role}") {{ success }} }} }}') + + +def grant_graph(role: str, path: str, permission: str) -> None: + gql( + f'mutation 
{{ permissions {{ grantGraph(role: "{role}", path: "{path}", permission: {permission}) {{ success }} }} }}' + ) + + +def grant_namespace(role: str, path: str, permission: str) -> None: + gql( + f'mutation {{ permissions {{ grantNamespace(role: "{role}", path: "{path}", permission: {permission}) {{ success }} }} }}' + ) + + +def revoke_graph(role: str, path: str) -> None: + gql( + f'mutation {{ permissions {{ revokeGraph(role: "{role}", path: "{path}") {{ success }} }} }}' + ) + + +def grant_graph_filtered_read_only(role: str, path: str, filter_gql: str) -> None: + """Call grantGraphFilteredReadOnly with a raw GQL filter fragment.""" + resp = gql( + f'mutation {{ permissions {{ grantGraphFilteredReadOnly(role: "{role}", path: "{path}", filter: {filter_gql}) {{ success }} }} }}' + ) + assert "errors" not in resp, f"grantGraphFilteredReadOnly failed: {resp}" + + +def make_server(work_dir: str): + """Create a GraphServer wired with a permissions store at {work_dir}/permissions.json.""" + return GraphServer( + work_dir, + auth_public_key=PUB_KEY, + permissions_store_path=os.path.join(work_dir, "permissions.json"), + ) + + +def test_analyst_can_access_permitted_graph(): + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + gql(CREATE_ADMIN) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "jira" + + +def test_analyst_cannot_access_denied_graph(): + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_ADMIN) + create_role("analyst") + grant_graph("analyst", "jira", "READ") # only jira, not admin + + # "admin" graph is silently null — analyst has no namespace INTROSPECT, so + # existence of "admin" is not revealed. 
+ response = gql(QUERY_ADMIN, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_admin_can_access_all_graphs(): + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + gql(CREATE_ADMIN) + + for query in [QUERY_JIRA, QUERY_ADMIN]: + response = gql(query, headers=ADMIN_HEADERS) + assert "errors" not in response, response + + +def test_no_role_is_denied_when_policy_is_active(): + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_JIRA, headers=NO_ROLE_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_unknown_role_is_denied_when_policy_is_active(): + """JWT has a role claim but that role does not exist in the store → Denied. + + Distinct from test_no_role_is_denied_when_policy_is_active: here the JWT + does carry a role claim ('analyst'), but 'analyst' was never created in the + store. Both paths deny, but via different branches of the policy flowchart. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + # Make the store non-empty with a different role — but never create "analyst" + create_role("other_team") + + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) # JWT says role="analyst" + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_empty_store_denies_non_admin(): + """With an empty permissions store (no roles configured), non-admin users are denied.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_empty_store_allows_admin(): + """With an empty permissions store, admin (rw JWT) still gets full access.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + + response = gql(QUERY_JIRA, headers=ADMIN_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "jira" + + +def test_introspection_allowed_with_introspect_permission(): + """Namespace INTROSPECT makes graphs visible in listings but graph() is denied.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "INTROSPECT") + + # Namespace listing shows the graph as MetaGraph + response = gql(QUERY_TEAM_GRAPHS, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [g["path"] for g in response["data"]["namespace"]["graphs"]["list"]] + assert "team/jira" in paths + + # graph() resolver returns null — INTROSPECT does not grant data access + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_read_implies_introspect(): + """READ also shows the graph in namespace 
listings (implies INTROSPECT).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_NS_GRAPHS, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [g["path"] for g in response["data"]["root"]["graphs"]["list"]] + assert "jira" in paths + + +def test_permissions_update_via_mutation(): + """Granting access via mutation takes effect immediately.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + + # No grants yet — graph returns null (indistinguishable from "graph not found") + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + # Grant via mutation + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "jira" + + +def test_namespace_grant_does_not_cover_root_level_graphs(): + """Namespace grants only apply to graphs within that namespace; root-level graphs require explicit graph grants.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace( + "analyst", "team", "READ" + ) # covers team/jira but not root-level jira + + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert ( + response["data"]["graph"] is None + ) # root-level graph not covered by namespace grant + + +# --- WRITE permission enforcement --- + +UPDATE_JIRA = """query { updateGraph(path: "jira") { addNode(time: 1, name: "test_node") { success } } }""" +CREATE_JIRA_NS = """mutation { newGraph(path:"team/jira", 
graphType:EVENT) }""" + + +def test_admin_bypasses_policy_for_reads(): + """'access':'rw' admin can read any graph even without a role entry in the store.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + # Policy is active (analyst role exists) but admin has no role entry + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_JIRA, headers=ADMIN_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "jira" + + +def test_analyst_can_write_with_write_grant(): + """'access':'ro' user with WRITE grant on a specific graph can call updateGraph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "WRITE") + + response = gql(UPDATE_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + + +def test_analyst_cannot_write_without_write_grant(): + """'access':'ro' user with READ-only grant cannot call updateGraph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") # READ only, no WRITE + + response = gql(UPDATE_JIRA, headers=ANALYST_HEADERS) + assert response["data"] is None or response["data"].get("updateGraph") is None + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + + +def test_analyst_can_create_graph_in_namespace(): + """'access':'ro' user with namespace WRITE grant can create a new graph in that namespace.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team/", "WRITE") + + response = gql(CREATE_JIRA_NS, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["newGraph"] is True + + +def test_analyst_cannot_create_graph_outside_namespace(): + """'access':'ro' user 
with namespace WRITE grant cannot create a graph outside that namespace.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team/", "WRITE") + + response = gql(CREATE_JIRA, headers=ANALYST_HEADERS) # "jira" not under "team/" + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "jira" was not created as a side effect + ns_graphs = gql(QUERY_NS_GRAPHS)["data"]["root"]["graphs"]["list"] + assert "jira" not in [g["path"] for g in ns_graphs] + + +def test_analyst_cannot_call_permissions_mutations(): + """'access':'ro' user with WRITE grant on a graph cannot manage roles/permissions.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") + + response = gql( + 'mutation { permissions { createRole(name: "hacker") { success } } }', + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "hacker" role was not created as a side effect + roles = gql("query { permissions { listRoles } }")["data"]["permissions"][ + "listRoles" + ] + assert "hacker" not in roles + + +def test_admin_can_list_roles(): + """'access':'rw' admin can query permissions { listRoles }.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + + response = gql("query { permissions { listRoles } }", headers=ADMIN_HEADERS) + assert "errors" not in response, response + assert "analyst" in response["data"]["permissions"]["listRoles"] + + +def test_analyst_cannot_list_roles(): + """'access':'ro' user cannot query permissions { listRoles }.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + + response = gql("query { permissions { listRoles } }", headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in 
response["errors"][0]["message"] + + +def test_admin_can_get_role(): + """'access':'rw' admin can query permissions { getRole(...) }.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql( + 'query { permissions { getRole(name: "analyst") { name graphs { path permission } } } }', + headers=ADMIN_HEADERS, + ) + assert "errors" not in response, response + role_data = response["data"]["permissions"]["getRole"] + assert role_data["name"] == "analyst" + assert role_data["graphs"][0]["path"] == "jira" + assert role_data["graphs"][0]["permission"] == "READ" + + +def test_analyst_cannot_get_role(): + """'access':'ro' user cannot query permissions { getRole(...) }.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + + response = gql( + 'query { permissions { getRole(name: "analyst") { name } } }', + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + + +def test_introspect_only_cannot_access_graph_data(): + """Namespace INTROSPECT is denied by graph() — READ is required to access graph data.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "INTROSPECT") # no READ + + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_no_grant_hidden_from_namespace_and_graph(): + """A role with no namespace INTROSPECT sees graph() as null, not an 'Access denied' error. + + Returning an error would leak that the graph exists. Null is indistinguishable from + 'graph not found'. An error is only appropriate when the role already has INTROSPECT + on the namespace (and therefore can list the graph name anyway). 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + # analyst has no grant at all + + # graph() returns null silently — does not reveal the graph exists + response = gql(QUERY_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + # namespace listing hides it + response = gql(QUERY_NS_GRAPHS, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [g["path"] for g in response["data"]["root"]["graphs"]["list"]] + assert "jira" not in paths + + +def test_grantgraph_introspect_rejected(): + """grantGraph with INTROSPECT permission is rejected — INTROSPECT is namespace-only.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + + response = gql( + 'mutation { permissions { grantGraph(role: "analyst", path: "jira", permission: INTROSPECT) { success } } }' + ) + assert "errors" in response + assert ( + "INTROSPECT cannot be granted on a graph" + in response["errors"][0]["message"] + ) + + +def test_graph_metadata_allowed_with_introspect(): + """graphMetadata is accessible with INTROSPECT permission (namespace grant).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "INTROSPECT") + + response = gql( + 'query { graphMetadata(path: "team/jira") { path nodeCount } }', + headers=ANALYST_HEADERS, + ) + assert "errors" not in response, response + assert response["data"]["graphMetadata"]["path"] == "team/jira" + + # graph() returns null — INTROSPECT does not grant data access + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_graph_metadata_allowed_with_read(): + """graphMetadata is also accessible with READ.""" + work_dir = tempfile.mkdtemp() 
+ with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(QUERY_META_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graphMetadata"]["path"] == "jira" + + +def test_graph_metadata_denied_without_grant(): + """graphMetadata is denied when the role has no grant on the graph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + # no grant on jira + + response = gql(QUERY_META_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graphMetadata"] is None + + +def test_analyst_sees_only_filtered_nodes(): + """grantGraphFilteredReadOnly applies a node filter transparently for the role. + + Admin sees all nodes; analyst only sees nodes matching the stored filter. + Calling grantGraph(READ) clears the filter and restores full access. + """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + # Create graph and add nodes with a "region" property + gql(CREATE_JIRA) + for name, region in [ + ("alice", "us-west"), + ("bob", "us-east"), + ("carol", "us-west"), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: 1, + name: "{name}", + properties: [{{ key: "region", value: {{ str: "{region}" }} }}] + ) {{ + success + node {{ + name + }} + }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # Grant filtered read-only: analyst only sees nodes where region = "us-west" + grant_graph_filtered_read_only( + "analyst", + "jira", + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } }', + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + + # Analyst should only see alice and carol (region=us-west) + analyst_response = gql(QUERY_NODES, headers=ANALYST_HEADERS) + assert 
"errors" not in analyst_response, analyst_response + analyst_names = { + n["name"] for n in analyst_response["data"]["graph"]["nodes"]["list"] + } + assert analyst_names == { + "alice", + "carol", + }, f"expected {{alice, carol}}, got {analyst_names}" + + # Admin should see all three nodes (filter is bypassed for "access":"rw") + admin_response = gql(QUERY_NODES, headers=ADMIN_HEADERS) + assert "errors" not in admin_response, admin_response + admin_names = { + n["name"] for n in admin_response["data"]["graph"]["nodes"]["list"] + } + assert admin_names == { + "alice", + "bob", + "carol", + }, f"expected all 3 nodes, got {admin_names}" + + # Clear the filter by calling grantGraph(READ) — analyst should now see all nodes + grant_graph("analyst", "jira", "READ") + analyst_response_after = gql(QUERY_NODES, headers=ANALYST_HEADERS) + assert "errors" not in analyst_response_after, analyst_response_after + names_after = { + n["name"] for n in analyst_response_after["data"]["graph"]["nodes"]["list"] + } + assert names_after == { + "alice", + "bob", + "carol", + }, f"after plain grant, expected all 3 nodes, got {names_after}" + + +def test_analyst_sees_only_filtered_edges(): + """grantGraphFilteredReadOnly with an edge filter hides edges that don't match. + + Edges with weight >= 5 are visible; edges with weight < 5 are hidden. + Admin bypasses the filter and sees all edges. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + # Add three edges: (a->b weight=3), (b->c weight=7), (a->c weight=9) + for src, dst, weight in [("a", "b", 3), ("b", "c", 7), ("a", "c", 9)]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addEdge( + time: 1, + src: "{src}", + dst: "{dst}", + properties: [{{ key: "weight", value: {{ i64: {weight} }} }}] + ) {{ + success + edge {{ + src {{ name }} + dst {{ name }} + }} + }} + }} + }}""") + assert resp["data"]["updateGraph"]["addEdge"]["success"] is True, resp + + create_role("analyst") + # Only show edges where weight >= 5 + grant_graph_filtered_read_only( + "analyst", + "jira", + '{ edge: { property: { name: "weight", where: { ge: { i64: 5 } } } } }', + ) + + QUERY_EDGES = 'query { graph(path: "jira") { edges { list { src { name } dst { name } } } } }' + + analyst_response = gql(QUERY_EDGES, headers=ANALYST_HEADERS) + assert "errors" not in analyst_response, analyst_response + analyst_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in analyst_response["data"]["graph"]["edges"]["list"] + } + assert analyst_edges == { + ("b", "c"), + ("a", "c"), + }, f"expected only heavy edges, got {analyst_edges}" + + # Admin sees all three edges + admin_response = gql(QUERY_EDGES, headers=ADMIN_HEADERS) + assert "errors" not in admin_response, admin_response + admin_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in admin_response["data"]["graph"]["edges"]["list"] + } + assert admin_edges == { + ("a", "b"), + ("b", "c"), + ("a", "c"), + }, f"expected all edges for admin, got {admin_edges}" + + +def test_raphtory_client_analyst_can_query_permitted_graph(): + """RaphtoryClient with analyst role can query a graph it has READ access to.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + result 
= client.query(QUERY_JIRA) + assert result["graph"]["path"] == "jira" + + +def test_raphtory_client_analyst_denied_unpermitted_graph(): + """RaphtoryClient with analyst role gets null for a graph it has no grant for.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + # No grant on jira — graph returns null (indistinguishable from "graph not found") + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + response = client.query(QUERY_JIRA) + assert response["graph"] is None + + +def test_raphtory_client_analyst_write_with_write_grant(): + """RaphtoryClient with analyst role and WRITE grant can add nodes via remote_graph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "WRITE") + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + client.remote_graph("jira").add_node(1, "client_node") + + client2 = RaphtoryClient(url=RAPHTORY, token=ADMIN_JWT) + received = client2.receive_graph("jira") + assert received.node("client_node") is not None + + +def test_raphtory_client_analyst_write_denied_without_write_grant(): + """RaphtoryClient with analyst role and READ-only grant cannot add nodes via remote_graph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + with pytest.raises(Exception, match="Access denied"): + client.remote_graph("jira").add_node(1, "client_node") + + +def test_receive_graph_requires_read(): + """receive_graph (graph download) requires at least READ; namespace INTROSPECT is not enough.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + + # No grant — looks like the graph doesn't exist (no information leakage) + client = 
RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + with pytest.raises(Exception, match="does not exist"): + client.receive_graph("team/jira") + + # Namespace INTROSPECT only — also denied for receive_graph, but now reveals access denied + grant_namespace("analyst", "team", "INTROSPECT") + with pytest.raises(Exception, match="Access denied"): + client.receive_graph("team/jira") + + # READ — allowed + grant_namespace("analyst", "team", "READ") + g = client.receive_graph("team/jira") + assert g is not None + + +def test_receive_graph_without_introspect_hides_existence(): + """Without namespace INTROSPECT, receive_graph acts as if the graph does not exist. + + This prevents information leakage: a role without any grants cannot distinguish + between 'graph does not exist' and 'graph exists but you are denied'. + """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + + # No grants at all — error must be indistinguishable from a missing graph + with pytest.raises(Exception, match="does not exist") as exc_no_grant: + client.receive_graph("team/jira") + + # Compare with a truly non-existent graph — error should look the same + with pytest.raises(Exception, match="does not exist") as exc_missing: + client.receive_graph("team/nonexistent") + + assert "Access denied" not in str(exc_no_grant.value) + assert "Access denied" not in str(exc_missing.value) + + +def test_receive_graph_with_filtered_access(): + """receive_graph with grantGraphFilteredReadOnly returns a materialized view of the filtered graph. + + The downloaded graph should only contain nodes/edges that pass the stored filter, + not the full unfiltered graph. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, region in [ + ("alice", "us-west"), + ("bob", "us-east"), + ("carol", "us-west"), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: 1, + name: "{name}", + properties: [{{ key: "region", value: {{ str: "{region}" }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + grant_graph_filtered_read_only( + "analyst", + "jira", + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } }', + ) + + client = RaphtoryClient(url=RAPHTORY, token=ANALYST_JWT) + received = client.receive_graph("jira") + + names = {n.name for n in received.nodes} + assert names == {"alice", "carol"}, f"Expected only us-west nodes, got: {names}" + assert "bob" not in names + + +def test_analyst_sees_only_graph_filter_window(): + """grantGraphFilteredReadOnly with a graph-level window filter restricts the temporal view. + + Nodes added inside the window [5, 15) are visible; those outside are not. + Admin bypasses the filter and sees all nodes. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + # Add nodes at different timestamps: t=1 (outside), t=10 (inside), t=20 (outside) + for name, t in [("early", 1), ("middle", 10), ("late", 20)]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode(time: {t}, name: "{name}") {{ + success + node {{ + name + }} + }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # Window [5, 15) — only "middle" (t=10) falls inside + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ graph: { window: { start: 5, end: 15 } } }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + + analyst_response = gql(QUERY_NODES, headers=ANALYST_HEADERS) + assert "errors" not in analyst_response, analyst_response + analyst_names = { + n["name"] for n in analyst_response["data"]["graph"]["nodes"]["list"] + } + assert analyst_names == { + "middle" + }, f"expected only 'middle' in window, got {analyst_names}" + + # Admin sees all three nodes + admin_response = gql(QUERY_NODES, headers=ADMIN_HEADERS) + assert "errors" not in admin_response, admin_response + admin_names = { + n["name"] for n in admin_response["data"]["graph"]["nodes"]["list"] + } + assert admin_names == { + "early", + "middle", + "late", + }, f"expected all nodes for admin, got {admin_names}" + + +# --- Filter composition (And / Or) tests --- + + +def test_filter_and_node_node(): + """And([node, node]): both node predicates must match (intersection).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, region, role in [ + ("alice", "us-west", "admin"), + ("bob", "us-east", "admin"), + ("carol", "us-west", "user"), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: 1, name: "{name}", + properties: [ + {{ key: "region", value: {{ str: "{region}" }} }}, + {{ key: "role", value: {{ str: 
"{role}" }} }} + ] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # region=us-west AND role=admin → only alice + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } },' + '{ node: { property: { name: "role", where: { eq: { str: "admin" } } } } }' + "] }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + analyst_names = { + n["name"] + for n in gql(QUERY_NODES, headers=ANALYST_HEADERS)["data"]["graph"][ + "nodes" + ]["list"] + } + assert analyst_names == {"alice"}, f"expected only alice, got {analyst_names}" + + +def test_filter_and_edge_edge(): + """And([edge, edge]): both edge predicates must match (intersection).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for src, dst, weight, kind in [ + ("a", "b", 3, "follows"), + ("b", "c", 7, "mentions"), + ("a", "c", 9, "follows"), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addEdge( + time: 1, src: "{src}", dst: "{dst}", + properties: [ + {{ key: "weight", value: {{ i64: {weight} }} }}, + {{ key: "kind", value: {{ str: "{kind}" }} }} + ] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addEdge"]["success"] is True, resp + + create_role("analyst") + # weight >= 5 AND kind=follows → only (a,c) weight=9 follows + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + '{ edge: { property: { name: "weight", where: { ge: { i64: 5 } } } } },' + '{ edge: { property: { name: "kind", where: { eq: { str: "follows" } } } } }' + "] }", + ) + + QUERY_EDGES = 'query { graph(path: "jira") { edges { list { src { name } dst { name } } } } }' + analyst_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in gql(QUERY_EDGES, headers=ANALYST_HEADERS)["data"]["graph"][ + "edges" + ]["list"] + } + assert analyst_edges == { + 
("a", "c") + }, f"expected only (a,c), got {analyst_edges}" + + +def test_filter_and_graph_graph(): + """And([graph, graph]): two graph-level views intersect (sequential narrowing).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, t in [("early", 1), ("middle", 10), ("late", 20)]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode(time: {t}, name: "{name}") {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # window [1,15) ∩ window [5,25) → effective [5,15) → only middle (t=10) + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + "{ graph: { window: { start: 1, end: 15 } } }," + "{ graph: { window: { start: 5, end: 25 } } }" + "] }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + analyst_names = { + n["name"] + for n in gql(QUERY_NODES, headers=ANALYST_HEADERS)["data"]["graph"][ + "nodes" + ]["list"] + } + assert analyst_names == {"middle"}, f"expected only middle, got {analyst_names}" + + +def test_filter_and_node_edge(): + """And([node, edge]): node filter applied first restricts nodes (and their edges), then edge filter further restricts.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, region in [ + ("alice", "us-west"), + ("bob", "us-east"), + ("carol", "us-west"), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: 1, name: "{name}", + properties: [{{ key: "region", value: {{ str: "{region}" }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + for src, dst, weight in [ + ("alice", "bob", 3), + ("alice", "carol", 7), + ("bob", "carol", 9), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addEdge( + time: 1, src: "{src}", dst: "{dst}", + properties: [{{ key: "weight", value: {{ i64: {weight} }} }}] + 
) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addEdge"]["success"] is True, resp + + create_role("analyst") + # Node(us-west) applied first: bob hidden, bob's edges hidden. + # Then Edge(weight≥5): of remaining edges (alice→carol weight=7), only alice→carol passes. + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } },' + '{ edge: { property: { name: "weight", where: { ge: { i64: 5 } } } } }' + "] }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + QUERY_EDGES = 'query { graph(path: "jira") { edges { list { src { name } dst { name } } } } }' + + analyst_names = { + n["name"] + for n in gql(QUERY_NODES, headers=ANALYST_HEADERS)["data"]["graph"][ + "nodes" + ]["list"] + } + assert analyst_names == { + "alice", + "carol", + }, f"expected us-west nodes, got {analyst_names}" + + analyst_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in gql(QUERY_EDGES, headers=ANALYST_HEADERS)["data"]["graph"][ + "edges" + ]["list"] + } + # Sequential And: Node(us-west) hides bob and bob's edges, then Edge(weight≥5) keeps alice→carol (7). 
+ assert analyst_edges == { + ("alice", "carol"), + }, f"expected only (alice,carol), got {analyst_edges}" + + +def test_filter_and_node_graph(): + """And([node, graph]): node property filter combined with a graph window.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, region, t in [ + ("alice", "us-west", 1), + ("bob", "us-west", 10), + ("carol", "us-east", 10), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: {t}, name: "{name}", + properties: [{{ key: "region", value: {{ str: "{region}" }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # window [5,15): bob(t=10) + carol(t=10); then node us-west → only bob + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + "{ graph: { window: { start: 5, end: 15 } } }," + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } }' + "] }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + analyst_names = { + n["name"] + for n in gql(QUERY_NODES, headers=ANALYST_HEADERS)["data"]["graph"][ + "nodes" + ]["list"] + } + assert analyst_names == {"bob"}, f"expected only bob, got {analyst_names}" + + +def test_filter_and_edge_graph(): + """And([edge, graph]): edge property filter combined with a graph window.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for src, dst, weight, t in [ + ("a", "b", 3, 1), + ("b", "c", 7, 10), + ("a", "c", 9, 20), + ]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addEdge( + time: {t}, src: "{src}", dst: "{dst}", + properties: [{{ key: "weight", value: {{ i64: {weight} }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addEdge"]["success"] is True, resp + + create_role("analyst") + # window [5,15): b→c(t=10); then edge weight≥5 → b→c(weight=7) passes + 
grant_graph_filtered_read_only( + "analyst", + "jira", + "{ and: [" + "{ graph: { window: { start: 5, end: 15 } } }," + '{ edge: { property: { name: "weight", where: { ge: { i64: 5 } } } } }' + "] }", + ) + + QUERY_EDGES = 'query { graph(path: "jira") { edges { list { src { name } dst { name } } } } }' + analyst_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in gql(QUERY_EDGES, headers=ANALYST_HEADERS)["data"]["graph"][ + "edges" + ]["list"] + } + assert analyst_edges == { + ("b", "c") + }, f"expected only (b,c), got {analyst_edges}" + + +def test_filter_or_node_node(): + """Or([node, node]): nodes matching either predicate are visible (union).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for name, region in [("alice", "us-west"), ("bob", "us-east"), ("carol", "eu")]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addNode( + time: 1, name: "{name}", + properties: [{{ key: "region", value: {{ str: "{region}" }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addNode"]["success"] is True, resp + + create_role("analyst") + # us-west OR us-east → alice + bob; carol(eu) filtered out + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ or: [" + '{ node: { property: { name: "region", where: { eq: { str: "us-west" } } } } },' + '{ node: { property: { name: "region", where: { eq: { str: "us-east" } } } } }' + "] }", + ) + + QUERY_NODES = 'query { graph(path: "jira") { nodes { list { name } } } }' + analyst_names = { + n["name"] + for n in gql(QUERY_NODES, headers=ANALYST_HEADERS)["data"]["graph"][ + "nodes" + ]["list"] + } + assert analyst_names == { + "alice", + "bob", + }, f"expected alice+bob, got {analyst_names}" + + +def test_filter_or_edge_edge(): + """Or([edge, edge]): edges matching either predicate are visible (union).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + for src, dst, weight in [("a", "b", 3), ("b", "c", 
7), ("a", "c", 9)]: + resp = gql(f"""query {{ + updateGraph(path: "jira") {{ + addEdge( + time: 1, src: "{src}", dst: "{dst}", + properties: [{{ key: "weight", value: {{ i64: {weight} }} }}] + ) {{ success }} + }} + }}""") + assert resp["data"]["updateGraph"]["addEdge"]["success"] is True, resp + + create_role("analyst") + # weight=3 OR weight=9 → (a,b) + (a,c); (b,c) weight=7 filtered out + grant_graph_filtered_read_only( + "analyst", + "jira", + "{ or: [" + '{ edge: { property: { name: "weight", where: { eq: { i64: 3 } } } } },' + '{ edge: { property: { name: "weight", where: { eq: { i64: 9 } } } } }' + "] }", + ) + + QUERY_EDGES = 'query { graph(path: "jira") { edges { list { src { name } dst { name } } } } }' + analyst_edges = { + (e["src"]["name"], e["dst"]["name"]) + for e in gql(QUERY_EDGES, headers=ANALYST_HEADERS)["data"]["graph"][ + "edges" + ]["list"] + } + assert analyst_edges == { + ("a", "b"), + ("a", "c"), + }, f"expected (a,b)+(a,c), got {analyst_edges}" + + +# --- Namespace permission tests --- + + +def test_namespace_introspect_shows_graphs_in_listing(): + """grantNamespace INTROSPECT: graphs appear in namespace listing but graph() is denied.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "INTROSPECT") + + # Graphs visible as MetaGraph in namespace listing + response = gql(QUERY_TEAM_GRAPHS, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [g["path"] for g in response["data"]["namespace"]["graphs"]["list"]] + assert "team/jira" in paths + + # Direct graph access returns null — INTROSPECT does not grant data access. 
+ response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + +def test_namespace_read_exposes_graphs(): + """grantNamespace READ: graphs in the namespace are fully accessible via graph().""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "READ") + + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "team/jira" + + +def test_child_namespace_restriction_overrides_parent(): + """More-specific child namespace grant overrides a broader parent grant. + + team → READ (parent) + team/restricted → INTROSPECT (child — more specific, should win) + + Graphs under team/jira are reachable via READ (only parent matches). + Graphs under team/restricted/ are only introspectable — the child INTROSPECT + entry overrides the parent READ, so graph() is denied there. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + gql("""mutation { newGraph(path:"team/restricted/secret", graphType:EVENT) }""") + create_role("analyst") + grant_namespace("analyst", "team", "READ") + grant_namespace("analyst", "team/restricted", "INTROSPECT") + + # team/jira: only matched by "team" → READ — direct access allowed + response = gql(QUERY_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["graph"]["path"] == "team/jira" + + # team/restricted/secret: "team/restricted" is the most specific match → INTROSPECT only + response = gql( + """query { graph(path: "team/restricted/secret") { path } }""", + headers=ANALYST_HEADERS, + ) + assert "errors" not in response, response + assert response["data"]["graph"] is None + + # But team/restricted/secret should still appear in the namespace listing + response = gql( + """query { namespace(path: "team/restricted") { graphs { list { path } } } }""", + headers=ANALYST_HEADERS, + ) + assert "errors" not in response, response + paths = [g["path"] for g in response["data"]["namespace"]["graphs"]["list"]] + assert "team/restricted/secret" in paths + + +def test_discover_derivation(): + """grantGraph READ on a namespaced graph → ancestor namespace gets DISCOVER (visible in children).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "READ") # no explicit namespace grant + + # "team" namespace appears in root children due to DISCOVER derivation + response = gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [n["path"] for n in response["data"]["root"]["children"]["list"]] + assert "team" in paths + + +def test_discover_revoked_when_only_child_revoked(): + """Revoking the only child READ grant removes DISCOVER from the parent namespace.""" + work_dir = 
tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "READ") + + paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "team" in paths # baseline: DISCOVER present + + revoke_graph("analyst", "team/jira") + + paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "team" not in paths # DISCOVER gone + + +def test_discover_stays_when_one_of_two_children_revoked(): + """DISCOVER persists while at least one child grant remains; clears only when all are revoked.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + gql(CREATE_TEAM_CONFLUENCE) + create_role("analyst") + grant_graph("analyst", "team/jira", "READ") + grant_graph("analyst", "team/confluence", "READ") + + revoke_graph("analyst", "team/jira") + paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "team" in paths # still visible via team/confluence + + revoke_graph("analyst", "team/confluence") + paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "team" not in paths # now gone + + +def test_discover_stays_when_parent_has_explicit_namespace_read(): + """Revoking a child graph READ does not remove an explicit namespace READ on the parent.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "READ") + grant_namespace("analyst", "team", "READ") # explicit, higher than DISCOVER + + revoke_graph("analyst", "team/jira") + + paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + 
assert "team" in paths # still visible via explicit namespace READ + + +def test_discover_revoked_for_nested_namespaces(): + """Revoking the only deep grant removes DISCOVER from all ancestor namespaces.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_DEEP) + create_role("analyst") + grant_graph("analyst", "a/b/c", "READ") # "a" and "a/b" both get DISCOVER + + root_paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "a" in root_paths + + a_paths = [ + n["path"] + for n in gql(QUERY_A_CHILDREN, headers=ANALYST_HEADERS)["data"][ + "namespace" + ]["children"]["list"] + ] + assert "a/b" in a_paths + + revoke_graph("analyst", "a/b/c") + + root_paths = [ + n["path"] + for n in gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS)["data"]["root"][ + "children" + ]["list"] + ] + assert "a" not in root_paths + + a_paths = [ + n["path"] + for n in gql(QUERY_A_CHILDREN, headers=ANALYST_HEADERS)["data"][ + "namespace" + ]["children"]["list"] + ] + assert "a/b" not in a_paths + + +def test_no_namespace_grant_hidden_from_children(): + """No grants at all → namespace is hidden from root children listing.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + # analyst has no grants at all + + response = gql(QUERY_NS_CHILDREN, headers=ANALYST_HEADERS) + assert "errors" not in response, response + paths = [n["path"] for n in response["data"]["root"]["children"]["list"]] + assert "team" not in paths + + +# --- deleteGraph / sendGraph policy delegation --- + +DELETE_JIRA = """mutation { deleteGraph(path: "jira") }""" +DELETE_TEAM_JIRA = """mutation { deleteGraph(path: "team/jira") }""" + + +def test_analyst_can_delete_with_graph_and_namespace_write(): + """deleteGraph requires WRITE on both the graph and its parent namespace.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + 
gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "WRITE") + grant_namespace("analyst", "team", "WRITE") + + response = gql(DELETE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["deleteGraph"] is True + + +def test_analyst_cannot_delete_with_graph_write_only(): + """Graph WRITE alone is insufficient for deleteGraph — namespace WRITE is also required.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "WRITE") + + response = gql(DELETE_JIRA, headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "jira" was not deleted as a side effect + check = gql(QUERY_JIRA) + assert check["data"]["graph"]["path"] == "jira" + + +def test_analyst_cannot_delete_with_read_grant(): + """'access':'ro' user with READ-only grant is denied by deleteGraph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql(DELETE_JIRA, headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "jira" was not deleted as a side effect + check = gql(QUERY_JIRA) + assert check["data"]["graph"]["path"] == "jira" + + +def test_analyst_can_delete_with_namespace_write(): + """'access':'ro' user with namespace WRITE (cascades to graph WRITE) can delete a graph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") + + response = gql(DELETE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["deleteGraph"] is True + + +def test_analyst_cannot_send_graph_without_namespace_write(): + """'access':'ro' 
user without namespace WRITE is denied by sendGraph.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "READ") # READ, not WRITE + + response = gql( + 'mutation { sendGraph(path: "team/new", graph: "dummydata", overwrite: false) }', + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + + +def test_analyst_send_graph_passes_auth_with_namespace_write(): + """'access':'ro' user with namespace WRITE passes the auth gate in sendGraph. + + The request fails on graph decoding (invalid data), not on access control — + proving the namespace WRITE check is honoured. + """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") + + response = gql( + 'mutation { sendGraph(path: "team/new", graph: "not_valid_base64", overwrite: false) }', + headers=ANALYST_HEADERS, + ) + # Auth passed — error is about graph decoding, not access + assert "errors" in response + assert "Access denied" not in response["errors"][0]["message"] + + +def test_analyst_send_graph_valid_data_with_namespace_write(): + """'access':'ro' user with namespace WRITE can successfully send a valid graph via sendGraph. + + Admin creates a graph and downloads it; analyst with WRITE sends it to a new path. + The graph appears at the new path and its data matches the original. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + # Add a node so the graph has content to verify after the roundtrip + gql("""query { + updateGraph(path: "jira") { + addNode(time: 1, name: "alice", properties: []) { success } + } + }""") + + # Admin downloads the graph as valid base64 + encoded = gql('query { receiveGraph(path: "jira") }')["data"]["receiveGraph"] + + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") + + # Analyst sends the encoded graph to a new path + response = gql( + f'mutation {{ sendGraph(path: "team/copy", graph: "{encoded}", overwrite: false) }}', + headers=ANALYST_HEADERS, + ) + assert "errors" not in response, response + assert response["data"]["sendGraph"] == "team/copy" + + # Verify the copy exists and contains the expected node + check = gql('query { graph(path: "team/copy") { nodes { list { name } } } }') + names = [n["name"] for n in check["data"]["graph"]["nodes"]["list"]] + assert "alice" in names + + +# --- moveGraph policy --- + +MOVE_TEAM_JIRA = """mutation { moveGraph(path: "team/jira", newPath: "team/jira-moved", overwrite: false) }""" + + +def test_analyst_can_move_with_graph_write_and_namespace_write(): + """moveGraph requires WRITE on the source graph and its parent namespace, plus WRITE on the destination namespace.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "WRITE") + grant_namespace("analyst", "team", "WRITE") + + response = gql(MOVE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["moveGraph"] is True + + +def test_analyst_cannot_move_with_graph_write_only(): + """Graph WRITE alone is insufficient for moveGraph — namespace WRITE on source is also required.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + 
grant_graph("analyst", "team/jira", "WRITE") + # no namespace grant → namespace WRITE check fails + + response = gql(MOVE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "team/jira" still exists and "team/jira-moved" was not created + team_graphs = gql(QUERY_TEAM_GRAPHS)["data"]["namespace"]["graphs"]["list"] + paths = [g["path"] for g in team_graphs] + assert "team/jira" in paths + assert "team/jira-moved" not in paths + + +def test_analyst_cannot_move_with_read_grant(): + """READ on source graph is insufficient for moveGraph — WRITE is required.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_TEAM_JIRA) + create_role("analyst") + grant_graph("analyst", "team/jira", "READ") + grant_namespace("analyst", "team", "WRITE") + + response = gql(MOVE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "team/jira" still exists and "team/jira-moved" was not created + team_graphs = gql(QUERY_TEAM_GRAPHS)["data"]["namespace"]["graphs"]["list"] + paths = [g["path"] for g in team_graphs] + assert "team/jira" in paths + assert "team/jira-moved" not in paths + + +# --- newGraph namespace write enforcement --- + + +def test_analyst_can_create_namespaced_graph_with_namespace_write(): + """'access':'ro' user with namespace WRITE can create a graph inside that namespace.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") + + response = gql(CREATE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" not in response, response + assert response["data"]["newGraph"] is True + + +def test_analyst_cannot_create_graph_with_namespace_read_only(): + """'access':'ro' user with namespace READ (not WRITE) is denied by newGraph.""" + work_dir = tempfile.mkdtemp() + with 
make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "READ") + + response = gql(CREATE_TEAM_JIRA, headers=ANALYST_HEADERS) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + # Verify "team/jira" was not created as a side effect — "team" namespace should be absent + children = gql(QUERY_NS_CHILDREN)["data"]["root"]["children"]["list"] + assert "team" not in [c["path"] for c in children] + + +# --- permissions entry point admin gate --- + + +def test_analyst_cannot_access_permissions_query_entry_point(): + """'access':'ro' user is denied at the permissions query entry point, not just the individual ops. + + This verifies the entry-point-level admin check added to query { permissions { ... } }. + Even with full namespace WRITE, a non-admin JWT cannot reach the permissions resolver. + """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") # full write, still not admin + + response = gql( + "query { permissions { listRoles } }", + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + + +def test_analyst_cannot_access_permissions_mutation_entry_point(): + """'access':'ro' user is denied at the mutation { permissions { ... } } entry point. + + Even with full namespace WRITE, a non-admin JWT is blocked before reaching any op. 
+ """ + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + create_role("analyst") + grant_namespace("analyst", "team", "WRITE") # full write, still not admin + + response = gql( + 'mutation { permissions { createRole(name: "hacker") { success } } }', + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] + + +# --- createIndex policy --- + + +def test_analyst_can_create_index_with_graph_write(): + """A user with WRITE on a graph can call createIndex (not admin-only).""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "WRITE") + + response = gql( + 'mutation { createIndex(path: "jira", inRam: true) }', + headers=ANALYST_HEADERS, + ) + # Auth passed — success or a feature-not-compiled error, not an access denial + if "errors" in response: + assert "Access denied" not in response["errors"][0]["message"] + + +def test_analyst_cannot_create_index_with_read_grant(): + """READ on a graph is insufficient for createIndex — WRITE is required.""" + work_dir = tempfile.mkdtemp() + with make_server(work_dir).start(): + gql(CREATE_JIRA) + create_role("analyst") + grant_graph("analyst", "jira", "READ") + + response = gql( + 'mutation { createIndex(path: "jira", inRam: true) }', + headers=ANALYST_HEADERS, + ) + assert "errors" in response + assert "Access denied" in response["errors"][0]["message"] diff --git a/python/tox.ini b/python/tox.ini index b716ef94ef..a0b42869dd 100644 --- a/python/tox.ini +++ b/python/tox.ini @@ -10,7 +10,7 @@ package = wheel wheel_build_env = .pkg extras = tox - all, storage, auth, timezone: test + all, storage, auth, timezone, permissions: test export: export all: all pass_env = @@ -40,6 +40,9 @@ commands = pytest --nbmake --nbmake-timeout=1200 {tty:--color=yes} tests/test_ba [testenv:auth] commands = pytest tests/test_auth.py +[testenv:permissions] +commands = pytest 
tests/test_permissions.py + [testenv:vectors] commands = pytest tests/test_vectors diff --git a/raphtory-api/src/core/entities/properties/prop/mod.rs b/raphtory-api/src/core/entities/properties/prop/mod.rs index 5aeeb202d7..4f563cdf57 100644 --- a/raphtory-api/src/core/entities/properties/prop/mod.rs +++ b/raphtory-api/src/core/entities/properties/prop/mod.rs @@ -1,7 +1,5 @@ pub mod arrow; - -mod prop_array; - +pub mod prop_array; pub mod prop_col; mod prop_enum; mod prop_ref_enum; diff --git a/raphtory-auth-noop/Cargo.toml b/raphtory-auth-noop/Cargo.toml new file mode 100644 index 0000000000..7b20f3a52c --- /dev/null +++ b/raphtory-auth-noop/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "raphtory-auth-noop" +version.workspace = true +edition.workspace = true + +[dependencies] diff --git a/raphtory-auth-noop/src/lib.rs b/raphtory-auth-noop/src/lib.rs new file mode 100644 index 0000000000..12cd021c75 --- /dev/null +++ b/raphtory-auth-noop/src/lib.rs @@ -0,0 +1 @@ +pub fn init() {} diff --git a/raphtory-graphql/schema.graphql b/raphtory-graphql/schema.graphql index bf1bbe56d1..9327ef2fb6 100644 --- a/raphtory-graphql/schema.graphql +++ b/raphtory-graphql/schema.graphql @@ -3136,7 +3136,12 @@ type QueryRoot { """ Returns a graph """ - graph(path: String!): Graph! + graph(path: String!): Graph + """ + Returns lightweight metadata for a graph (node/edge counts, timestamps) without loading it. + Requires at least INTROSPECT permission. + """ + graphMetadata(path: String!): MetaGraph """ Update graph query, has side effects to update graph state @@ -3172,7 +3177,8 @@ type QueryRoot { """ plugins: QueryPlugin! """ - Encodes graph and returns as string + Encodes graph and returns as string. + If the caller has filtered access, the returned graph is a materialized view of the filter. 
Returns:: Base64 url safe encoded string """ diff --git a/raphtory-graphql/src/auth.rs b/raphtory-graphql/src/auth.rs index 1626bf38a3..e4bcdc9da1 100644 --- a/raphtory-graphql/src/auth.rs +++ b/raphtory-graphql/src/auth.rs @@ -16,18 +16,21 @@ use poem::{ use reqwest::header::AUTHORIZATION; use serde::Deserialize; use std::{sync::Arc, time::Duration}; -use tokio::sync::{RwLock, Semaphore}; +use tokio::sync::Semaphore; +use tracing::{debug, warn}; #[derive(Clone, Debug, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] -pub(crate) enum Access { +pub enum Access { Ro, Rw, } #[derive(Deserialize, Debug, Clone)] pub(crate) struct TokenClaims { - pub(crate) a: Access, + pub(crate) access: Access, + #[serde(default)] + pub(crate) role: Option, } // TODO: maybe this should be renamed as it doens't only take care of auth anymore @@ -35,7 +38,7 @@ pub struct AuthenticatedGraphQL { executor: E, config: AuthConfig, semaphore: Option, - lock: Option>, + lock: Option>, } impl AuthenticatedGraphQL { @@ -58,7 +61,7 @@ impl AuthenticatedGraphQL { .and_then(|thread_safe| { if thread_safe == "1" { println!("Server running in threadsafe mode"); - Some(RwLock::new(())) + Some(tokio::sync::RwLock::new(())) } else { None } @@ -124,23 +127,28 @@ where async fn call(&self, req: Request) -> Result { // here ANY error when trying to validate the Authorization header is equivalent to it not being present at all - let access = match &self.config.public_key { + let (access, role) = match &self.config.public_key { Some(public_key) => { - let presented_access = req + let claims = req .header(AUTHORIZATION) - .and_then(|header| extract_access_from_header(header, public_key)); - match presented_access { - Some(access) => access, + .and_then(|header| extract_claims_from_header(header, public_key)); + match claims { + Some(claims) => { + debug!(role = ?claims.role, "JWT validated successfully"); + (claims.access, claims.role) + } None => { - if self.config.enabled_for_reads { + if 
self.config.require_auth_for_reads { + warn!("Request missing valid JWT — rejecting (require_auth_for_reads=true)"); return Err(Unauthorized(AuthError::RequireRead)); } else { - Access::Ro // if read access is not required, we give read access to all requests + debug!("No valid JWT but require_auth_for_reads=false — granting read access"); + (Access::Ro, None) } } } } - None => Access::Rw, // if auth is not setup, we give write access to all requests + None => (Access::Rw, None), // if auth is not setup, we give write access to all requests }; let is_accept_multipart_mixed = req @@ -151,7 +159,7 @@ where if is_accept_multipart_mixed { let (req, mut body) = req.split(); let req = GraphQLRequest::from_request(&req, &mut body).await?; - let req = req.0.data(access); + let req = req.0.data(access).data(role); let stream = self.executor.execute_stream(req, None); Ok(Response::builder() .header("content-type", "multipart/mixed; boundary=graphql") @@ -162,7 +170,7 @@ where } else { let (req, mut body) = req.split(); let req = GraphQLBatchRequest::from_request(&req, &mut body).await?; - let req = req.0.data(access); + let req = req.0.data(access).data(role); let contains_update = match &req { BatchRequest::Single(request) => request.query.contains("updateGraph"), @@ -200,28 +208,50 @@ fn is_query_heavy(query: &str) -> bool { || query.contains("inNeighbours") } -fn extract_access_from_header(header: &str, public_key: &PublicKey) -> Option { +fn extract_claims_from_header(header: &str, public_key: &PublicKey) -> Option { if header.starts_with("Bearer ") { let jwt = header.replace("Bearer ", ""); - let mut validation = Validation::new(Algorithm::EdDSA); + let mut validation = Validation::new(public_key.algorithms[0]); + validation.algorithms = public_key.algorithms.clone(); validation.set_required_spec_claims::(&[]); // we don't require 'exp' to be present let decoded = decode::(&jwt, &public_key.decoding_key, &validation); - Some(decoded.ok()?.claims.a) + match decoded { + 
Ok(token_data) => Some(token_data.claims), + Err(e) => { + warn!(error = %e, "JWT signature validation failed"); + None + } + } } else { + warn!("Authorization header is missing or does not start with 'Bearer '"); None } } pub(crate) trait ContextValidation { - fn require_write_access(&self) -> Result<(), AuthError>; + fn require_jwt_write_access(&self) -> Result<(), AuthError>; +} + +/// Check that the request carries a write-access JWT (`"access": "rw"`). +/// For use in dynamic resolver ops that run under `query { ... }` and are +/// therefore not covered by the `MutationAuth` extension. +pub fn require_jwt_write_access_dynamic( + ctx: &async_graphql::dynamic::ResolverContext, +) -> Result<(), async_graphql::Error> { + if ctx.data::().is_ok_and(|a| a == &Access::Rw) { + Ok(()) + } else { + Err(async_graphql::Error::new( + "Access denied: write access required", + )) + } } impl<'a> ContextValidation for &Context<'a> { - fn require_write_access(&self) -> Result<(), AuthError> { - if self.data::().is_ok_and(|role| role == &Access::Rw) { - Ok(()) - } else { - Err(AuthError::RequireWrite) + fn require_jwt_write_access(&self) -> Result<(), AuthError> { + match self.data::() { + Ok(access) if access == &Access::Rw => Ok(()), + _ => Err(AuthError::RequireWrite), } } } @@ -249,10 +279,18 @@ impl Extension for MutationAuth { .iter() .any(|op| op.1.node.ty == OperationType::Mutation); if mutation && ctx.data::() != Ok(&Access::Rw) { - Err(AuthError::RequireWrite.into()) - } else { - Ok(doc) + // If a policy is active, allow "ro" users through to resolvers — + // each resolver enforces its own per-graph or admin-only check. + // Without a policy (OSS), preserve the original blanket deny. 
+ let policy_active = ctx + .data::() + .map(|d| d.auth_policy.is_some()) + .unwrap_or(false); + if !policy_active { + return Err(AuthError::RequireWrite.into()); + } } + Ok(doc) }) } } diff --git a/raphtory-graphql/src/auth_policy.rs b/raphtory-graphql/src/auth_policy.rs new file mode 100644 index 0000000000..30696c3cc6 --- /dev/null +++ b/raphtory-graphql/src/auth_policy.rs @@ -0,0 +1,128 @@ +use crate::model::graph::filtering::GraphAccessFilter; + +/// Opaque error returned by [`AuthorizationPolicy::graph_permissions`] when access is entirely +/// denied. The message is intended for logging only; callers must not surface it to end users. +#[derive(Debug)] +pub struct AuthPolicyError(String); + +impl AuthPolicyError { + pub fn new(msg: impl Into) -> Self { + Self(msg.into()) + } +} + +impl std::fmt::Display for AuthPolicyError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.0) + } +} + +// async_graphql's blanket `impl From for Error` covers +// AuthPolicyError automatically via its Display impl. + +/// The effective permission level a principal has on a specific graph. +/// Variants are ordered by the hierarchy: `Write` > `Read{filter:None}` > `Read{filter:Some}` > `Introspect`. +/// A filtered `Read` is less powerful than an unfiltered `Read` because it sees a restricted view. +#[derive(Clone)] +pub enum GraphPermission { + /// May query graph metadata (counts, schema) but not read data. + Introspect, + /// May read graph data; optionally restricted by a data filter. + Read { filter: Option }, + /// May read and mutate the graph (implies `Read` and `Introspect`, never filtered). + Write, +} + +impl GraphPermission { + /// Numeric level used for ordering: `Introspect`=0, `Read{Some}`=1, `Read{None}`=2, `Write`=3. 
+ fn level(&self) -> u8 { + match self { + GraphPermission::Introspect => 0, + GraphPermission::Read { filter: Some(_) } => 1, + GraphPermission::Read { filter: None } => 2, + GraphPermission::Write => 3, + } + } + + /// Returns `true` if the permission level is `Read` or higher. + pub fn is_at_least_read(&self) -> bool { + self.level() >= 1 + } + + /// Returns `true` only for `Write` permission. + pub fn is_write(&self) -> bool { + self.level() >= 3 + } + + /// Returns `Some(self)` if at least `Read` (filtered or not), `None` otherwise. + /// Use with `?` to gate access and preserve the permission value for filter extraction. + pub fn at_least_read(self) -> Option { + self.is_at_least_read().then_some(self) + } + + /// Returns `Some(self)` if `Write`, `None` otherwise. + pub fn at_least_write(self) -> Option { + self.is_write().then_some(self) + } +} + +impl PartialEq for GraphPermission { + fn eq(&self, other: &Self) -> bool { + self.level() == other.level() + } +} + +impl Eq for GraphPermission {} + +impl PartialOrd for GraphPermission { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for GraphPermission { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.level().cmp(&other.level()) + } +} + +/// The effective permission level a principal has on a namespace. +/// Variants are ordered lowest to highest so that `PartialOrd`/`Ord` reflect the hierarchy. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum NamespacePermission { + /// No access — namespace is invisible. + Denied, + /// Namespace is visible in parent `children()` listings but cannot be browsed. + Discover, + /// Namespace is browseable; graphs inside are visible as MetaGraph in `graphs()`. + Introspect, + /// All descendant graphs are fully readable. + Read, + /// All descendants are writable; `newGraph` is allowed. 
+ Write, +} + +pub trait AuthorizationPolicy: Send + Sync + 'static { + /// Resolves the effective permission level for a principal on a graph. + /// Returns `Err(denial message)` only when access is entirely denied (not even introspect). + /// Admin principals (`"access": "rw"` JWT) always yield `Write`. + /// Empty store (no roles configured) yields `Read` — fail open for reads, + /// but write still requires an explicit `Write` grant. + /// The implementation is responsible for extracting principal identity from `ctx`. + fn graph_permissions( + &self, + ctx: &async_graphql::Context<'_>, + path: &str, + ) -> Result; + + /// Resolves the effective namespace permission for a principal. + /// Admin principals always yield `Write`. + /// Empty store yields `Read` (fail open, consistent with graph_permissions). + /// Missing role yields `Denied`. + /// The implementation is responsible for extracting principal identity from `ctx`. + fn namespace_permissions( + &self, + ctx: &async_graphql::Context<'_>, + path: &str, + ) -> NamespacePermission; +} diff --git a/raphtory-graphql/src/cli.rs b/raphtory-graphql/src/cli.rs index 4cc5190322..49da454e84 100644 --- a/raphtory-graphql/src/cli.rs +++ b/raphtory-graphql/src/cli.rs @@ -3,7 +3,7 @@ use crate::config::index_config::DEFAULT_CREATE_INDEX; use crate::{ config::{ app_config::AppConfigBuilder, - auth_config::{DEFAULT_AUTH_ENABLED_FOR_READS, PUBLIC_KEY_DECODING_ERR_MSG}, + auth_config::{DEFAULT_REQUIRE_AUTH_FOR_READS, PUBLIC_KEY_DECODING_ERR_MSG}, cache_config::{DEFAULT_CAPACITY, DEFAULT_TTI_SECONDS}, log_config::DEFAULT_LOG_LEVEL, otlp_config::{ @@ -12,7 +12,7 @@ use crate::{ }, }, model::App, - server::DEFAULT_PORT, + server::{apply_server_extension, DEFAULT_PORT}, GraphServer, }; use clap::{Parser, Subcommand}; @@ -75,12 +75,15 @@ struct ServerArgs { #[arg(long, env = "RAPHTORY_AUTH_PUBLIC_KEY", default_value = None, help = "Public key for auth")] auth_public_key: Option, - #[arg(long, env = 
"RAPHTORY_AUTH_ENABLED_FOR_READS", default_value_t = DEFAULT_AUTH_ENABLED_FOR_READS, help = "Enable auth for reads")] - auth_enabled_for_reads: bool, + #[arg(long, env = "RAPHTORY_REQUIRE_AUTH_FOR_READS", default_value_t = DEFAULT_REQUIRE_AUTH_FOR_READS, help = "Require JWT authentication for read requests (default: true)")] + require_auth_for_reads: bool, #[arg(long, env = "RAPHTORY_PUBLIC_DIR", default_value = None, help = "Public directory path")] public_dir: Option, + #[arg(long, env = "RAPHTORY_PERMISSIONS_STORE_PATH", default_value = None, help = "Path to the JSON permissions store file")] + permissions_store_path: Option, + #[cfg(feature = "search")] #[arg(long, env = "RAPHTORY_CREATE_INDEX", default_value_t = DEFAULT_CREATE_INDEX, help = "Enable index creation")] create_index: bool, @@ -114,7 +117,7 @@ where .with_auth_public_key(server_args.auth_public_key) .expect(PUBLIC_KEY_DECODING_ERR_MSG) .with_public_dir(server_args.public_dir) - .with_auth_enabled_for_reads(server_args.auth_enabled_for_reads); + .with_require_auth_for_reads(server_args.require_auth_for_reads); #[cfg(feature = "search")] { @@ -123,14 +126,15 @@ where let app_config = Some(builder.build()); - GraphServer::new( + let server = GraphServer::new( server_args.work_dir, app_config, None, server_args.graph_config, - )? 
- .run_with_port(server_args.port) - .await?; + )?; + let server = + apply_server_extension(server, server_args.permissions_store_path.as_deref()); + server.run_with_port(server_args.port).await?; } } Ok(()) diff --git a/raphtory-graphql/src/config/app_config.rs b/raphtory-graphql/src/config/app_config.rs index 9404d678e6..56c6ba29a1 100644 --- a/raphtory-graphql/src/config/app_config.rs +++ b/raphtory-graphql/src/config/app_config.rs @@ -106,8 +106,8 @@ impl AppConfigBuilder { Ok(self) } - pub fn with_auth_enabled_for_reads(mut self, enabled_for_reads: bool) -> Self { - self.auth.enabled_for_reads = enabled_for_reads; + pub fn with_require_auth_for_reads(mut self, require_auth_for_reads: bool) -> Self { + self.auth.require_auth_for_reads = require_auth_for_reads; self } @@ -195,8 +195,8 @@ pub fn load_config( .with_auth_public_key(public_key) .map_err(|_| ConfigError::Message(PUBLIC_KEY_DECODING_ERR_MSG.to_owned()))?; } - if let Ok(enabled_for_reads) = settings.get::("auth.enabled_for_reads") { - app_config_builder = app_config_builder.with_auth_enabled_for_reads(enabled_for_reads); + if let Ok(require_auth_for_reads) = settings.get::("auth.require_auth_for_reads") { + app_config_builder = app_config_builder.with_require_auth_for_reads(require_auth_for_reads); } if let Ok(public_dir) = settings.get::>("public_dir") { diff --git a/raphtory-graphql/src/config/auth_config.rs b/raphtory-graphql/src/config/auth_config.rs index 166429ad1f..8a29300cad 100644 --- a/raphtory-graphql/src/config/auth_config.rs +++ b/raphtory-graphql/src/config/auth_config.rs @@ -1,16 +1,52 @@ use base64::{prelude::BASE64_STANDARD, DecodeError, Engine}; -use jsonwebtoken::DecodingKey; +use jsonwebtoken::{Algorithm, DecodingKey}; use serde::{de, Deserialize, Deserializer, Serialize}; use spki::SubjectPublicKeyInfoRef; use std::fmt::Debug; -pub const DEFAULT_AUTH_ENABLED_FOR_READS: bool = true; -pub const PUBLIC_KEY_DECODING_ERR_MSG: &str = "Could not successfully decode the public key. 
Make sure you use the standard alphabet with padding"; +pub const DEFAULT_REQUIRE_AUTH_FOR_READS: bool = true; +pub const PUBLIC_KEY_DECODING_ERR_MSG: &str = + "Could not decode public key. Provide a base64-encoded DER (X.509 SPKI) public key \ + for Ed25519 or RSA (2048-4096 bit)."; + +/// Describes one family of asymmetric public-key algorithms that Raphtory can validate JWTs with. +/// +/// To add support for a new algorithm family (e.g. EC/ECDSA), append one entry to +/// [`SUPPORTED_ALGORITHMS`] — no other code needs to change. +struct AlgorithmSpec { + /// X.509 SPKI algorithm OID string (e.g. `"1.3.101.112"` for Ed25519). + oid: &'static str, + /// Constructs the `DecodingKey` from the raw subject-public-key bytes extracted from the SPKI + /// structure (i.e. the inner key bytes, not the full DER-encoded SPKI wrapper). + make_key: fn(&[u8]) -> DecodingKey, + /// JWT algorithms accepted for this key family. All listed variants are allowed during + /// validation; the first entry is used as the `Validation` default. + algorithms: &'static [Algorithm], +} + +/// Registry of supported public-key algorithm families. +/// +/// # Adding a new family +/// Append an [`AlgorithmSpec`] entry here. `TryFrom for PublicKey` will pick it up +/// automatically — no other changes required. 
+const SUPPORTED_ALGORITHMS: &[AlgorithmSpec] = &[ + AlgorithmSpec { + oid: "1.3.101.112", // id-EdDSA (Ed25519) + make_key: DecodingKey::from_ed_der, + algorithms: &[Algorithm::EdDSA], + }, + AlgorithmSpec { + oid: "1.2.840.113549.1.1.1", // rsaEncryption (PKCS#1) + make_key: DecodingKey::from_rsa_der, + algorithms: &[Algorithm::RS256, Algorithm::RS384, Algorithm::RS512], + }, +]; #[derive(Clone)] pub struct PublicKey { source: String, pub(crate) decoding_key: DecodingKey, + pub(crate) algorithms: Vec, } impl PartialEq for PublicKey { @@ -23,8 +59,10 @@ impl PartialEq for PublicKey { pub enum PublicKeyError { #[error(transparent)] Base64(#[from] DecodeError), - #[error("The provided key is not a a valid X.509 Subject Public Key Info ASN.1 structure")] + #[error("The provided key is not a valid X.509 Subject Public Key Info ASN.1 structure")] Spki, + #[error("Key algorithm is not supported; see SUPPORTED_ALGORITHMS for accepted OIDs")] + UnsupportedAlgorithm, } impl TryFrom for PublicKey { @@ -33,10 +71,16 @@ impl TryFrom for PublicKey { let der = BASE64_STANDARD.decode(&value)?; let spki_ref = SubjectPublicKeyInfoRef::try_from(der.as_ref()).map_err(|_| PublicKeyError::Spki)?; - let decoding_key = DecodingKey::from_ed_der(spki_ref.subject_public_key.raw_bytes()); + let oid = spki_ref.algorithm.oid.to_string(); + let spec = SUPPORTED_ALGORITHMS + .iter() + .find(|s| s.oid == oid.as_str()) + .ok_or(PublicKeyError::UnsupportedAlgorithm)?; + let raw = spki_ref.subject_public_key.raw_bytes(); Ok(Self { source: value, - decoding_key, + decoding_key: (spec.make_key)(raw), + algorithms: spec.algorithms.to_vec(), }) } } @@ -69,14 +113,14 @@ impl Debug for PublicKey { #[derive(Debug, Deserialize, Clone, Serialize, PartialEq)] pub struct AuthConfig { pub public_key: Option, - pub enabled_for_reads: bool, + pub require_auth_for_reads: bool, } impl Default for AuthConfig { fn default() -> Self { Self { public_key: None, - enabled_for_reads: DEFAULT_AUTH_ENABLED_FOR_READS, + 
require_auth_for_reads: DEFAULT_REQUIRE_AUTH_FOR_READS, } } } diff --git a/raphtory-graphql/src/data.rs b/raphtory-graphql/src/data.rs index 46462837de..94955f9665 100644 --- a/raphtory-graphql/src/data.rs +++ b/raphtory-graphql/src/data.rs @@ -1,4 +1,5 @@ use crate::{ + auth_policy::AuthorizationPolicy, config::app_config::AppConfig, graph::GraphWithVectors, model::blocking_io, @@ -136,6 +137,7 @@ pub struct Data { pub(crate) create_index: bool, pub(crate) embedding_conf: Option, pub(crate) graph_conf: Config, + pub(crate) auth_policy: Option>, } impl Data { @@ -173,6 +175,7 @@ impl Data { create_index, embedding_conf: Default::default(), graph_conf, + auth_policy: None, } } diff --git a/raphtory-graphql/src/lib.rs b/raphtory-graphql/src/lib.rs index f13c416acf..d1b68476c2 100644 --- a/raphtory-graphql/src/lib.rs +++ b/raphtory-graphql/src/lib.rs @@ -1,9 +1,14 @@ -pub use crate::server::GraphServer; +pub use crate::{ + auth::{require_jwt_write_access_dynamic, Access}, + model::graph::filtering::GraphAccessFilter, + server::GraphServer, +}; use crate::{data::InsertionError, paths::PathValidationError}; use raphtory::errors::GraphError; use std::sync::Arc; mod auth; +pub mod auth_policy; pub mod client; pub mod data; mod embeddings; @@ -41,6 +46,7 @@ mod graphql_test { #[cfg(feature = "search")] use crate::config::app_config::AppConfigBuilder; use crate::{ + auth::Access, config::app_config::AppConfig, data::{data_tests::save_graphs_to_work_dir, Data}, model::App, @@ -86,7 +92,7 @@ mod graphql_test { ) }"#; - let req = Request::new(query); + let req = Request::new(query).data(Access::Rw); let res = schema.execute(req).await; assert_eq!(res.errors, []); } @@ -1064,7 +1070,9 @@ mod graphql_test { "##; let variables = json!({ "file": null, "overwrite": false }); - let mut req = Request::new(query).variables(Variables::from_json(variables)); + let mut req = Request::new(query) + .variables(Variables::from_json(variables)) + .data(Access::Rw); 
req.set_upload("variables.file", upload_val); let res = schema.execute(req).await; assert_eq!(res.errors, vec![]); @@ -1109,9 +1117,11 @@ mod graphql_test { sendGraph(path: "test", graph: $graph, overwrite: $overwrite) } "#; - let req = Request::new(query).variables(Variables::from_json( - json!({ "graph": graph_str, "overwrite": false }), - )); + let req = Request::new(query) + .variables(Variables::from_json( + json!({ "graph": graph_str, "overwrite": false }), + )) + .data(Access::Rw); let res = schema.execute(req).await; assert_eq!(res.errors, []); @@ -1624,7 +1634,7 @@ mod graphql_test { createSubgraph(parentPath: "graph", newPath: "graph2", nodes: ["1", "2"], overwrite: false) } "#; - let req = Request::new(req); + let req = Request::new(req).data(Access::Rw); let res = schema.execute(req).await; assert_eq!(res.errors, vec![]); let req = r#" @@ -1632,7 +1642,7 @@ mod graphql_test { createSubgraph(parentPath: "graph", newPath: "namespace1/graph3", nodes: ["2", "3", "4"], overwrite: false) } "#; - let req = Request::new(req); + let req = Request::new(req).data(Access::Rw); let res = schema.execute(req).await; assert_eq!(res.errors, vec![]); diff --git a/raphtory-graphql/src/model/graph/filtering.rs b/raphtory-graphql/src/model/graph/filtering.rs index fdbfd8187e..b2bfe9cc8b 100644 --- a/raphtory-graphql/src/model/graph/filtering.rs +++ b/raphtory-graphql/src/model/graph/filtering.rs @@ -31,6 +31,7 @@ use raphtory_api::core::{ entities::{properties::prop::Prop, Layer, GID}, storage::timeindex::{AsTime, EventTime}, }; +use serde::{Deserialize, Serialize}; use std::{ borrow::Cow, collections::HashSet, @@ -40,7 +41,7 @@ use std::{ sync::Arc, }; -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct Window { /// Window start time. 
pub start: GqlTimeInput, @@ -260,7 +261,8 @@ pub enum PathFromNodeViewCollection { ShrinkEnd(GqlTimeInput), } -#[derive(Enum, Copy, Clone, Debug)] +#[derive(Enum, Copy, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub enum NodeField { /// Node ID field. /// @@ -303,7 +305,7 @@ impl Display for NodeField { /// ```graphql /// { Property: { name: "weight", where: { Gt: 0.5 } } } /// ``` -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct PropertyFilterNew { /// Property (or metadata) key. pub name: String, @@ -311,6 +313,7 @@ pub struct PropertyFilterNew { /// /// Exposed as `where` in GraphQL. #[graphql(name = "where")] + #[serde(rename = "where")] pub where_: PropCondition, } @@ -331,7 +334,8 @@ pub struct PropertyFilterNew { /// - `Value` is interpreted according to the property’s type. /// - Aggregators/qualifiers like `Sum` and `Len` apply when the underlying /// property is list-like or aggregatable (depending on your engine rules). -#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub enum PropCondition { /// Equality: property value equals the given value. Eq(Value), @@ -448,7 +452,7 @@ impl PropCondition { /// ```graphql /// { Window: { start: 0, end: 10, expr: { Layers: { names: ["A"] } } } } /// ``` -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct GraphWindowExpr { /// Window start time (inclusive). pub start: GqlTimeInput, @@ -464,7 +468,7 @@ pub struct GraphWindowExpr { /// /// Example: /// `{ At: { time: 5, expr: { Layers: { names: ["L1"] } } } }` -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct GraphTimeExpr { /// Reference time for the operation. 
pub time: GqlTimeInput, @@ -475,7 +479,7 @@ pub struct GraphTimeExpr { /// Graph view restriction that takes only a nested expression. /// /// Used for unary view operations like `Latest` and `SnapshotLatest`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct GraphUnaryExpr { /// Optional nested filter applied after the unary operation. pub expr: Option>, @@ -484,7 +488,7 @@ pub struct GraphUnaryExpr { /// Graph view restriction by layer membership, optionally chaining another `GraphFilter`. /// /// Used by `GqlGraphFilter::Layers`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct GraphLayersExpr { /// Layer names to include. pub names: Vec, @@ -504,8 +508,9 @@ pub struct GraphLayersExpr { /// /// These filters can be nested via the `expr` field on the corresponding /// `*Expr` input objects to form pipelines. -#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] #[graphql(name = "GraphFilter")] +#[serde(rename_all = "camelCase")] pub enum GqlGraphFilter { /// Restrict evaluation to a time window (inclusive start, exclusive end). Window(GraphWindowExpr), @@ -534,7 +539,8 @@ pub enum GqlGraphFilter { /// /// Supports comparisons, string predicates, and set membership. /// (Presence checks and aggregations are handled via property filters instead.) -#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub enum NodeFieldCondition { /// Equality. Eq(Value), @@ -590,7 +596,7 @@ impl NodeFieldCondition { /// ```graphql /// { Node: { field: NodeName, where: { Contains: "ali" } } } /// ``` -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct NodeFieldFilterNew { /// Which built-in field to filter. 
pub field: NodeField, @@ -598,6 +604,7 @@ pub struct NodeFieldFilterNew { /// /// Exposed as `where` in GraphQL. #[graphql(name = "where")] + #[serde(rename = "where")] pub where_: NodeFieldCondition, } @@ -606,7 +613,7 @@ pub struct NodeFieldFilterNew { /// Used by `GqlNodeFilter::Window`. /// /// The window is inclusive of `start` and exclusive of `end`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct NodeWindowExpr { /// Window start time (inclusive). pub start: GqlTimeInput, @@ -619,7 +626,7 @@ pub struct NodeWindowExpr { /// Restricts node evaluation to a single time bound and applies a nested `NodeFilter`. /// /// Used by `At`, `Before`, and `After` node filters. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct NodeTimeExpr { /// Reference time for the operation. pub time: GqlTimeInput, @@ -630,7 +637,7 @@ pub struct NodeTimeExpr { /// Applies a unary node-view operation and then evaluates a nested `NodeFilter`. /// /// Used by `Latest` and `SnapshotLatest` node filters. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct NodeUnaryExpr { /// Filter evaluated after applying the unary operation. pub expr: Wrapped, @@ -639,7 +646,7 @@ pub struct NodeUnaryExpr { /// Restricts node evaluation to one or more layers and applies a nested `NodeFilter`. /// /// Used by `GqlNodeFilter::Layers`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct NodeLayersExpr { /// Layer names to include. pub names: Vec, @@ -661,8 +668,9 @@ pub struct NodeLayersExpr { /// /// Filters can be combined recursively using logical operators /// (`And`, `Or`, `Not`). 
-#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] #[graphql(name = "NodeFilter")] +#[serde(rename_all = "camelCase")] pub enum GqlNodeFilter { /// Filters a built-in node field (ID, name, or type). Node(NodeFieldFilterNew), @@ -718,7 +726,7 @@ pub enum GqlNodeFilter { /// Used by `GqlEdgeFilter::Window`. /// /// The window is inclusive of `start` and exclusive of `end`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct EdgeWindowExpr { /// Window start time (inclusive). pub start: GqlTimeInput, @@ -731,7 +739,7 @@ pub struct EdgeWindowExpr { /// Restricts edge evaluation to a single time bound and applies a nested `EdgeFilter`. /// /// Used by `At`, `Before`, and `After` edge filters. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct EdgeTimeExpr { /// Reference time for the operation. pub time: GqlTimeInput, @@ -742,7 +750,7 @@ pub struct EdgeTimeExpr { /// Applies a unary edge-view operation and then evaluates a nested `EdgeFilter`. /// /// Used by `Latest` and `SnapshotLatest` edge filters. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct EdgeUnaryExpr { /// Filter evaluated after applying the unary operation. pub expr: Wrapped, @@ -751,7 +759,7 @@ pub struct EdgeUnaryExpr { /// Restricts edge evaluation to one or more layers and applies a nested `EdgeFilter`. /// /// Used by `GqlEdgeFilter::Layers`. -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct EdgeLayersExpr { /// Layer names to include. 
pub names: Vec, @@ -787,8 +795,9 @@ pub struct EdgeLayersExpr { /// } /// } /// ``` -#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] #[graphql(name = "EdgeFilter")] +#[serde(rename_all = "camelCase")] pub enum GqlEdgeFilter { /// Applies a filter to the **source node** of the edge. /// @@ -903,7 +912,8 @@ pub enum GqlEdgeFilter { IsSelfLoop(bool), } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] pub struct Wrapped(Box); impl Deref for Wrapped { type Target = T; @@ -1734,3 +1744,23 @@ impl TryFrom for DynView { }) } } + +/// Combined filter input covering all three filter levels (node, edge, graph-level). +/// Used by `grantGraphFilteredReadOnly` to express a data-access restriction +/// that is transparently applied whenever the role queries the graph. +/// Use `and` / `or` to compose multiple sub-filters. +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum GraphAccessFilter { + /// Filter by node properties, fields, or temporal state. + Node(GqlNodeFilter), + /// Filter by edge properties, source/destination, or temporal state. + Edge(GqlEdgeFilter), + /// Apply a graph-level view (window, snapshot, layer restriction, …). + Graph(GqlGraphFilter), + /// All sub-filters must pass (intersection). + And(Vec), + /// At least one sub-filter must pass (union within each filter type; + /// cross-type sub-filters are applied as independent restrictions). + Or(Vec), +} diff --git a/raphtory-graphql/src/model/graph/graph.rs b/raphtory-graphql/src/model/graph/graph.rs index d235d6a68a..927379189c 100644 --- a/raphtory-graphql/src/model/graph/graph.rs +++ b/raphtory-graphql/src/model/graph/graph.rs @@ -284,62 +284,59 @@ impl GqlGraph { } /// Returns the time entry of the earliest activity in the graph. 
- async fn earliest_time(&self) -> GqlEventTime { + async fn earliest_time(&self) -> Result { let self_clone = self.clone(); - blocking_compute(move || self_clone.graph.earliest_time().into()).await + Ok(blocking_compute(move || self_clone.graph.earliest_time().into()).await) } /// Returns the time entry of the latest activity in the graph. - async fn latest_time(&self) -> GqlEventTime { + async fn latest_time(&self) -> Result { let self_clone = self.clone(); - blocking_compute(move || self_clone.graph.latest_time().into()).await + Ok(blocking_compute(move || self_clone.graph.latest_time().into()).await) } /// Returns the start time of the window. Errors if there is no window. - async fn start(&self) -> GqlEventTime { - self.graph.start().into() + async fn start(&self) -> Result { + Ok(self.graph.start().into()) } /// Returns the end time of the window. Errors if there is no window. - async fn end(&self) -> GqlEventTime { - self.graph.end().into() + async fn end(&self) -> Result { + Ok(self.graph.end().into()) } /// Returns the earliest time that any edge in this graph is valid. - async fn earliest_edge_time(&self, include_negative: Option) -> GqlEventTime { + async fn earliest_edge_time(&self, include_negative: Option) -> Result { let self_clone = self.clone(); - blocking_compute(move || { + Ok(blocking_compute(move || { let include_negative = include_negative.unwrap_or(true); - let all_edges = self_clone + self_clone .graph .edges() .earliest_time() .into_iter() .filter_map(|edge_time| edge_time.filter(|&time| include_negative || time.t() >= 0)) .min() - .into(); - all_edges + .into() }) - .await + .await) } /// Returns the latest time that any edge in this graph is valid. 
- async fn latest_edge_time(&self, include_negative: Option) -> GqlEventTime { + async fn latest_edge_time(&self, include_negative: Option) -> Result { let self_clone = self.clone(); - blocking_compute(move || { + Ok(blocking_compute(move || { let include_negative = include_negative.unwrap_or(true); - let all_edges = self_clone + self_clone .graph .edges() .latest_time() .into_iter() .filter_map(|edge_time| edge_time.filter(|&time| include_negative || time.t() >= 0)) .max() - .into(); - - all_edges + .into() }) - .await + .await) } //////////////////////// @@ -374,20 +371,20 @@ impl GqlGraph { //////////////////////// /// Returns true if the graph contains the specified node. - async fn has_node(&self, name: String) -> bool { - self.graph.has_node(name) + async fn has_node(&self, name: String) -> Result { + Ok(self.graph.has_node(name)) } /// Returns true if the graph contains the specified edge. Edges are specified by providing a source and destination node id. You can restrict the search to a specified layer. - async fn has_edge(&self, src: String, dst: String, layer: Option) -> bool { - match layer { + async fn has_edge(&self, src: String, dst: String, layer: Option) -> Result { + Ok(match layer { Some(name) => self .graph .layers(name) .map(|l| l.has_edge(src, dst)) .unwrap_or(false), None => self.graph.has_edge(src, dst), - } + }) } //////////////////////// @@ -395,12 +392,12 @@ impl GqlGraph { //////////////////////// /// Gets the node with the specified id. - async fn node(&self, name: String) -> Option { - self.graph.node(name).map(|node| node.into()) + async fn node(&self, name: String) -> Result> { + Ok(self.graph.node(name).map(|node| node.into())) } /// Gets (optionally a subset of) the nodes in the graph. 
- async fn nodes(&self, select: Option) -> Result { + async fn nodes(&self, select: Option) -> Result { let nn = self.graph.nodes(); if let Some(sel) = select { @@ -417,12 +414,12 @@ impl GqlGraph { } /// Gets the edge with the specified source and destination nodes. - async fn edge(&self, src: String, dst: String) -> Option { - self.graph.edge(src, dst).map(|e| e.into()) + async fn edge(&self, src: String, dst: String) -> Result> { + Ok(self.graph.edge(src, dst).map(|e| e.into())) } /// Gets the edges in the graph. - async fn edges<'a>(&self, select: Option) -> Result { + async fn edges<'a>(&self, select: Option) -> Result { let base = self.graph.edges_unlocked(); if let Some(sel) = select { @@ -439,13 +436,13 @@ impl GqlGraph { //////////////////////// /// Returns the properties of the graph. - async fn properties(&self) -> GqlProperties { - Into::::into(self.graph.properties()).into() + async fn properties(&self) -> Result { + Ok(Into::::into(self.graph.properties()).into()) } /// Returns the metadata of the graph. - async fn metadata(&self) -> GqlMetadata { - self.graph.metadata().into() + async fn metadata(&self) -> Result { + Ok(self.graph.metadata().into()) } //////////////////////// @@ -475,18 +472,18 @@ impl GqlGraph { } /// Returns the graph schema. 
- async fn schema(&self) -> GraphSchema { + async fn schema(&self) -> Result { let self_clone = self.clone(); - blocking_compute(move || GraphSchema::new(&self_clone.graph)).await + Ok(blocking_compute(move || GraphSchema::new(&self_clone.graph)).await) } async fn algorithms(&self) -> GraphAlgorithmPlugin { self.graph.clone().into() } - async fn shared_neighbours(&self, selected_nodes: Vec) -> Vec { + async fn shared_neighbours(&self, selected_nodes: Vec) -> Result> { let self_clone = self.clone(); - blocking_compute(move || { + Ok(blocking_compute(move || { if selected_nodes.is_empty() { return vec![]; } @@ -512,11 +509,11 @@ impl GqlGraph { None => vec![], } }) - .await + .await) } /// Export all nodes and edges from this graph view to another existing graph - async fn export_to<'a>(&self, ctx: &Context<'a>, path: String) -> Result { + async fn export_to<'a>(&self, ctx: &Context<'a>, path: String) -> Result { let data = ctx.data_unchecked::(); let other_g = data.get_graph(path.as_ref()).await?.graph; let g = self.graph.clone(); @@ -602,7 +599,7 @@ impl GqlGraph { filter: GqlNodeFilter, limit: usize, offset: usize, - ) -> Result, GraphError> { + ) -> Result> { #[cfg(feature = "search")] { let self_clone = self.clone(); @@ -628,7 +625,7 @@ impl GqlGraph { filter: GqlEdgeFilter, limit: usize, offset: usize, - ) -> Result, GraphError> { + ) -> Result> { #[cfg(feature = "search")] { let self_clone = self.clone(); diff --git a/raphtory-graphql/src/model/graph/meta_graph.rs b/raphtory-graphql/src/model/graph/meta_graph.rs index 3e34abbccf..534183fde3 100644 --- a/raphtory-graphql/src/model/graph/meta_graph.rs +++ b/raphtory-graphql/src/model/graph/meta_graph.rs @@ -48,6 +48,10 @@ impl MetaGraph { } } + pub(crate) fn local_path(&self) -> &str { + self.folder.local_path() + } + async fn meta(&self) -> Result<&GraphMetadata> { Ok(self .meta diff --git a/raphtory-graphql/src/model/graph/mod.rs b/raphtory-graphql/src/model/graph/mod.rs index 726fac2b67..3464a89bcc 100644 --- 
a/raphtory-graphql/src/model/graph/mod.rs +++ b/raphtory-graphql/src/model/graph/mod.rs @@ -6,7 +6,7 @@ pub(crate) mod collection; mod document; pub(crate) mod edge; mod edges; -pub(crate) mod filtering; +pub mod filtering; pub(crate) mod graph; pub(crate) mod history; pub(crate) mod index; diff --git a/raphtory-graphql/src/model/graph/namespace.rs b/raphtory-graphql/src/model/graph/namespace.rs index 89f259d011..e80f09f4b3 100644 --- a/raphtory-graphql/src/model/graph/namespace.rs +++ b/raphtory-graphql/src/model/graph/namespace.rs @@ -1,14 +1,16 @@ use crate::{ - data::get_relative_path, + auth_policy::{AuthorizationPolicy, NamespacePermission}, + data::{get_relative_path, Data}, model::graph::{ collection::GqlCollection, meta_graph::MetaGraph, namespaced_item::NamespacedItem, }, paths::{ExistingGraphFolder, PathValidationError, ValidPath}, rayon::blocking_compute, }; +use async_graphql::Context; use dynamic_graphql::{ResolvedObject, ResolvedObjectFields}; use itertools::Itertools; -use std::path::PathBuf; +use std::{path::PathBuf, sync::Arc}; use walkdir::WalkDir; #[derive(ResolvedObject, Clone, Ord, Eq, PartialEq, PartialOrd)] @@ -135,24 +137,46 @@ impl Namespace { } } +fn is_graph_visible( + ctx: &Context<'_>, + policy: &Option>, + g: &MetaGraph, +) -> bool { + policy + .as_ref() + .map_or(true, |p| p.graph_permissions(ctx, &g.local_path()).is_ok()) +} + +fn is_namespace_visible( + ctx: &Context<'_>, + policy: &Option>, + n: &Namespace, +) -> bool { + policy.as_ref().map_or(true, |p| { + p.namespace_permissions(ctx, &n.relative_path) >= NamespacePermission::Discover + }) +} + #[ResolvedObjectFields] impl Namespace { - async fn graphs(&self) -> GqlCollection { + async fn graphs(&self, ctx: &Context<'_>) -> GqlCollection { + let data = ctx.data_unchecked::(); let self_clone = self.clone(); - blocking_compute(move || { - GqlCollection::new( - self_clone - .get_children() - .into_iter() - .filter_map(|g| match g { - NamespacedItem::MetaGraph(g) => Some(g), - 
NamespacedItem::Namespace(_) => None, - }) - .sorted() - .collect(), - ) - }) - .await + let items = blocking_compute(move || self_clone.get_children().collect::>()).await; + GqlCollection::new( + items + .into_iter() + .filter_map(|item| match item { + NamespacedItem::MetaGraph(g) + if is_graph_visible(ctx, &data.auth_policy, &g) => + { + Some(g) + } + _ => None, + }) + .sorted() + .collect(), + ) } async fn path(&self) -> String { self.relative_path.clone() @@ -174,28 +198,42 @@ impl Namespace { } } - async fn children(&self) -> GqlCollection { + async fn children(&self, ctx: &Context<'_>) -> GqlCollection { + let data = ctx.data_unchecked::(); let self_clone = self.clone(); - blocking_compute(move || { - GqlCollection::new( - self_clone - .get_children() - .filter_map(|item| match item { - NamespacedItem::MetaGraph(_) => None, - NamespacedItem::Namespace(n) => Some(n), - }) - .sorted() - .collect(), - ) - }) - .await + let items = blocking_compute(move || self_clone.get_children().collect::>()).await; + GqlCollection::new( + items + .into_iter() + .filter_map(|item| match item { + NamespacedItem::Namespace(n) + if is_namespace_visible(ctx, &data.auth_policy, &n) => + { + Some(n) + } + _ => None, + }) + .sorted() + .collect(), + ) } // Fetch the collection of namespaces/graphs in this namespace. // Namespaces will be listed before graphs. 
- async fn items(&self) -> GqlCollection { + async fn items(&self, ctx: &Context<'_>) -> GqlCollection { + let data = ctx.data_unchecked::(); let self_clone = self.clone(); - blocking_compute(move || GqlCollection::new(self_clone.get_children().sorted().collect())) - .await + let all_items = + blocking_compute(move || self_clone.get_children().collect::>()).await; + GqlCollection::new( + all_items + .into_iter() + .filter(|item| match item { + NamespacedItem::MetaGraph(g) => is_graph_visible(ctx, &data.auth_policy, g), + NamespacedItem::Namespace(n) => is_namespace_visible(ctx, &data.auth_policy, n), + }) + .sorted() + .collect(), + ) } } diff --git a/raphtory-graphql/src/model/graph/property.rs b/raphtory-graphql/src/model/graph/property.rs index 8321302ab3..d08b3f377c 100644 --- a/raphtory-graphql/src/model/graph/property.rs +++ b/raphtory-graphql/src/model/graph/property.rs @@ -28,6 +28,7 @@ use raphtory_api::core::{ utils::time::{IntoTime, TryIntoTime}, }; use rustc_hash::FxHashMap; +use serde::{Deserialize, Serialize}; use serde_json::Number; use std::{ collections::HashMap, @@ -38,7 +39,7 @@ use std::{ sync::Arc, }; -#[derive(InputObject, Clone, Debug)] +#[derive(InputObject, Clone, Debug, Serialize, Deserialize)] pub struct ObjectEntry { /// Key. pub key: String, @@ -46,7 +47,8 @@ pub struct ObjectEntry { pub value: Value, } -#[derive(OneOfInput, Clone, Debug)] +#[derive(OneOfInput, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub enum Value { /// 8 bit unsigned integer. U8(u8), diff --git a/raphtory-graphql/src/model/graph/timeindex.rs b/raphtory-graphql/src/model/graph/timeindex.rs index 42cc7e713a..840ef37688 100644 --- a/raphtory-graphql/src/model/graph/timeindex.rs +++ b/raphtory-graphql/src/model/graph/timeindex.rs @@ -5,12 +5,13 @@ use raphtory_api::core::{ storage::timeindex::{AsTime, EventTime}, utils::time::{IntoTime, TryIntoTime}, }; +use serde::{Deserialize, Serialize}; /// Input for primary time component. 
Expects Int, DateTime formatted String, or Object { timestamp, eventId } /// where the timestamp is either an Int or a DateTime formatted String, and eventId is a non-negative Int. /// Valid string formats are RFC3339, RFC2822, %Y-%m-%d, %Y-%m-%dT%H:%M:%S%.3f, %Y-%m-%dT%H:%M:%S%, /// %Y-%m-%d %H:%M:%S%.3f and %Y-%m-%d %H:%M:%S%. -#[derive(Scalar, Clone, Debug)] +#[derive(Scalar, Clone, Debug, Serialize, Deserialize)] #[graphql(name = "TimeInput")] pub struct GqlTimeInput(pub EventTime); @@ -87,11 +88,7 @@ impl IntoTime for GqlTimeInput { } pub fn dt_format_str_is_valid(fmt_str: &str) -> bool { - if StrftimeItems::new(fmt_str).any(|it| matches!(it, Item::Error)) { - false - } else { - true - } + !StrftimeItems::new(fmt_str).any(|it| matches!(it, Item::Error)) } /// Raphtory’s EventTime. diff --git a/raphtory-graphql/src/model/mod.rs b/raphtory-graphql/src/model/mod.rs index 3044b7b64d..86a284858e 100644 --- a/raphtory-graphql/src/model/mod.rs +++ b/raphtory-graphql/src/model/mod.rs @@ -1,15 +1,25 @@ use crate::{ - auth::ContextValidation, - data::{Data, DeletionError}, + auth::{AuthError, ContextValidation}, + auth_policy::{AuthPolicyError, AuthorizationPolicy, GraphPermission, NamespacePermission}, + data::Data, model::{ graph::{ - collection::GqlCollection, graph::GqlGraph, index::IndexSpecInput, - mutable_graph::GqlMutableGraph, namespace::Namespace, namespaced_item::NamespacedItem, + collection::GqlCollection, + filtering::{GqlEdgeFilter, GqlNodeFilter, GraphAccessFilter}, + graph::GqlGraph, + index::IndexSpecInput, + meta_graph::MetaGraph, + mutable_graph::GqlMutableGraph, + namespace::Namespace, + namespaced_item::NamespacedItem, vectorised_graph::GqlVectorisedGraph, }, - plugins::{mutation_plugin::MutationPlugin, query_plugin::QueryPlugin}, + plugins::{ + mutation_plugin::MutationPlugin, query_plugin::QueryPlugin, PermissionsEntrypointMut, + PermissionsEntrypointQuery, + }, }, - paths::{ValidGraphPaths, ValidWriteableGraphFolder}, + 
paths::{ExistingGraphFolder, ValidGraphPaths, ValidWriteableGraphFolder}, rayon::blocking_compute, url_encode::{url_decode_graph_at, url_encode_graph}, }; @@ -23,9 +33,9 @@ use raphtory::{ db::{ api::{ storage::storage::{Extension, PersistenceStrategy}, - view::MaterializedGraph, + view::{DynamicGraph, Filter, IntoDynamic, MaterializedGraph}, }, - graph::views::deletion_graph::PersistentGraph, + graph::views::{deletion_graph::PersistentGraph, filter::model::NodeViewFilterOps}, }, errors::GraphError, prelude::*, @@ -34,9 +44,13 @@ use raphtory::{ use std::{ error::Error, fmt::{Display, Formatter}, + future::Future, + pin::Pin, + sync::Arc, }; +use tracing::{error, warn}; -pub(crate) mod graph; +pub mod graph; pub mod plugins; pub(crate) mod schema; pub(crate) mod sorting; @@ -84,6 +98,241 @@ pub enum GqlGraphType { Event, } +/// Checks that the caller has at least READ permission for the graph at `path`. +/// Returns the effective `GraphPermission` (including any stored filter) on success. +/// When denied and the caller has no INTROSPECT on the parent namespace, returns a +/// "Graph does not exist" error to avoid leaking that the graph is present. +fn require_at_least_read( + ctx: &Context<'_>, + policy: &Option>, + path: &str, +) -> async_graphql::Result { + if let Some(policy) = policy { + let role = ctx.data::>().ok().and_then(|r| r.as_deref()); + match policy.graph_permissions(ctx, path) { + Err(msg) => { + let ns = parent_namespace(path); + if policy.namespace_permissions(ctx, ns) >= NamespacePermission::Introspect { + warn!( + role = role.unwrap_or(""), + graph = path, + "Access denied by auth policy" + ); + return Err(msg.into()); + } else { + // Don't leak graph existence — act as if it doesn't exist. 
+ return Err(async_graphql::Error::new(MissingGraph.to_string())); + } + } + Ok(perm) => { + if let Some(p) = perm.at_least_read() { + return Ok(p); + } else { + warn!( + role = role.unwrap_or(""), + graph = path, + "Introspect-only access — graph() denied; use graphMetadata() instead" + ); + return Err(async_graphql::Error::new(format!( + "Access denied: role '{}' has introspect-only access to graph '{path}' — \ + use graphMetadata(path:) for counts and timestamps, or namespace listings to browse graphs", + role.unwrap_or("") + ))); + } + } + } + } + Ok(GraphPermission::Write) +} + +/// Applies a stored data filter (serialised as `serde_json::Value` with optional `node`, `edge`, +/// `graph` keys) to a `DynamicGraph`, returning a new filtered view. +fn apply_graph_filter( + mut graph: DynamicGraph, + filter: GraphAccessFilter, +) -> Pin> + Send>> { + Box::pin(async move { + use raphtory::db::graph::views::filter::model::{ + edge_filter::CompositeEdgeFilter, node_filter::CompositeNodeFilter, DynView, + }; + + match filter { + GraphAccessFilter::Node(gql_filter) => { + let raphtory_filter = CompositeNodeFilter::try_from(gql_filter).map_err(|e| { + error!(error = %e, "node filter conversion failed"); + async_graphql::Error::new("internal error applying access filter") + })?; + graph = blocking_compute({ + let g = graph.clone(); + move || g.filter(raphtory_filter) + }) + .await + .map_err(|e| { + error!(error = %e, "node filter apply failed"); + async_graphql::Error::new("internal error applying access filter") + })? 
+ .into_dynamic(); + } + GraphAccessFilter::Edge(gql_filter) => { + let raphtory_filter = CompositeEdgeFilter::try_from(gql_filter).map_err(|e| { + error!(error = %e, "edge filter conversion failed"); + async_graphql::Error::new("internal error applying access filter") + })?; + graph = blocking_compute({ + let g = graph.clone(); + move || g.filter(raphtory_filter) + }) + .await + .map_err(|e| { + error!(error = %e, "edge filter apply failed"); + async_graphql::Error::new("internal error applying access filter") + })? + .into_dynamic(); + } + GraphAccessFilter::Graph(gql_filter) => { + let dyn_view = DynView::try_from(gql_filter).map_err(|e| { + error!(error = %e, "graph filter conversion failed"); + async_graphql::Error::new("internal error applying access filter") + })?; + graph = blocking_compute({ + let g = graph.clone(); + move || g.filter(dyn_view) + }) + .await + .map_err(|e| { + error!(error = %e, "graph filter apply failed"); + async_graphql::Error::new("internal error applying access filter") + })? + .into_dynamic(); + } + GraphAccessFilter::And(filters) => { + for f in filters { + graph = apply_graph_filter(graph, f).await?; + } + } + GraphAccessFilter::Or(filters) => { + // Group same-type sub-filters and combine with native Or; + // cross-type sub-filters are applied as independent restrictions. 
+ let mut node_fs: Vec = vec![]; + let mut edge_fs: Vec = vec![]; + let mut rest: Vec = vec![]; + for f in filters { + match f { + GraphAccessFilter::Node(n) => node_fs.push(n), + GraphAccessFilter::Edge(e) => edge_fs.push(e), + other => rest.push(other), + } + } + if !node_fs.is_empty() { + let combined = if node_fs.len() == 1 { + node_fs.pop().unwrap() + } else { + GqlNodeFilter::Or(node_fs) + }; + graph = apply_graph_filter(graph, GraphAccessFilter::Node(combined)).await?; + } + if !edge_fs.is_empty() { + let combined = if edge_fs.len() == 1 { + edge_fs.pop().unwrap() + } else { + GqlEdgeFilter::Or(edge_fs) + }; + graph = apply_graph_filter(graph, GraphAccessFilter::Edge(combined)).await?; + } + for f in rest { + graph = apply_graph_filter(graph, f).await?; + } + } + } + + Ok(graph) + }) +} + +/// Returns the namespace portion of a graph path: everything before the last `/`. +/// For top-level graphs (no `/`), returns `""` (the root namespace). +fn parent_namespace(path: &str) -> &str { + path.rfind('/').map(|i| &path[..i]).unwrap_or("") +} + +fn write_denied(role: Option<&str>, msg: impl std::fmt::Display) -> async_graphql::Error { + match role { + Some(_) => async_graphql::Error::new(msg.to_string()), + None => AuthError::RequireWrite.into(), + } +} + +fn require_graph_write( + ctx: &Context<'_>, + policy: &Option>, + path: &str, +) -> async_graphql::Result<()> { + match policy { + None => ctx.require_jwt_write_access().map_err(Into::into), + Some(p) => { + let role = ctx.data::>().ok().and_then(|r| r.as_deref()); + p.graph_permissions(ctx, path) + .map_err(async_graphql::Error::from)? 
+ .at_least_write() + .ok_or_else(|| { + write_denied( + role, + format!("Access denied: WRITE permission required for graph '{path}'"), + ) + })?; + Ok(()) + } + } +} + +fn require_namespace_write( + ctx: &Context<'_>, + policy: &Option>, + ns_path: &str, + new_path: &str, + operation: &str, +) -> async_graphql::Result<()> { + match policy { + None => ctx.require_jwt_write_access().map_err(Into::into), + Some(p) => { + let role = ctx.data::>().ok().and_then(|r| r.as_deref()); + if p.namespace_permissions(ctx, ns_path) < NamespacePermission::Write { + return Err(write_denied( + role, + format!("Access denied: WRITE required on namespace '{ns_path}' to {operation} graph '{new_path}'"), + )); + } + Ok(()) + } + } +} + +fn require_graph_read_src( + ctx: &Context<'_>, + policy: &Option>, + path: &str, + operation: &str, +) -> async_graphql::Result<()> { + match policy { + None => ctx.require_jwt_write_access().map_err(Into::into), + Some(p) => { + let role = ctx.data::>().ok().and_then(|r| r.as_deref()); + p.graph_permissions(ctx, path) + .map_err(async_graphql::Error::from)? + .at_least_read() + .ok_or_else(|| { + write_denied( + role, + format!( + "Access denied: READ required on source graph '{path}' to {operation}" + ), + ) + })?; + Ok(()) + } + } +} + #[derive(ResolvedObject)] #[graphql(root)] pub(crate) struct QueryRoot; @@ -96,17 +345,65 @@ impl QueryRoot { } /// Returns a graph - async fn graph<'a>(ctx: &Context<'a>, path: &str) -> Result { + async fn graph<'a>(ctx: &Context<'a>, path: &str) -> Result> { + let data = ctx.data_unchecked::(); + + // Permission check: Err (denied or introspect-only) is converted to Ok(None) so the + // user sees null — indistinguishable from "graph not found". Warnings are logged inside + // require_at_least_read for cases where the user has namespace INTROSPECT visibility. 
+ let perms = match require_at_least_read(ctx, &data.auth_policy, path) { + Ok(p) => p, + Err(_) => return Ok(None), + }; + + let graph_with_vecs = data.get_graph(path).await?; + let graph: DynamicGraph = graph_with_vecs.graph.into_dynamic(); + + let graph = if let GraphPermission::Read { + filter: Some(ref f), + } = perms + { + apply_graph_filter(graph, f.clone()).await? + } else { + graph + }; + + Ok(Some(GqlGraph::new(graph_with_vecs.folder, graph))) + } + + /// Returns lightweight metadata for a graph (node/edge counts, timestamps) without loading it. + /// Requires at least INTROSPECT permission. + async fn graph_metadata<'a>(ctx: &Context<'a>, path: String) -> Result> { let data = ctx.data_unchecked::(); - Ok(data.get_graph(path).await?.into()) + + if let Some(policy) = &data.auth_policy { + let role = ctx.data::>().ok().and_then(|r| r.as_deref()); + if let Err(_) = policy.graph_permissions(ctx, &path) { + let ns = parent_namespace(&path); + if policy.namespace_permissions(ctx, ns) >= NamespacePermission::Introspect { + warn!( + role = role.unwrap_or(""), + graph = path.as_str(), + "Access denied by auth policy" + ); + } + // Always return null — permission denial is indistinguishable from "not found" + // from the user's perspective. The warning above is the only signal in the logs. 
+ return Ok(None); + } + } + + let folder = ExistingGraphFolder::try_from(data.work_dir.clone(), &path) + .map_err(|e| async_graphql::Error::new(e.to_string()))?; + Ok(Some(MetaGraph::new(folder))) } /// Update graph query, has side effects to update graph state /// /// Returns:: GqlMutableGraph async fn update_graph<'a>(ctx: &Context<'a>, path: String) -> Result { - ctx.require_write_access()?; let data = ctx.data_unchecked::(); + require_graph_write(ctx, &data.auth_policy, &path)?; let graph = data.get_graph(path.as_ref()).await?.into(); @@ -116,10 +413,18 @@ impl QueryRoot { /// Create vectorised graph in the format used for queries /// /// Returns:: GqlVectorisedGraph - async fn vectorised_graph<'a>(ctx: &Context<'a>, path: &str) -> Option { + async fn vectorised_graph<'a>( + ctx: &Context<'a>, + path: &str, + ) -> Result> { let data = ctx.data_unchecked::(); - let g = data.get_graph(path).await.ok()?.vectors?; - Some(g.into()) + require_at_least_read(ctx, &data.auth_policy, path)?; + Ok(data + .get_graph(path) + .await + .ok() + .and_then(|g| g.vectors) + .map(|v| v.into())) } /// Returns all namespaces using recursive search @@ -162,14 +467,26 @@ impl QueryRoot { QueryPlugin::default() } - /// Encodes graph and returns as string + /// Encodes graph and returns as string. + /// If the caller has filtered access, the returned graph is a materialized view of the filter. 
/// /// Returns:: Base64 url safe encoded string async fn receive_graph<'a>(ctx: &Context<'a>, path: String) -> Result { - let path = path.as_ref(); let data = ctx.data_unchecked::(); - let g = data.get_graph(path).await?.graph.clone(); - let res = url_encode_graph(g)?; + let perm = require_at_least_read(ctx, &data.auth_policy, &path)?; + let raw = data.get_graph(&path).await?.graph; + let res = if let GraphPermission::Read { + filter: Some(ref f), + } = perm + { + let filtered = apply_graph_filter(raw.into_dynamic(), f.clone()).await?; + let materialized = blocking_compute(move || filtered.materialize()) + .await + .map_err(|e| async_graphql::Error::new(e.to_string()))?; + url_encode_graph(materialized)? + } else { + url_encode_graph(raw)? + }; Ok(res) } @@ -193,8 +510,11 @@ impl Mut { /// Delete graph from a path on the server. // If namespace is not provided, it will be set to the current working directory. - async fn delete_graph<'a>(ctx: &Context<'a>, path: String) -> Result { + async fn delete_graph<'a>(ctx: &Context<'a>, path: String) -> Result { let data = ctx.data_unchecked::(); + require_graph_write(ctx, &data.auth_policy, &path)?; + let src_ns = parent_namespace(&path); + require_namespace_write(ctx, &data.auth_policy, src_ns, &path, "delete")?; data.delete_graph(&path).await?; Ok(true) } @@ -206,6 +526,8 @@ impl Mut { graph_type: GqlGraphType, ) -> Result { let data = ctx.data_unchecked::(); + let ns = parent_namespace(&path); + require_namespace_write(ctx, &data.auth_policy, ns, &path, "create")?; let overwrite = false; let folder = data.validate_path_for_insert(&path, overwrite)?; let graph_path = folder.graph_folder(); @@ -233,8 +555,14 @@ impl Mut { new_path: &str, overwrite: Option, ) -> Result { - Self::copy_graph(ctx, path, new_path, overwrite).await?; let data = ctx.data_unchecked::(); + // src: require WRITE on graph (moving = deleting source) + require_graph_write(ctx, &data.auth_policy, path)?; + // src: require WRITE on parent namespace 
(removing graph from namespace) + let src_ns = parent_namespace(path); + require_namespace_write(ctx, &data.auth_policy, src_ns, path, "move")?; + // copy_graph handles dst namespace WRITE check (and src READ, which WRITE implies) + Self::copy_graph(ctx, path, new_path, overwrite).await?; data.delete_graph(path).await?; Ok(true) } @@ -246,11 +574,14 @@ impl Mut { new_path: &str, overwrite: Option, ) -> Result { + let data = ctx.data_unchecked::(); + require_graph_read_src(ctx, &data.auth_policy, path, "copy it")?; + let dst_ns = parent_namespace(new_path); + require_namespace_write(ctx, &data.auth_policy, dst_ns, new_path, "create")?; // doing this in a more efficient way is not trivial, this at least is correct // there are questions like, maybe the new vectorised graph have different rules // for the templates or if it needs to be vectorised at all let overwrite = overwrite.unwrap_or(false); - let data = ctx.data_unchecked::(); let graph = data.get_graph(path).await?.graph; let folder = data.validate_path_for_insert(new_path, overwrite)?; data.insert_graph(folder, graph).await?; @@ -269,6 +600,8 @@ impl Mut { overwrite: bool, ) -> Result { let data = ctx.data_unchecked::(); + let dst_ns = parent_namespace(&path); + require_namespace_write(ctx, &data.auth_policy, dst_ns, &path, "upload")?; let in_file = graph.value(ctx)?.content; let folder = data.validate_path_for_insert(&path, overwrite)?; data.insert_graph_as_bytes(folder, in_file).await?; @@ -287,6 +620,8 @@ impl Mut { overwrite: bool, ) -> Result { let data = ctx.data_unchecked::(); + let dst_ns = parent_namespace(path); + require_namespace_write(ctx, &data.auth_policy, dst_ns, path, "send")?; let folder = if overwrite { ValidWriteableGraphFolder::try_existing_or_new(data.work_dir.clone(), path)? 
} else { @@ -314,6 +649,9 @@ impl Mut { overwrite: bool, ) -> Result { let data = ctx.data_unchecked::(); + require_graph_read_src(ctx, &data.auth_policy, parent_path, "create a subgraph")?; + let dst_ns = parent_namespace(&new_path); + require_namespace_write(ctx, &data.auth_policy, dst_ns, &new_path, "create")?; let folder = data.validate_path_for_insert(&new_path, overwrite)?; let parent_graph = data.get_graph(parent_path).await?.graph; let folder_clone = folder.clone(); @@ -338,9 +676,10 @@ impl Mut { index_spec: Option, in_ram: bool, ) -> Result { + let data = ctx.data_unchecked::(); + require_graph_write(ctx, &data.auth_policy, path)?; #[cfg(feature = "search")] { - let data = ctx.data_unchecked::(); let graph = data.get_graph(path).await?.graph; match index_spec { Some(index_spec) => { @@ -370,4 +709,10 @@ impl Mut { } #[derive(App)] -pub struct App(QueryRoot, MutRoot, Mut); +pub struct App( + QueryRoot, + MutRoot, + Mut, + PermissionsEntrypointMut, + PermissionsEntrypointQuery, +); diff --git a/raphtory-graphql/src/model/plugins/mod.rs b/raphtory-graphql/src/model/plugins/mod.rs index d499e1d39c..8cab98fafa 100644 --- a/raphtory-graphql/src/model/plugins/mod.rs +++ b/raphtory-graphql/src/model/plugins/mod.rs @@ -7,7 +7,36 @@ pub mod graph_algorithm_plugin; pub mod mutation_entry_point; pub mod mutation_plugin; pub mod operation; +pub mod permissions_entrypoint; +pub mod permissions_plugin; pub mod query_entry_point; pub mod query_plugin; +pub use permissions_entrypoint::{PermissionsEntrypointMut, PermissionsEntrypointQuery}; + pub type RegisterFunction = Box (Registry, Object) + Send>; + +/// Register an operation into the `PermissionsPlugin` entry point (mutation root). +/// Call this before `GraphServer::run()` / `create_schema()`. 
+pub fn register_permissions_mutation<O>(name: &'static str)
+where
+    O: for<'a> operation::Operation<'a, permissions_plugin::PermissionsPlugin> + 'static,
+{
+    permissions_plugin::PERMISSIONS_MUTATIONS
+        .lock()
+        .unwrap()
+        .insert(name.to_string(), Box::new(O::register_operation));
+}
+
+/// Register an operation into the `PermissionsQueryPlugin` entry point (query root).
+/// Ops registered here must call `require_jwt_write_access_dynamic` themselves since they
+/// are not covered by the `MutationAuth` extension.
+pub fn register_permissions_query<O>(name: &'static str)
+where
+    O: for<'a> operation::Operation<'a, permissions_plugin::PermissionsQueryPlugin> + 'static,
+{
+    permissions_plugin::PERMISSIONS_QUERIES
+        .lock()
+        .unwrap()
+        .insert(name.to_string(), Box::new(O::register_operation));
+}
diff --git a/raphtory-graphql/src/model/plugins/permissions_entrypoint.rs b/raphtory-graphql/src/model/plugins/permissions_entrypoint.rs
new file mode 100644
index 0000000000..18e3235979
--- /dev/null
+++ b/raphtory-graphql/src/model/plugins/permissions_entrypoint.rs
@@ -0,0 +1,61 @@
+use super::permissions_plugin::{
+    PermissionsPlugin, PermissionsQueryPlugin, PERMISSIONS_MUT_ENTRYPOINT,
+    PERMISSIONS_QRY_ENTRYPOINT,
+};
+use crate::auth::require_jwt_write_access_dynamic;
+use async_graphql::dynamic::{Field, FieldFuture, FieldValue, TypeRef};
+use dynamic_graphql::internal::{Register, Registry};
+use std::sync::atomic::Ordering;
+
+/// Conditionally adds the `permissions` field to the root Mutation type.
+/// Only registers when `register_permissions_entrypoint()` has been called
+/// (i.e., when RBAC is configured via `raphtory-auth::init()`).
+pub struct PermissionsEntrypointMut;
+
+/// Conditionally adds the `permissions` field to the root Query type.
+/// Only registers when `register_permissions_query_entrypoint()` has been called.
+pub struct PermissionsEntrypointQuery;
+
+impl Register for PermissionsEntrypointMut {
+    fn register(registry: Registry) -> Registry {
+        if !PERMISSIONS_MUT_ENTRYPOINT.load(Ordering::SeqCst) {
+            return registry;
+        }
+        let registry = registry.register::<PermissionsPlugin>();
+        registry.update_object("MutRoot", "PermissionsEntrypointMut", |obj| {
+            obj.field(Field::new(
+                "permissions",
+                TypeRef::named_nn("PermissionsPlugin"),
+                |ctx| {
+                    FieldFuture::new(async move {
+                        require_jwt_write_access_dynamic(&ctx)?;
+                        Ok(Some(FieldValue::owned_any(PermissionsPlugin::default())))
+                    })
+                },
+            ))
+        })
+    }
+}
+
+impl Register for PermissionsEntrypointQuery {
+    fn register(registry: Registry) -> Registry {
+        if !PERMISSIONS_QRY_ENTRYPOINT.load(Ordering::SeqCst) {
+            return registry;
+        }
+        let registry = registry.register::<PermissionsQueryPlugin>();
+        registry.update_object("QueryRoot", "PermissionsEntrypointQuery", |obj| {
+            obj.field(Field::new(
+                "permissions",
+                TypeRef::named_nn("PermissionsQueryPlugin"),
+                |ctx| {
+                    FieldFuture::new(async move {
+                        require_jwt_write_access_dynamic(&ctx)?;
+                        Ok(Some(FieldValue::owned_any(
+                            PermissionsQueryPlugin::default(),
+                        )))
+                    })
+                },
+            ))
+        })
+    }
+}
diff --git a/raphtory-graphql/src/model/plugins/permissions_plugin.rs b/raphtory-graphql/src/model/plugins/permissions_plugin.rs
new file mode 100644
index 0000000000..187cb9f41a
--- /dev/null
+++ b/raphtory-graphql/src/model/plugins/permissions_plugin.rs
@@ -0,0 +1,96 @@
+use super::RegisterFunction;
+use crate::model::plugins::entry_point::EntryPoint;
+use async_graphql::{dynamic::FieldValue, indexmap::IndexMap, Context};
+use dynamic_graphql::internal::{OutputTypeName, Register, Registry, ResolveOwned, TypeName};
+use once_cell::sync::Lazy;
+use std::{
+    borrow::Cow,
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Mutex, MutexGuard,
+    },
+};
+
+pub(crate) static PERMISSIONS_MUT_ENTRYPOINT: AtomicBool = AtomicBool::new(false);
+pub(crate) static PERMISSIONS_QRY_ENTRYPOINT: AtomicBool = AtomicBool::new(false);
+
+pub fn 
register_permissions_entrypoint() {
+    PERMISSIONS_MUT_ENTRYPOINT.store(true, Ordering::SeqCst);
+}
+
+pub fn register_permissions_query_entrypoint() {
+    PERMISSIONS_QRY_ENTRYPOINT.store(true, Ordering::SeqCst);
+}
+
+pub static PERMISSIONS_MUTATIONS: Lazy<Mutex<IndexMap<String, RegisterFunction>>> =
+    Lazy::new(|| Mutex::new(IndexMap::new()));
+
+pub static PERMISSIONS_QUERIES: Lazy<Mutex<IndexMap<String, RegisterFunction>>> =
+    Lazy::new(|| Mutex::new(IndexMap::new()));
+
+#[derive(Clone, Default)]
+pub struct PermissionsPlugin;
+
+impl<'a> EntryPoint<'a> for PermissionsPlugin {
+    fn predefined_operations() -> IndexMap<&'static str, RegisterFunction> {
+        IndexMap::new()
+    }
+
+    fn lock_plugins() -> MutexGuard<'static, IndexMap<String, RegisterFunction>> {
+        PERMISSIONS_MUTATIONS.lock().unwrap()
+    }
+}
+
+impl Register for PermissionsPlugin {
+    fn register(registry: Registry) -> Registry {
+        Self::register_operations(registry)
+    }
+}
+
+impl TypeName for PermissionsPlugin {
+    fn get_type_name() -> Cow<'static, str> {
+        "PermissionsPlugin".into()
+    }
+}
+
+impl OutputTypeName for PermissionsPlugin {}
+
+impl<'a> ResolveOwned<'a> for PermissionsPlugin {
+    fn resolve_owned(self, _ctx: &Context) -> dynamic_graphql::Result<Option<FieldValue<'a>>> {
+        Ok(Some(FieldValue::owned_any(self)))
+    }
+}
+
+/// Read-only entry point for permissions queries (admin-gated via require_jwt_write_access_dynamic).
+#[derive(Clone, Default)]
+pub struct PermissionsQueryPlugin;
+
+impl<'a> EntryPoint<'a> for PermissionsQueryPlugin {
+    fn predefined_operations() -> IndexMap<&'static str, RegisterFunction> {
+        IndexMap::new()
+    }
+
+    fn lock_plugins() -> MutexGuard<'static, IndexMap<String, RegisterFunction>> {
+        PERMISSIONS_QUERIES.lock().unwrap()
+    }
+}
+
+impl Register for PermissionsQueryPlugin {
+    fn register(registry: Registry) -> Registry {
+        Self::register_operations(registry)
+    }
+}
+
+impl TypeName for PermissionsQueryPlugin {
+    fn get_type_name() -> Cow<'static, str> {
+        "PermissionsQueryPlugin".into()
+    }
+}
+
+impl OutputTypeName for PermissionsQueryPlugin {}
+
+impl<'a> ResolveOwned<'a> for PermissionsQueryPlugin {
+    fn resolve_owned(self, _ctx: &Context) -> dynamic_graphql::Result<Option<FieldValue<'a>>> {
+        Ok(Some(FieldValue::owned_any(self)))
+    }
+}
diff --git a/raphtory-graphql/src/python/pymodule.rs b/raphtory-graphql/src/python/pymodule.rs
index 53c00759b6..fa19c92795 100644
--- a/raphtory-graphql/src/python/pymodule.rs
+++ b/raphtory-graphql/src/python/pymodule.rs
@@ -13,6 +13,12 @@ use crate::{
 };
 use pyo3::prelude::*;
 
+/// Returns True if the permissions extension (raphtory-auth) is compiled in.
+#[pyfunction] +pub fn has_permissions_extension() -> bool { + crate::server::has_server_extension() +} + pub fn base_graphql_module(py: Python<'_>) -> Result, PyErr> { let graphql_module = PyModule::new(py, "graphql")?; graphql_module.add_class::()?; @@ -33,6 +39,10 @@ pub fn base_graphql_module(py: Python<'_>) -> Result, PyErr> graphql_module.add_function(wrap_pyfunction!(decode_graph, &graphql_module)?)?; graphql_module.add_function(wrap_pyfunction!(schema, &graphql_module)?)?; graphql_module.add_function(wrap_pyfunction!(python_cli, &graphql_module)?)?; + graphql_module.add_function(wrap_pyfunction!( + has_permissions_extension, + &graphql_module + )?)?; Ok(graphql_module) } diff --git a/raphtory-graphql/src/python/server/server.rs b/raphtory-graphql/src/python/server/server.rs index 96e3108cf6..3a7febe928 100644 --- a/raphtory-graphql/src/python/server/server.rs +++ b/raphtory-graphql/src/python/server/server.rs @@ -6,6 +6,7 @@ use crate::{ python::server::{ running_server::PyRunningGraphServer, take_server_ownership, wait_server, BridgeCommand, }, + server::apply_server_extension, GraphServer, }; use pyo3::{ @@ -36,7 +37,7 @@ use std::{path::PathBuf, sync::Arc, thread}; /// otlp_tracing_service_name (str, optional): The OTLP tracing service name /// config_path (str | PathLike, optional): Path to the config file /// auth_public_key: -/// auth_enabled_for_reads: +/// require_auth_for_reads: /// create_index: #[pyclass(name = "GraphServer", module = "raphtory.graphql")] pub struct PyGraphServer(pub Option); @@ -86,7 +87,7 @@ impl PyGraphServer { impl PyGraphServer { #[new] #[pyo3( - signature = (work_dir, cache_capacity = None, cache_tti_seconds = None, log_level = None, tracing=None, tracing_level=None, otlp_agent_host=None, otlp_agent_port=None, otlp_tracing_service_name=None, auth_public_key=None, auth_enabled_for_reads=None, config_path = None, create_index = None) + signature = (work_dir, cache_capacity = None, cache_tti_seconds = None, log_level = None, 
tracing=None, tracing_level=None, otlp_agent_host=None, otlp_agent_port=None, otlp_tracing_service_name=None, auth_public_key=None, require_auth_for_reads=None, config_path = None, create_index = None, permissions_store_path = None) )] fn py_new( work_dir: PathBuf, @@ -99,9 +100,10 @@ impl PyGraphServer { otlp_agent_port: Option, otlp_tracing_service_name: Option, auth_public_key: Option, - auth_enabled_for_reads: Option, + require_auth_for_reads: Option, config_path: Option, create_index: Option, + permissions_store_path: Option, ) -> PyResult { let mut app_config_builder = AppConfigBuilder::new(); if let Some(log_level) = log_level { @@ -139,9 +141,9 @@ impl PyGraphServer { app_config_builder = app_config_builder .with_auth_public_key(auth_public_key) .map_err(|_| PyValueError::new_err(PUBLIC_KEY_DECODING_ERR_MSG))?; - if let Some(auth_enabled_for_reads) = auth_enabled_for_reads { + if let Some(require_auth_for_reads) = require_auth_for_reads { app_config_builder = - app_config_builder.with_auth_enabled_for_reads(auth_enabled_for_reads); + app_config_builder.with_require_auth_for_reads(require_auth_for_reads); } #[cfg(feature = "search")] if let Some(create_index) = create_index { @@ -150,6 +152,7 @@ impl PyGraphServer { let app_config = Some(app_config_builder.build()); let server = GraphServer::new(work_dir, app_config, config_path, Config::default())?; + let server = apply_server_extension(server, permissions_store_path.as_deref()); Ok(PyGraphServer::new(server)) } diff --git a/raphtory-graphql/src/server.rs b/raphtory-graphql/src/server.rs index 7d8342e1ab..5e79bc7d0b 100644 --- a/raphtory-graphql/src/server.rs +++ b/raphtory-graphql/src/server.rs @@ -1,5 +1,6 @@ use crate::{ auth::{AuthenticatedGraphQL, MutationAuth}, + auth_policy::AuthorizationPolicy, config::app_config::{load_config, AppConfig}, data::{Data, EmbeddingConf}, model::{ @@ -11,6 +12,7 @@ use crate::{ server::ServerError::SchemaError, }; use config::ConfigError; +use once_cell::sync::Lazy; use 
opentelemetry::trace::TracerProvider;
 use opentelemetry_sdk::trace::{Tracer, TracerProvider as TP};
 use poem::{
@@ -29,6 +31,7 @@ use serde_json::json;
 use std::{
     fs::create_dir_all,
     path::{Path, PathBuf},
+    sync::RwLock,
 };
 use thiserror::Error;
 use tokio::{
@@ -42,7 +45,7 @@ use tokio::{
     task,
     task::JoinHandle,
 };
-use tracing::{debug, info};
+use tracing::{debug, info, warn};
 use tracing_subscriber::{
     fmt, fmt::format::FmtSpan, layer::SubscriberExt, util::SubscriberInitExt, Registry,
 };
@@ -50,6 +53,25 @@ use url::ParseError;
 
 pub const DEFAULT_PORT: u16 = 1736;
 
+type ServerExtensionFn = Box<dyn Fn(GraphServer, Option<&Path>) -> GraphServer + Send + Sync>;
+
+static SERVER_EXTENSION: Lazy<RwLock<Option<ServerExtensionFn>>> = Lazy::new(|| RwLock::new(None));
+
+pub fn register_server_extension(f: ServerExtensionFn) {
+    *SERVER_EXTENSION.write().unwrap() = Some(f);
+}
+
+pub fn apply_server_extension(server: GraphServer, path: Option<&Path>) -> GraphServer {
+    match SERVER_EXTENSION.read().unwrap().as_ref() {
+        Some(ext) => ext(server, path),
+        None => server,
+    }
+}
+
+pub fn has_server_extension() -> bool {
+    SERVER_EXTENSION.read().unwrap().is_some()
+}
+
 #[derive(Error, Debug)]
 pub enum ServerError {
     #[error("Config error: {0}")]
@@ -78,10 +100,17 @@ impl From<ServerError> for io::Error {
     }
 }
 
+type SchemaDataInjector = Box<
+    dyn FnOnce(async_graphql::dynamic::SchemaBuilder) -> async_graphql::dynamic::SchemaBuilder
+        + Send
+        + Sync,
+>;
+
 /// A struct for defining and running a Raphtory GraphQL server
 pub struct GraphServer {
     data: Data,
     config: AppConfig,
+    schema_data: Vec<SchemaDataInjector>,
 }
 
 pub fn register_query_plugin<
@@ -120,7 +149,16 @@ impl GraphServer {
         }
         let config = load_config(app_config, config_path).map_err(ServerError::ConfigError)?;
         let data = Data::new(work_dir.as_path(), &config, graph_config);
-        Ok(Self { data, config })
+        Ok(Self {
+            data,
+            config,
+            schema_data: Vec::new(),
+        })
+    }
+
+    /// Returns the working directory for this server.
+ pub fn work_dir(&self) -> &Path { + &self.data.work_dir } /// Turn off index for all graphs @@ -129,6 +167,18 @@ impl GraphServer { self } + /// Set the authorization policy used for graph access checks. + pub fn with_auth_policy(mut self, policy: std::sync::Arc) -> Self { + self.data.auth_policy = Some(policy); + self + } + + /// Inject arbitrary typed data into the GQL schema (accessible via `ctx.data::()`). + pub fn with_schema_data(mut self, data: T) -> Self { + self.schema_data.push(Box::new(move |sb| sb.data(data))); + self + } + pub async fn set_embeddings( mut self, embedding: F, @@ -242,9 +292,12 @@ impl GraphServer { self, tracer: Option, ) -> Result>, ServerError> { - let schema_builder = App::create_schema(); - let schema_builder = schema_builder.data(self.data); - let schema_builder = schema_builder.extension(MutationAuth); + let mut schema_builder = App::create_schema(); + schema_builder = schema_builder.data(self.data); + for inject in self.schema_data { + schema_builder = inject(schema_builder); + } + schema_builder = schema_builder.extension(MutationAuth); let trace_level = self.config.tracing.tracing_level.clone(); let schema = if let Some(t) = tracer { schema_builder diff --git a/raphtory-server/Cargo.toml b/raphtory-server/Cargo.toml new file mode 100644 index 0000000000..8015995507 --- /dev/null +++ b/raphtory-server/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "raphtory-server" +version.workspace = true +edition.workspace = true + +[[bin]] +name = "raphtory-server" +path = "src/main.rs" + +[dependencies] +raphtory-graphql = { workspace = true } +auth = { workspace = true } +tokio = { workspace = true } diff --git a/raphtory-graphql/src/main.rs b/raphtory-server/src/main.rs similarity index 87% rename from raphtory-graphql/src/main.rs rename to raphtory-server/src/main.rs index 9a15f7580b..733c411eb4 100644 --- a/raphtory-graphql/src/main.rs +++ b/raphtory-server/src/main.rs @@ -2,5 +2,6 @@ use std::io::Result as IoResult; #[tokio::main] 
async fn main() -> IoResult<()> { + auth::init(); raphtory_graphql::cli::cli().await }