diff --git a/crates/openshell-sandbox/data/sandbox-policy.rego b/crates/openshell-sandbox/data/sandbox-policy.rego index ded7c8c1..1544dfe5 100644 --- a/crates/openshell-sandbox/data/sandbox-policy.rego +++ b/crates/openshell-sandbox/data/sandbox-policy.rego @@ -171,16 +171,25 @@ network_action := "allow" if { default allow_request = false -# L7 request allowed if: L4 policy matches AND the specific endpoint's rules allow the request. +# Per-policy helper: true when this single policy has at least one endpoint +# matching the L4 request whose L7 rules also permit the specific request. +# Isolating the endpoint iteration inside a function avoids the regorus +# "duplicated definition of local variable" error that occurs when the +# outer `some name` iterates over multiple policies that share a host:port. +_policy_allows_l7(policy) if { + some ep + ep := policy.endpoints[_] + endpoint_matches_request(ep, input.network) + request_allowed_for_endpoint(input.request, ep) +} + +# L7 request allowed if any matching L4 policy also allows the L7 request. allow_request if { some name policy := data.network_policies[name] endpoint_allowed(policy, input.network) binary_allowed(policy, input.exec) - some ep - ep := policy.endpoints[_] - endpoint_matches_request(ep, input.network) - request_allowed_for_endpoint(input.request, ep) + _policy_allows_l7(policy) } # --- L7 deny reason --- @@ -239,19 +248,24 @@ command_matches(actual, expected) if { # Used by Rust to extract L7 config (protocol, tls, enforcement) and/or # allowed_ips for SSRF allowlist validation. -# Collect all matching endpoint configs into an array to avoid complete-rule -# conflicts when multiple policies cover the same endpoint. Return the first. -_matching_endpoint_configs := [ep | - some name - policy := data.network_policies[name] - endpoint_allowed(policy, input.network) - binary_allowed(policy, input.exec) +# Per-policy helper: returns matching endpoint configs for a single policy. 
+_policy_endpoint_configs(policy) := [ep | some ep ep := policy.endpoints[_] endpoint_matches_request(ep, input.network) endpoint_has_extended_config(ep) ] +# Collect matching endpoint configs across all policies. Iterates over +# _matching_policy_names (a set, safe from regorus variable collisions) +# then collects per-policy configs via the helper function. +_matching_endpoint_configs := [cfg | + some pname + _matching_policy_names[pname] + cfgs := _policy_endpoint_configs(data.network_policies[pname]) + cfg := cfgs[_] +] + matched_endpoint_config := _matching_endpoint_configs[0] if { count(_matching_endpoint_configs) > 0 } diff --git a/crates/openshell-sandbox/src/opa.rs b/crates/openshell-sandbox/src/opa.rs index bfe9c68f..cd2931b3 100644 --- a/crates/openshell-sandbox/src/opa.rs +++ b/crates/openshell-sandbox/src/opa.rs @@ -1568,6 +1568,92 @@ process: assert_eq!(val, regorus::Value::from(true)); } + // ======================================================================== + // Overlapping policies (duplicate host:port) — regression tests + // ======================================================================== + + /// Two network_policies entries covering the same host:port with L7 rules. + /// Before the fix, this caused regorus to fail with + /// "duplicated definition of local variable ep" in allow_request. 
+ const OVERLAPPING_L7_TEST_DATA: &str = r#" +network_policies: + test_server: + name: test_server + endpoints: + - host: 192.168.1.100 + port: 8567 + protocol: rest + enforcement: enforce + rules: + - allow: + method: GET + path: "**" + binaries: + - { path: /usr/bin/curl } + allow_192_168_1_100_8567: + name: allow_192_168_1_100_8567 + endpoints: + - host: 192.168.1.100 + port: 8567 + protocol: rest + enforcement: enforce + allowed_ips: + - 192.168.1.100 + rules: + - allow: + method: GET + path: "**" + binaries: + - { path: /usr/bin/curl } +filesystem_policy: + include_workdir: true + read_only: [] + read_write: [] +landlock: + compatibility: best_effort +process: + run_as_user: sandbox + run_as_group: sandbox +"#; + + #[test] + fn l7_overlapping_policies_allow_request_does_not_crash() { + let engine = OpaEngine::from_strings(TEST_POLICY, OVERLAPPING_L7_TEST_DATA) + .expect("engine should load overlapping data"); + let input = l7_input("192.168.1.100", 8567, "GET", "/test"); + // Should not panic or error — must evaluate to true. + assert!(eval_l7(&engine, &input)); + } + + #[test] + fn l7_overlapping_policies_deny_request_does_not_crash() { + let engine = OpaEngine::from_strings(TEST_POLICY, OVERLAPPING_L7_TEST_DATA) + .expect("engine should load overlapping data"); + let input = l7_input("192.168.1.100", 8567, "DELETE", "/test"); + // DELETE is not in the rules, so should deny — but must not crash. + assert!(!eval_l7(&engine, &input)); + } + + #[test] + fn overlapping_policies_endpoint_config_returns_result() { + let engine = OpaEngine::from_strings(TEST_POLICY, OVERLAPPING_L7_TEST_DATA) + .expect("engine should load overlapping data"); + let input = NetworkInput { + host: "192.168.1.100".into(), + port: 8567, + binary_path: PathBuf::from("/usr/bin/curl"), + binary_sha256: String::new(), + ancestors: vec![], + cmdline_paths: vec![], + }; + // Should return config from one of the entries without error. 
+ let config = engine.query_endpoint_config(&input).unwrap(); + assert!( + config.is_some(), + "Expected endpoint config for overlapping policies" + ); + } + // ======================================================================== // network_action tests // ======================================================================== diff --git a/crates/openshell-server/src/grpc.rs b/crates/openshell-server/src/grpc.rs index dd0aa7b2..fd4bf585 100644 --- a/crates/openshell-server/src/grpc.rs +++ b/crates/openshell-server/src/grpc.rs @@ -2522,22 +2522,63 @@ async fn merge_chunk_into_policy( let base_version = latest.as_ref().map_or(0, |r| r.version); - // Merge: if a rule for this endpoint already exists, add the binary - // to its binaries list. Otherwise insert the whole proposed rule. - if let Some(existing) = policy.network_policies.get_mut(&chunk.rule_name) { - // Add the chunk's binary if not already present. + // Merge: find an existing rule that covers the same (host, port), + // regardless of its map key / rule name. This prevents duplicate + // entries when the mechanistic mapper generates a name like + // "allow_192_168_86_157_8567" and the user's original rule uses a + // different name (e.g. "test_server"). + // + // Search order: + // 1. Exact rule_name match (fast path — covers auto-generated chunks + // being re-approved and user rules whose names happen to match). + // 2. Scan all entries for a host:port endpoint match. + // 3. Fall through to insertion if neither matches. 
+ let chunk_host_lc = chunk.host.to_lowercase(); + let chunk_port = chunk.port as u32; + + let merge_key = if policy.network_policies.contains_key(&chunk.rule_name) { + Some(chunk.rule_name.clone()) + } else { + policy + .network_policies + .iter() + .find_map(|(key, existing_rule)| { + let has_match = existing_rule.endpoints.iter().any(|ep| { + let host_match = ep.host.to_lowercase() == chunk_host_lc; + let port_match = if ep.ports.is_empty() { + ep.port == chunk_port + } else { + ep.ports.contains(&chunk_port) + }; + host_match && port_match + }); + has_match.then(|| key.clone()) + }) + }; + + if let Some(key) = merge_key { + let existing = policy.network_policies.get_mut(&key).unwrap(); + // Add the chunk's binaries if not already present. for b in &rule.binaries { if !existing.binaries.iter().any(|eb| eb.path == b.path) { existing.binaries.push(b.clone()); } } - // Also merge endpoints and L7 rules in case they differ. + // Merge endpoints: for matching host:port, merge fields like + // allowed_ips into the existing endpoint rather than duplicating. for ep in &rule.endpoints { - if !existing - .endpoints - .iter() - .any(|e| e.host == ep.host && e.port == ep.port) - { + if let Some(existing_ep) = existing.endpoints.iter_mut().find(|e| { + e.host.to_lowercase() == ep.host.to_lowercase() + && (e.port == ep.port + || (!e.ports.is_empty() && e.ports.contains(&ep.port))) + }) { + // Merge allowed_ips into the existing endpoint. 
+ for ip in &ep.allowed_ips { + if !existing_ep.allowed_ips.contains(ip) { + existing_ep.allowed_ips.push(ip.clone()); + } + } + } else { existing.endpoints.push(ep.clone()); } } @@ -5356,6 +5397,206 @@ mod tests { assert_eq!(stored_rule.binaries[0].path, "/usr/bin/curl"); } + #[tokio::test] + async fn merge_chunk_merges_into_existing_rule_by_host_port() { + // When a user's policy has a rule named "test_server" covering + // 192.168.1.100:8567, and the mechanistic mapper generates a chunk + // named "allow_192_168_1_100_8567" for the same host:port, the merge + // should add allowed_ips into the existing "test_server" entry rather + // than creating a duplicate. + use openshell_core::proto::{ + NetworkBinary, NetworkEndpoint, NetworkPolicyRule, SandboxPolicy, + }; + + let store = Store::connect("sqlite::memory:").await.unwrap(); + let sandbox_id = "sb-merge"; + + // Seed an initial policy with a user-named rule. + let initial_policy = SandboxPolicy { + network_policies: [( + "test_server".to_string(), + NetworkPolicyRule { + name: "test_server".to_string(), + endpoints: vec![NetworkEndpoint { + host: "192.168.1.100".to_string(), + port: 8567, + ..Default::default() + }], + binaries: vec![NetworkBinary { + path: "/usr/bin/curl".to_string(), + ..Default::default() + }], + }, + )] + .into_iter() + .collect(), + ..Default::default() + }; + store + .put_policy_revision( + "p-seed", + sandbox_id, + 1, + &initial_policy.encode_to_vec(), + "seed-hash", + ) + .await + .unwrap(); + + // Build a chunk with a different rule_name but same host:port. 
+ let proposed = NetworkPolicyRule { + name: "allow_192_168_1_100_8567".to_string(), + endpoints: vec![NetworkEndpoint { + host: "192.168.1.100".to_string(), + port: 8567, + allowed_ips: vec!["192.168.1.100".to_string()], + ..Default::default() + }], + binaries: vec![NetworkBinary { + path: "/usr/bin/curl".to_string(), + ..Default::default() + }], + }; + let chunk = DraftChunkRecord { + id: "chunk-merge".to_string(), + sandbox_id: sandbox_id.to_string(), + draft_version: 1, + status: "pending".to_string(), + rule_name: "allow_192_168_1_100_8567".to_string(), + proposed_rule: proposed.encode_to_vec(), + rationale: String::new(), + security_notes: String::new(), + confidence: 0.3, + created_at_ms: 0, + decided_at_ms: None, + host: "192.168.1.100".to_string(), + port: 8567, + binary: "/usr/bin/curl".to_string(), + hit_count: 1, + first_seen_ms: 0, + last_seen_ms: 0, + }; + + let (version, _) = merge_chunk_into_policy(&store, sandbox_id, &chunk) + .await + .unwrap(); + assert_eq!(version, 2); + + let latest = store + .get_latest_policy(sandbox_id) + .await + .unwrap() + .expect("policy revision should be persisted"); + let policy = SandboxPolicy::decode(latest.policy_payload.as_slice()).unwrap(); + + // Should have exactly one network_policies entry (no duplicate). + assert_eq!( + policy.network_policies.len(), + 1, + "expected 1 rule, got {}: {:?}", + policy.network_policies.len(), + policy.network_policies.keys().collect::<Vec<_>>() + ); + // The entry should keep the user's original key name. + let rule = policy + .network_policies + .get("test_server") + .expect("original rule name 'test_server' should be preserved"); + assert_eq!(rule.endpoints[0].host, "192.168.1.100"); + // allowed_ips should have been merged in. 
+ assert_eq!(rule.endpoints[0].allowed_ips, vec!["192.168.1.100"]); + } + + #[tokio::test] + async fn merge_chunk_new_host_port_inserts_new_entry() { + // When a chunk's host:port doesn't match any existing rule, it should + // be inserted as a new entry (existing behavior preserved). + use openshell_core::proto::{ + NetworkBinary, NetworkEndpoint, NetworkPolicyRule, SandboxPolicy, + }; + + let store = Store::connect("sqlite::memory:").await.unwrap(); + let sandbox_id = "sb-new"; + + let initial_policy = SandboxPolicy { + network_policies: [( + "existing_rule".to_string(), + NetworkPolicyRule { + name: "existing_rule".to_string(), + endpoints: vec![NetworkEndpoint { + host: "api.example.com".to_string(), + port: 443, + ..Default::default() + }], + binaries: vec![NetworkBinary { + path: "/usr/bin/curl".to_string(), + ..Default::default() + }], + }, + )] + .into_iter() + .collect(), + ..Default::default() + }; + store + .put_policy_revision( + "p-seed", + sandbox_id, + 1, + &initial_policy.encode_to_vec(), + "seed-hash", + ) + .await + .unwrap(); + + // Chunk for a different host:port. 
+ let proposed = NetworkPolicyRule { + name: "allow_10_0_0_5_8080".to_string(), + endpoints: vec![NetworkEndpoint { + host: "10.0.0.5".to_string(), + port: 8080, + allowed_ips: vec!["10.0.0.5".to_string()], + ..Default::default() + }], + binaries: vec![NetworkBinary { + path: "/usr/bin/curl".to_string(), + ..Default::default() + }], + }; + let chunk = DraftChunkRecord { + id: "chunk-new".to_string(), + sandbox_id: sandbox_id.to_string(), + draft_version: 1, + status: "pending".to_string(), + rule_name: "allow_10_0_0_5_8080".to_string(), + proposed_rule: proposed.encode_to_vec(), + rationale: String::new(), + security_notes: String::new(), + confidence: 0.3, + created_at_ms: 0, + decided_at_ms: None, + host: "10.0.0.5".to_string(), + port: 8080, + binary: "/usr/bin/curl".to_string(), + hit_count: 1, + first_seen_ms: 0, + last_seen_ms: 0, + }; + + let (version, _) = merge_chunk_into_policy(&store, sandbox_id, &chunk) + .await + .unwrap(); + assert_eq!(version, 2); + + let latest = store.get_latest_policy(sandbox_id).await.unwrap().unwrap(); + let policy = SandboxPolicy::decode(latest.policy_payload.as_slice()).unwrap(); + + // Should have two entries now. 
+ assert_eq!(policy.network_policies.len(), 2); + assert!(policy.network_policies.contains_key("existing_rule")); + assert!(policy.network_policies.contains_key("allow_10_0_0_5_8080")); + } + // ── petname default name generation ─────────────────────────────── /// Verify that `petname::petname(2, "-")` produces names matching the diff --git a/e2e/python/test_sandbox_policy.py b/e2e/python/test_sandbox_policy.py index e0269bbc..1615bef6 100644 --- a/e2e/python/test_sandbox_policy.py +++ b/e2e/python/test_sandbox_policy.py @@ -607,10 +607,14 @@ def test_ssrf_blocks_metadata_endpoint_despite_policy_allow( assert "403" in result.stdout -def test_ssrf_log_shows_internal_address_block( +def test_ssrf_log_shows_blocked_address( sandbox: Callable[..., Sandbox], ) -> None: - """SSRF-3: Proxy log includes 'internal address' reason when SSRF check fires.""" + """SSRF-3: Proxy log includes block reason when SSRF check fires. + + Loopback addresses are always-blocked even with implicit allowed_ips. + The log should show 'always-blocked' for 127.0.0.1. + """ policy = _base_policy( network_policies={ "internal": sandbox_pb2.NetworkPolicyRule( @@ -629,8 +633,8 @@ def test_ssrf_log_shows_internal_address_block( log_result = sb.exec_python(_read_openshell_log()) assert log_result.exit_code == 0, log_result.stderr log = log_result.stdout - assert "internal address" in log.lower(), ( - f"Expected 'internal address' in proxy log, got:\n{log}" + assert "always-blocked" in log.lower(), ( + f"Expected 'always-blocked' in proxy log, got:\n{log}" ) @@ -716,16 +720,21 @@ def test_ssrf_allowed_ips_hostless_permits_private_ip( ) -def test_ssrf_private_ip_blocked_without_allowed_ips( +def test_ssrf_private_ip_allowed_with_literal_ip_host( sandbox: Callable[..., Sandbox], ) -> None: - """SSRF-6: Private IP blocked when endpoint has no allowed_ips (default).""" + """SSRF-6: Private IP allowed when policy host is a literal IP address. 
+ + When the policy endpoint host is a literal IP, the user has explicitly + declared intent. The proxy synthesizes an implicit allowed_ips entry, + so the CONNECT succeeds (200) even without explicit allowed_ips. + """ policy = _base_policy( network_policies={ "internal": sandbox_pb2.NetworkPolicyRule( name="internal", endpoints=[ - # No allowed_ips — private IP should be blocked + # No allowed_ips — but host is a literal IP, so implicit sandbox_pb2.NetworkEndpoint(host="10.200.0.1", port=19999), ], binaries=[sandbox_pb2.NetworkBinary(path="/**")], @@ -736,8 +745,11 @@ def test_ssrf_private_ip_blocked_without_allowed_ips( with sandbox(spec=spec, delete_on_exit=True) as sb: result = sb.exec_python(_proxy_connect(), args=("10.200.0.1", 19999)) assert result.exit_code == 0, result.stderr - assert "403" in result.stdout, ( - "Expected private IP to be blocked without allowed_ips" + # Should not get 403 — the SSRF check should pass. + # The actual TCP connection may fail (nothing listening on 19999) + # so recv() might return empty, but 403 must not appear. + assert "403" not in result.stdout, ( + "Expected SSRF check to pass for literal IP host, but got 403" ) @@ -1393,20 +1405,21 @@ def test_forward_proxy_allows_private_ip_with_allowed_ips( ) -def test_forward_proxy_denied_without_allowed_ips( +def test_forward_proxy_allows_private_ip_host_without_allowed_ips( sandbox: Callable[..., Sandbox], ) -> None: - """FWD-2: Forward proxy to private IP without allowed_ips -> 403. + """FWD-2: Forward proxy to literal IP host without allowed_ips -> 200. - Even though the endpoint matches, forward proxy requires explicit - allowed_ips on the endpoint. + When the policy host field is a literal IP address, the user has explicitly + declared intent to allow that destination. The SSRF guard synthesizes an + implicit allowed_ips entry, so explicit allowed_ips is not required. 
""" policy = _base_policy( network_policies={ "internal_http": sandbox_pb2.NetworkPolicyRule( name="internal_http", endpoints=[ - # No allowed_ips — forward proxy should be denied + # No allowed_ips — but host is a literal IP, so implicit sandbox_pb2.NetworkEndpoint( host=_SANDBOX_IP, port=_FORWARD_PROXY_PORT, @@ -1419,16 +1432,15 @@ def test_forward_proxy_denied_without_allowed_ips( spec = datamodel_pb2.SandboxSpec(policy=policy) with sandbox(spec=spec, delete_on_exit=True) as sb: result = sb.exec_python( - _forward_proxy_raw(), - args=( - _PROXY_HOST, - _PROXY_PORT, - f"http://{_SANDBOX_IP}:{_FORWARD_PROXY_PORT}/test", - ), + _forward_proxy_with_server(), + args=(_PROXY_HOST, _PROXY_PORT, _SANDBOX_IP, _FORWARD_PROXY_PORT), ) assert result.exit_code == 0, result.stderr - assert "403" in result.stdout, ( - f"Expected 403 without allowed_ips, got: {result.stdout}" + assert "200" in result.stdout, ( + f"Expected 200 for literal IP host, got: {result.stdout}" + ) + assert "forward-proxy-ok" in result.stdout, ( + f"Expected response body relayed, got: {result.stdout}" ) @@ -1918,3 +1930,104 @@ def test_host_wildcard_rejects_deep_subdomain( assert "403" in result.stdout, ( f"*.anthropic.com should NOT match deep.sub.anthropic.com: {result.stdout}" ) + + +# ============================================================================= +# Overlapping policies (duplicate host:port) — regression tests +# ============================================================================= + + +def test_overlapping_policies_do_not_crash_opa( + sandbox: Callable[..., Sandbox], +) -> None: + """OVL-1: Two policies covering the same host:port must not crash OPA. + + After a draft rule approval, the merged policy can contain two entries + for the same (host, port). The OPA engine must handle this without + a 'duplicated definition of local variable' error. This test creates + the overlap directly to simulate the post-approval state. 
+ """ + policy = _base_policy( + network_policies={ + "user_rule": sandbox_pb2.NetworkPolicyRule( + name="user_rule", + endpoints=[ + sandbox_pb2.NetworkEndpoint( + host=_SANDBOX_IP, + port=_FORWARD_PROXY_PORT, + ), + ], + binaries=[sandbox_pb2.NetworkBinary(path="/**")], + ), + "approved_rule": sandbox_pb2.NetworkPolicyRule( + name="approved_rule", + endpoints=[ + sandbox_pb2.NetworkEndpoint( + host=_SANDBOX_IP, + port=_FORWARD_PROXY_PORT, + allowed_ips=["10.200.0.0/24"], + ), + ], + binaries=[sandbox_pb2.NetworkBinary(path="/**")], + ), + }, + ) + spec = datamodel_pb2.SandboxSpec(policy=policy) + with sandbox(spec=spec, delete_on_exit=True) as sb: + result = sb.exec_python( + _forward_proxy_with_server(), + args=(_PROXY_HOST, _PROXY_PORT, _SANDBOX_IP, _FORWARD_PROXY_PORT), + ) + assert result.exit_code == 0, result.stderr + assert "200" in result.stdout, ( + f"Overlapping policies should not crash; expected 200, got: {result.stdout}" + ) + + +def test_overlapping_policies_l7_connect_does_not_crash( + sandbox: Callable[..., Sandbox], +) -> None: + """OVL-2: CONNECT to overlapping L7 policies must not crash OPA. + + Two policies with L7 rules (protocol: rest) covering the same host:port + must evaluate without a regorus variable collision error. 
+ """ + policy = _base_policy( + network_policies={ + "user_api": sandbox_pb2.NetworkPolicyRule( + name="user_api", + endpoints=[ + sandbox_pb2.NetworkEndpoint( + host="api.anthropic.com", + port=443, + protocol="rest", + enforcement="enforce", + access="read-only", + ), + ], + binaries=[sandbox_pb2.NetworkBinary(path="/**")], + ), + "auto_approved_api": sandbox_pb2.NetworkPolicyRule( + name="auto_approved_api", + endpoints=[ + sandbox_pb2.NetworkEndpoint( + host="api.anthropic.com", + port=443, + protocol="rest", + enforcement="enforce", + access="read-only", + ), + ], + binaries=[sandbox_pb2.NetworkBinary(path="/**")], + ), + }, + ) + spec = datamodel_pb2.SandboxSpec(policy=policy) + with sandbox(spec=spec, delete_on_exit=True) as sb: + # CONNECT should succeed at the tunnel level (200 Connection Established) + # even with two overlapping L7 policies. + result = sb.exec_python(_proxy_connect(), args=("api.anthropic.com", 443)) + assert result.exit_code == 0, result.stderr + assert "200" in result.stdout, ( + f"Overlapping L7 policies should not crash; expected 200, got: {result.stdout}" + ) diff --git a/e2e/python/test_sandbox_providers.py b/e2e/python/test_sandbox_providers.py index 2cdba29c..4cbd76b5 100644 --- a/e2e/python/test_sandbox_providers.py +++ b/e2e/python/test_sandbox_providers.py @@ -280,8 +280,12 @@ def test_update_provider_preserves_unset_credentials_and_config( got = stub.GetProvider(openshell_pb2.GetProviderRequest(name=name)) p = got.provider - # Credentials are redacted in gRPC responses (security hardening). - assert len(p.credentials) == 0, "credentials must be redacted in gRPC responses" + # Credential keys are preserved but values are redacted. 
+ assert len(p.credentials) > 0, "credential keys should be preserved" + for key, val in p.credentials.items(): + assert val == "REDACTED", ( + f"credential '{key}' should be REDACTED, got '{val}'" + ) assert p.config["BASE_URL"] == "https://example.com", ( "config should be preserved" ) @@ -320,8 +324,12 @@ def test_update_provider_empty_maps_preserves_all( got = stub.GetProvider(openshell_pb2.GetProviderRequest(name=name)) p = got.provider - # Credentials are redacted in gRPC responses (security hardening). - assert len(p.credentials) == 0, "credentials must be redacted in gRPC responses" + # Credential keys are preserved but values are redacted. + assert len(p.credentials) > 0, "credential keys should be preserved" + for key, val in p.credentials.items(): + assert val == "REDACTED", ( + f"credential '{key}' should be REDACTED, got '{val}'" + ) assert p.config["URL"] == "https://api.example.com" finally: _delete_provider(stub, name) @@ -359,8 +367,12 @@ def test_update_provider_merges_config_preserves_credentials( got = stub.GetProvider(openshell_pb2.GetProviderRequest(name=name)) p = got.provider - # Credentials are redacted in gRPC responses (security hardening). - assert len(p.credentials) == 0, "credentials must be redacted in gRPC responses" + # Credential keys are preserved but values are redacted. + assert len(p.credentials) > 0, "credential keys should be preserved" + for key, val in p.credentials.items(): + assert val == "REDACTED", ( + f"credential '{key}' should be REDACTED, got '{val}'" + ) assert p.config["ENDPOINT"] == "https://new.example.com" finally: _delete_provider(stub, name)