From e7f30203a3cbe506956381796cfbe99b3404591c Mon Sep 17 00:00:00 2001 From: JasonVranek Date: Wed, 18 Mar 2026 17:52:44 +0000 Subject: [PATCH 01/25] v0.9.4-rc1 changelog notes (#437) --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..1bebb339 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,4 @@ +### v0.9.4-rc1 +- Unifies the `pbs`, `signer`, and `cli` binaries into one: `commit-boost`. This change changes the CLI, notably the `init` command is now invoked as `commit-boost init --config `. +- Includes new quality of life testing improvements in the Justfile: unit test coverage tooling, local Kurtosis testnet, and microbenchmark diffing. +- Robustifies the release process to ensure no compromised maintainer can unilaterally cut a release. Additionally all binaries are now signed during CI and can easily be verified before use. From 04ae8b724dbb251862b05c7767ef177b3f7c398f Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 12:10:05 -0700 Subject: [PATCH 02/25] address Dirk issues in docker_init.rs: - add missing ADMIN_JWT_ENV and SIGNER_TLS_CERTIFICATES_PATH_ENV - support https healthchecks --- crates/cli/src/docker_init.rs | 79 ++++++++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 7976ce17..f2c5e2e4 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -497,6 +497,8 @@ fn create_signer_service_dirk( let mut envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), + get_env_val(SIGNER_TLS_CERTIFICATES_PATH_ENV, SIGNER_TLS_CERTIFICATES_PATH_DEFAULT), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), @@ -548,6 +550,7 @@ fn 
create_signer_service_dirk( // write jwts to env service_config.envs.insert(JWTS_ENV.into(), format_comma_separated(&service_config.jwts)); + service_config.envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // CA cert volume and env if let Some(ca_cert_path) = ca_cert_path { @@ -589,8 +592,8 @@ fn create_signer_service_dirk( environment: Environment::KvPair(envs), healthcheck: Some(Healthcheck { test: Some(HealthcheckTest::Single(format!( - "curl -f http://localhost:{}/status", - signer_config.port, + "curl -k -f {}/status", + cb_config.signer_server_url(SIGNER_PORT_DEFAULT), ))), interval: Some("30s".into()), timeout: Some("5s".into()), @@ -932,6 +935,13 @@ mod tests { service.volumes.iter().any(|v| matches!(v, Volumes::Simple(s) if s.contains(substr))) } + fn get_healthcheck_cmd(service: &Service) -> Option { + service.healthcheck.as_ref().and_then(|hc| match &hc.test { + Some(HealthcheckTest::Single(cmd)) => Some(cmd.clone()), + _ => None, + }) + } + fn has_port(service: &Service, substr: &str) -> bool { match &service.ports { Ports::Short(ports) => ports.iter().any(|p| p.contains(substr)), @@ -1309,12 +1319,33 @@ mod tests { assert!(env_str(&service, DIRK_CERT_ENV).is_some()); assert!(env_str(&service, DIRK_KEY_ENV).is_some()); assert!(env_str(&service, DIRK_DIR_SECRETS_ENV).is_some()); + assert!(has_env_key(&service, ADMIN_JWT_ENV)); + assert!(has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); assert!(has_volume(&service, "client.crt")); assert!(has_volume(&service, "client.key")); assert!(has_volume(&service, "dirk_secrets")); Ok(()) } + #[test] + fn test_create_signer_service_dirk_generates_admin_jwt() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let admin_jwt = 
sc.envs.get(ADMIN_JWT_ENV).expect("ADMIN_JWT_ENV must be set"); + assert!(!admin_jwt.is_empty(), "admin JWT secret must not be empty"); + Ok(()) + } + #[test] fn test_create_signer_service_dirk_with_ca_cert() -> eyre::Result<()> { let mut sc = minimal_service_config(); @@ -1690,6 +1721,50 @@ mod tests { Ok(()) } + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_https_with_tls() -> eyre::Result<()> { + let dir = tempfile::tempdir()?; + let certs_path = dir.path().to_path_buf(); + std::fs::write(certs_path.join(SIGNER_TLS_CERTIFICATE_NAME), b"cert")?; + std::fs::write(certs_path.join(SIGNER_TLS_KEY_NAME), b"key")?; + + let mut sc = service_config_with_tls(certs_path); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("https://"), "healthcheck must use https with TLS: {cmd}"); + assert!(cmd.contains("-k"), "healthcheck must use -k flag for self-signed certs: {cmd}"); + Ok(()) + } + + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_http_without_tls() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("http://"), "healthcheck must use http without TLS: {cmd}"); + Ok(()) + } + // ------------------------------------------------------------------------- // create_module_service – TLS cert env/volume // ------------------------------------------------------------------------- From 
6ec37820207a28789c0bef343b764614b9b8d20e Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 12:31:49 -0700 Subject: [PATCH 03/25] cleaner error message if TLS CryptoProvider fails --- crates/signer/src/service.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index fdf86879..b056c96c 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -177,15 +177,14 @@ impl SigningService { break; } Err(e) => { + if attempts >= 3 { + return Err(eyre::eyre!( + "Exceeded maximum attempts to install AWS-LC as default TLS provider: {e:?}" + )); + } error!( "Failed to install AWS-LC as default TLS provider: {e:?}. Retrying..." ); - if attempts >= 3 { - error!( - "Exceeded maximum attempts to install AWS-LC as default TLS provider" - ); - break; - } attempts += 1; } } From 58b4b2254837c8a89034151d3290e8740f71ec91 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 13:03:26 -0700 Subject: [PATCH 04/25] users get error message on missing [signer] section instead of toml deserialization error message --- crates/common/src/config/module.rs | 4 ++-- crates/common/src/config/pbs.rs | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 22884551..aec45289 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -83,7 +83,7 @@ pub fn load_commit_module_config() -> Result { chain: Chain, modules: Vec>, - signer: SignerConfig, + signer: Option, } // load module config including the extra data (if any) @@ -106,7 +106,7 @@ pub fn load_commit_module_config() -> Result None, TlsMode::Certificate(path) => Some( load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 30964a20..907fbecf 100644 --- a/crates/common/src/config/pbs.rs +++ 
b/crates/common/src/config/pbs.rs @@ -347,7 +347,7 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC chain: Chain, relays: Vec, pbs: CustomPbsConfig, - signer: SignerConfig, + signer: Option, muxes: Option, } @@ -404,7 +404,11 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC // if custom pbs requires a signer client, load jwt let module_jwt = Jwt(load_env_var(MODULE_JWT_ENV)?); let signer_server_url = load_env_var(SIGNER_URL_ENV)?.parse()?; - let certs_path = match cb_config.signer.tls_mode { + let certs_path = match cb_config + .signer + .ok_or_else(|| eyre::eyre!("with_signer = true but no [signer] section in config"))? + .tls_mode + { TlsMode::Insecure => None, TlsMode::Certificate(path) => Some( load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) From 001b31231fb670c1c8f6193d7d1c18a5cfc522e4 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:06:20 -0700 Subject: [PATCH 05/25] support partial jwt reloads --- crates/signer/src/service.rs | 180 ++++++++++++++++++++++++++++++++--- 1 file changed, 168 insertions(+), 12 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index b056c96c..b301f8dc 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -632,7 +632,6 @@ async fn handle_reload( ) -> Result { debug!(event = "reload", "New request"); - // Regenerate the config let config = match StartSignerConfig::load_from_env() { Ok(config) => config, Err(err) => { @@ -641,7 +640,6 @@ async fn handle_reload( } }; - // Start a new manager with the updated config let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -650,17 +648,24 @@ async fn handle_reload( } }; - // Update the JWT configs if provided in the request + apply_reload(state, request, new_manager).await +} + +/// Applies a reload request to the signing state. 
Separated from +/// `handle_reload` so the business logic can be tested without requiring a +/// live environment (config file, env vars, keystore on disk). +async fn apply_reload( + state: SigningState, + request: ReloadRequest, + new_manager: SigningManager, +) -> Result { + // Update the JWT configs if provided in the request. Only the provided + // modules are updated; omitted modules keep their existing secrets. if let Some(jwt_secrets) = request.jwt_secrets { let mut jwt_configs = state.jwts.write(); - let mut new_configs = HashMap::new(); for (module_id, jwt_secret) in jwt_secrets { - if let Some(signing_id) = jwt_configs.get(&module_id).map(|cfg| cfg.signing_id) { - new_configs.insert(module_id.clone(), ModuleSigningConfig { - module_name: module_id, - jwt_secret, - signing_id, - }); + if let Some(cfg) = jwt_configs.get_mut(&module_id) { + cfg.jwt_secret = jwt_secret; } else { let error_message = format!( "Module {module_id} signing ID not found in commit-boost config, cannot reload" @@ -669,10 +674,8 @@ async fn handle_reload( return Err(SignerModuleError::RequestError(error_message)); } } - *jwt_configs = new_configs; } - // Update the rest of the state once everything has passed if let Some(admin_secret) = request.admin_secret { *state.admin_secret.write() = admin_secret; } @@ -722,3 +725,156 @@ async fn start_manager(config: StartSignerConfig) -> eyre::Result ModuleSigningConfig { + ModuleSigningConfig { + module_name: ModuleId(module_name.to_string()), + jwt_secret: secret.to_string(), + signing_id, + } + } + + fn make_state(jwts: HashMap) -> SigningState { + SigningState { + manager: Arc::new(RwLock::new(SigningManager::Local( + LocalSigningManager::new(Chain::Holesky, None).unwrap(), + ))), + jwts: Arc::new(ParkingRwLock::new(jwts)), + admin_secret: Arc::new(ParkingRwLock::new("admin".to_string())), + jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), + jwt_auth_fail_limit: 3, + jwt_auth_fail_timeout: Duration::from_secs(60), + 
reverse_proxy: ReverseProxyHeaderSetup::None, + } + } + + fn empty_manager() -> SigningManager { + SigningManager::Local(LocalSigningManager::new(Chain::Holesky, None).unwrap()) + } + + /// Partial reload must update only the provided modules and leave omitted + /// modules with their existing secrets. + #[tokio::test] + async fn test_partial_reload_preserves_omitted_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([(module_a.clone(), "rotated-secret-a".to_string())])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_ok(), "apply_reload should succeed"); + + let jwts = state.jwts.read(); + assert_eq!( + jwts[&module_a].jwt_secret, "rotated-secret-a", + "module_a secret should be updated" + ); + assert_eq!( + jwts[&module_b].jwt_secret, "secret-b", + "module_b secret must be preserved when omitted" + ); + } + + /// A full reload (all modules provided) should update every module. 
+ #[tokio::test] + async fn test_full_reload_updates_all_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([ + (module_a.clone(), "new-secret-a".to_string()), + (module_b.clone(), "new-secret-b".to_string()), + ])), + admin_secret: None, + }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "new-secret-a"); + assert_eq!(jwts[&module_b].jwt_secret, "new-secret-b"); + } + + /// Reload with an unknown module ID in jwt_secrets should return an error + /// and leave the existing state unchanged. 
+ #[tokio::test] + async fn test_reload_unknown_module_returns_error() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([( + ModuleId("unknown-module".to_string()), + "some-secret".to_string(), + )])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_err(), "unknown module should return an error"); + + // Existing module must be untouched + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } + + /// Reload with no jwt_secrets should leave all module secrets unchanged. + #[tokio::test] + async fn test_reload_without_jwt_secrets_preserves_all() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { jwt_secrets: None, admin_secret: None }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } +} From 76905ded6158c4c08ba2db441c6f54a3d3a5925e Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:32:54 -0700 Subject: [PATCH 06/25] require the nonce in signer-api.yml - previously was marked optional which conflicted with deserializing `SignConsensusRequest` --- api/signer-api.yml | 15 ++++++++++----- crates/common/src/commit/request.rs | 12 ++++++++++-- docs/docs/developing/prop-commit-signing.md | 4 ++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git 
a/api/signer-api.yml b/api/signer-api.yml index 95897ecd..be44f8fd 100644 --- a/api/signer-api.yml +++ b/api/signer-api.yml @@ -86,7 +86,7 @@ paths: application/json: schema: type: object - required: [pubkey, object_root] + required: [pubkey, object_root, nonce] properties: pubkey: description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. @@ -234,7 +234,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -382,7 +382,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 20-byte Ethereum address, with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -695,7 +695,12 @@ components: $ref: "#/components/schemas/EcdsaSignature" Nonce: type: integer - description: If your module tracks nonces per signature (e.g., to prevent replay attacks), this is the unique nonce to use for the signature. It should be an unsigned 64-bit integer in big-endian format. It must be between 0 and 2^64-2, inclusive. If your module doesn't use nonces, we suggest setting this to 2^64-1 instead of 0 because 0 is a legal nonce and will cause complications with your module if you ever want to use a nonce in the future. + description: | + Replay-protection nonce, always mixed into the signing root via `PropCommitSigningInfo`. It + must be an unsigned 64-bit integer between 0 and 2^64-2 (18446744073709551614), inclusive. + + Modules that track nonces for replay protection should use a monotonically increasing value + per key. Modules that do not use replay protection should always send `0`. 
minimum: 0 - maximum: 18446744073709551614 // 2^64-2 + maximum: 18446744073709551614 example: 1 diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index a64e9a67..cd780446 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -84,6 +84,10 @@ impl fmt::Display for SignedProxyDelegation { pub struct SignConsensusRequest { pub pubkey: BlsPublicKey, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. pub nonce: u64, } @@ -93,7 +97,7 @@ impl SignConsensusRequest { } pub fn builder(pubkey: BlsPublicKey) -> Self { - Self::new(pubkey, B256::ZERO, u64::MAX - 1) + Self::new(pubkey, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { @@ -125,6 +129,10 @@ impl Display for SignConsensusRequest { pub struct SignProxyRequest { pub proxy: T, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. 
pub nonce: u64, } @@ -134,7 +142,7 @@ impl SignProxyRequest { } pub fn builder(proxy: T) -> Self { - Self::new(proxy, B256::ZERO, u64::MAX - 1) + Self::new(proxy, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 1e8bd249..30f70413 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -44,7 +44,7 @@ Your module has the option of using **Nonces** for each of its signature request If you want to use them within your module, your module (or whatever remote backend system it connects to) **will be responsible** for storing, comparing, validating, and otherwise using the nonces. Commit-Boost's signer service by itself **does not** store nonces or track which ones have already been used by a given module. -In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit big-endian integer, with a minimum value of 0 and a maximum value of `2^64-2`. We recommend using `2^64-1` as a signifier indicating that your module doesn't use nonces, rather than using 0 for such a purpose. +In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit integer, with a minimum value of 0 and a maximum value of `2^64-2`. The field is required and is always mixed into the signing root. Modules that do not use nonces for replay protection should always send `0`; modules that do should use a monotonically increasing value per key. ## Structure of a Signature @@ -63,7 +63,7 @@ where, for the sub-tree in blue: - `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. -- `Nonce` is the nonce value for the signature request. 
While this value must be present, it can be effectively ignored by setting it to some arbitrary value if your module does not track nonces. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). +- `Nonce` is the nonce value for the signature request. This field is required. Modules that do not use replay protection should always send `0`; modules that do should use a monotonically increasing value per key. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). - `Chain ID` is the ID of the chain that the Signer service is currently configured to use, as indicated by the [Commit-Boost configuration file](../get_started/configuration.md). This must also be a 256-bit unsigned little-endian integer. 
From 8268572dd85b145ba26e43ab18cec71f30240035 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:43:05 -0700 Subject: [PATCH 07/25] fix suffix when displaying X-Forwaded-For --- crates/common/src/config/signer.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 4e28b089..95110958 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -92,10 +92,12 @@ impl Display for ReverseProxyHeaderSetup { write!(f, "\"{header} (unique)\"") } ReverseProxyHeaderSetup::Rightmost { header, trusted_count } => { - let suffix = match trusted_count.get() % 10 { - 1 => "st", - 2 => "nd", - 3 => "rd", + let n = trusted_count.get(); + let suffix = match (n % 100, n % 10) { + (11..=13, _) => "th", + (_, 1) => "st", + (_, 2) => "nd", + (_, 3) => "rd", _ => "th", }; write!(f, "\"{header} ({trusted_count}{suffix} from the right)\"") From 7c8cce6f3c05283730db949d9ae9fce03a3fd272 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:54:43 -0700 Subject: [PATCH 08/25] only take the `jwt_auth_failures` writelock if strictly necessary, default to readlock --- crates/signer/src/service.rs | 52 +++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index b301f8dc..7edd63a8 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -273,38 +273,40 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures fn check_jwt_rate_limit(state: &SigningState, client_ip: &IpAddr) -> Result<(), SignerModuleError> { - let mut failures = state.jwt_auth_failures.write(); + let failures = state.jwt_auth_failures.read(); // Ignore clients that don't have any failures - if let Some(failure_info) = failures.get(client_ip) { - // If the last failure 
was more than the timeout ago, remove this entry so it's - // eligible again - let elapsed = failure_info.last_failure.elapsed(); - if elapsed > state.jwt_auth_fail_timeout { - debug!("Removing {client_ip} from JWT auth failure list"); - failures.remove(client_ip); - return Ok(()); - } + let Some(failure_info) = failures.get(client_ip) else { + debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); + return Ok(()); + }; - // If the failure threshold hasn't been met yet, don't rate limit - if failure_info.failure_count < state.jwt_auth_fail_limit { - debug!( - "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", - failure_info.failure_count, state.jwt_auth_fail_limit - ); - return Ok(()); - } + let elapsed = failure_info.last_failure.elapsed(); + + // If the last failure was more than the timeout ago, remove this entry so it's + // eligible again + if elapsed > state.jwt_auth_fail_timeout { + drop(failures); + debug!("Removing {client_ip} from JWT auth failure list"); + state.jwt_auth_failures.write().remove(client_ip); + return Ok(()); + } - // Rate limit the request - let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); - warn!( - "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + // If the failure threshold hasn't been met yet, don't rate limit + if failure_info.failure_count < state.jwt_auth_fail_limit { + debug!( + "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", + failure_info.failure_count, state.jwt_auth_fail_limit ); - return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); + return Ok(()); } - debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); - Ok(()) + // Rate limit the request + let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); + warn!( + "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + ); + 
Err(SignerModuleError::RateLimited(remaining.as_secs_f64())) } /// Checks if a request can successfully authenticate with the JWT secret From 4a9aff76140bbb0bbf5f669c1a4e8544b5287c7a Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 16:03:46 -0700 Subject: [PATCH 09/25] remove mark_jwt_failure() calls from failures unrelated to jwts --- crates/signer/src/service.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 7edd63a8..81729272 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -248,7 +248,6 @@ async fn jwt_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; @@ -360,7 +359,6 @@ async fn admin_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; From d834242a238d376f7c6abeeff1f3cc7a3381ab37 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 17:35:29 -0700 Subject: [PATCH 10/25] add round-trip sign->verify unit tests --- crates/common/src/signature.rs | 60 +++++++++++++++++++++++++++++++--- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 18c10d4a..41631e33 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -165,10 +165,18 @@ pub fn verify_proposer_commitment_signature_ecdsa( #[cfg(test)] mod tests { - use alloy::primitives::aliases::B32; - - use super::compute_domain; - use crate::{constants::APPLICATION_BUILDER_DOMAIN, types::Chain}; + use alloy::primitives::{U256, aliases::B32}; + + use super::{compute_domain, sign_builder_message, 
verify_signed_message}; + use crate::{ + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + BlindedBeaconBlockElectra, BuilderBid, BuilderBidElectra, + ExecutionPayloadHeaderElectra, ExecutionRequests, + }, + types::{BlsSecretKey, Chain}, + utils::TestRandomSeed, + }; #[test] fn test_builder_domains() { @@ -178,4 +186,48 @@ mod tests { assert_eq!(compute_domain(Chain::Sepolia, domain), Chain::Sepolia.builder_domain()); assert_eq!(compute_domain(Chain::Hoodi, domain), Chain::Hoodi.builder_domain()); } + + #[test] + fn test_builder_bid_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let message = BuilderBid::Electra(BuilderBidElectra { + header: ExecutionPayloadHeaderElectra::test_random(), + blob_kzg_commitments: Default::default(), + execution_requests: ExecutionRequests::default(), + value: U256::from(10), + pubkey: pubkey.clone().into(), + }); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &message); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &message, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } + + #[test] + fn test_blinded_block_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let block = BlindedBeaconBlockElectra::test_random(); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &block); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &block, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } } From 2a1d31546d084e9de827b1b0178b5efffc29208f Mon Sep 17 00:00:00 2001 From: ninaiiad Date: Thu, 26 Mar 2026 17:07:07 +0000 Subject: [PATCH 11/25] add get_header auction winner log (#443) --- crates/pbs/src/mev_boost/get_header.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index 86743703..c1d2f511 100644 --- 
a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -27,7 +27,7 @@ use futures::future::join_all; use parking_lot::RwLock; use reqwest::{StatusCode, header::USER_AGENT}; use tokio::time::sleep; -use tracing::{Instrument, debug, error, warn}; +use tracing::{Instrument, debug, error, info, warn}; use tree_hash::TreeHash; use url::Url; @@ -131,7 +131,7 @@ pub async fn get_header( .unwrap_or_default(); RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); - relay_bids.push(res) + relay_bids.push((relay_id, res)) } Ok(_) => {} Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), @@ -139,9 +139,18 @@ pub async fn get_header( } } - let max_bid = relay_bids.into_iter().max_by_key(|bid| *bid.value()); + let max_bid = relay_bids.into_iter().max_by_key(|(_, bid)| *bid.value()); - Ok(max_bid) + if let Some((winning_relay_id, ref bid)) = max_bid { + info!( + relay_id = winning_relay_id, + value_eth = format_ether(*bid.value()), + block_hash = %bid.block_hash(), + "auction winner" + ); + } + + Ok(max_bid.map(|(_, bid)| bid)) } /// Fetch the parent block from the RPC URL for extra validation of the header. 
@@ -373,7 +382,7 @@ async fn send_one_get_header( } }; - debug!( + info!( relay_id = relay.id.as_ref(), header_size_bytes, latency = ?request_latency, From dd87334a84ff50b4b8111c9e32dde70586df266a Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Tue, 24 Mar 2026 16:32:04 -0700 Subject: [PATCH 12/25] remove unutilized BuilderApi trait to simplify abstraction (keep pbs/mev_boost and pbs/routes separate for now) --- Cargo.lock | 17 --- Cargo.toml | 2 +- bin/commit-boost.rs | 4 +- bin/src/lib.rs | 4 +- crates/pbs/src/api.rs | 71 ------------- crates/pbs/src/lib.rs | 2 - crates/pbs/src/routes/get_header.rs | 6 +- crates/pbs/src/routes/register_validator.rs | 6 +- crates/pbs/src/routes/reload.rs | 7 +- crates/pbs/src/routes/router.rs | 23 ++--- crates/pbs/src/routes/status.rs | 6 +- crates/pbs/src/routes/submit_block.rs | 21 ++-- crates/pbs/src/service.rs | 5 +- docs/docs/developing/custom-modules.md | 6 +- docs/docs/get_started/configuration.md | 3 +- examples/status_api/Cargo.toml | 18 ---- examples/status_api/Dockerfile | 29 ------ examples/status_api/src/main.rs | 108 -------------------- tests/tests/pbs_cfg_file_update.rs | 4 +- tests/tests/pbs_get_header.rs | 12 +-- tests/tests/pbs_get_status.rs | 6 +- tests/tests/pbs_mux.rs | 8 +- tests/tests/pbs_mux_refresh.rs | 4 +- tests/tests/pbs_post_blinded_blocks.rs | 6 +- tests/tests/pbs_post_validators.rs | 8 +- 25 files changed, 69 insertions(+), 317 deletions(-) delete mode 100644 crates/pbs/src/api.rs delete mode 100644 examples/status_api/Cargo.toml delete mode 100644 examples/status_api/Dockerfile delete mode 100644 examples/status_api/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 2de971f8..91bd9143 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6652,23 +6652,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "status_api" -version = "0.9.3" -dependencies = [ - "async-trait", 
- "axum 0.8.8", - "color-eyre", - "commit-boost", - "eyre", - "lazy_static", - "prometheus", - "reqwest 0.12.28", - "serde", - "tokio", - "tracing", -] - [[package]] name = "strsim" version = "0.11.1" diff --git a/Cargo.toml b/Cargo.toml index 83a27e93..4419c9bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["benches/*", "bin", "crates/*", "examples/da_commit", "examples/status_api", "tests"] +members = ["benches/*", "bin", "crates/*", "examples/da_commit", "tests"] resolver = "2" [workspace.package] diff --git a/bin/commit-boost.rs b/bin/commit-boost.rs index e424d144..3994ef90 100644 --- a/bin/commit-boost.rs +++ b/bin/commit-boost.rs @@ -7,7 +7,7 @@ use cb_common::{ }, utils::{initialize_tracing_log, print_logo, wait_for_signal}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_signer::service::SigningService; use clap::{Parser, Subcommand}; use eyre::Result; @@ -68,7 +68,7 @@ async fn run_pbs_service() -> Result<()> { PbsService::init_metrics(pbs_config.chain)?; let state = PbsState::new(pbs_config, config_path); - let server = PbsService::run::<_, DefaultBuilderApi>(state); + let server = PbsService::run(state); tokio::select! 
{ maybe_err = server => { diff --git a/bin/src/lib.rs b/bin/src/lib.rs index 0897aa34..24d4d8f2 100644 --- a/bin/src/lib.rs +++ b/bin/src/lib.rs @@ -18,8 +18,8 @@ pub mod prelude { }; pub use cb_metrics::provider::MetricsProvider; pub use cb_pbs::{ - BuilderApi, BuilderApiState, DefaultBuilderApi, PbsService, PbsState, PbsStateGuard, - get_header, get_status, register_validator, submit_block, + BuilderApiState, PbsService, PbsState, PbsStateGuard, get_header, get_status, + register_validator, submit_block, }; // The TreeHash derive macro requires tree_hash as import pub mod tree_hash { diff --git a/crates/pbs/src/api.rs b/crates/pbs/src/api.rs deleted file mode 100644 index 74d92fb2..00000000 --- a/crates/pbs/src/api.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::{collections::HashSet, sync::Arc}; - -use async_trait::async_trait; -use axum::{Router, http::HeaderMap}; -use cb_common::{ - pbs::{BuilderApiVersion, GetHeaderParams, SignedBlindedBeaconBlock}, - utils::EncodingType, -}; - -use crate::{ - CompoundGetHeaderResponse, CompoundSubmitBlockResponse, mev_boost, - state::{BuilderApiState, PbsState, PbsStateGuard}, -}; - -#[async_trait] -pub trait BuilderApi: 'static { - /// Use to extend the BuilderApi - fn extra_routes() -> Option>> { - None - } - - /// https://ethereum.github.io/builder-specs/#/Builder/getHeader - async fn get_header( - params: GetHeaderParams, - req_headers: HeaderMap, - state: PbsState, - accepted_types: HashSet, - ) -> eyre::Result> { - mev_boost::get_header(params, req_headers, state, accepted_types).await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/status - async fn get_status(req_headers: HeaderMap, state: PbsState) -> eyre::Result<()> { - mev_boost::get_status(req_headers, state).await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and - /// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2 - async fn submit_block( - signed_blinded_block: Arc, - req_headers: 
HeaderMap, - state: PbsState, - api_version: BuilderApiVersion, - accepted_types: HashSet, - ) -> eyre::Result { - mev_boost::submit_block( - signed_blinded_block, - req_headers, - state, - api_version, - accepted_types, - ) - .await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/registerValidator - async fn register_validator( - registrations: Vec, - req_headers: HeaderMap, - state: PbsState, - ) -> eyre::Result<()> { - mev_boost::register_validator(registrations, req_headers, state).await - } - - async fn reload(state: PbsState) -> eyre::Result> { - mev_boost::reload(state).await - } -} - -pub struct DefaultBuilderApi; -impl BuilderApi<()> for DefaultBuilderApi {} diff --git a/crates/pbs/src/lib.rs b/crates/pbs/src/lib.rs index 8b4afdcf..270a7f47 100644 --- a/crates/pbs/src/lib.rs +++ b/crates/pbs/src/lib.rs @@ -1,4 +1,3 @@ -mod api; mod constants; mod error; mod metrics; @@ -8,7 +7,6 @@ mod service; mod state; mod utils; -pub use api::*; pub use constants::*; pub use mev_boost::*; pub use service::PbsService; diff --git a/crates/pbs/src/routes/get_header.rs b/crates/pbs/src/routes/get_header.rs index c550d92f..249b869d 100644 --- a/crates/pbs/src/routes/get_header.rs +++ b/crates/pbs/src/routes/get_header.rs @@ -16,14 +16,14 @@ use tracing::{error, info}; use crate::{ CompoundGetHeaderResponse, - api::BuilderApi, constants::GET_HEADER_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, + mev_boost, state::{BuilderApiState, PbsStateGuard}, }; -pub async fn handle_get_header>( +pub async fn handle_get_header( State(state): State>, req_headers: HeaderMap, Path(params): Path, @@ -45,7 +45,7 @@ pub async fn handle_get_header>( info!(ua, ms_into_slot, "new request"); - match A::get_header(params, req_headers, state, accept_types).await { + match mev_boost::get_header(params, req_headers, state, accept_types).await { Ok(res) => { if let Some(max_bid) = res { BEACON_NODE_STATUS.with_label_values(&["200", GET_HEADER_ENDPOINT_TAG]).inc(); 
diff --git a/crates/pbs/src/routes/register_validator.rs b/crates/pbs/src/routes/register_validator.rs index 51c8ce6e..2998b360 100644 --- a/crates/pbs/src/routes/register_validator.rs +++ b/crates/pbs/src/routes/register_validator.rs @@ -4,14 +4,14 @@ use reqwest::StatusCode; use tracing::{error, info, trace}; use crate::{ - api::BuilderApi, constants::REGISTER_VALIDATOR_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, + mev_boost, state::{BuilderApiState, PbsStateGuard}, }; -pub async fn handle_register_validator>( +pub async fn handle_register_validator( State(state): State>, req_headers: HeaderMap, Json(registrations): Json>, @@ -24,7 +24,7 @@ pub async fn handle_register_validator>( info!(ua, num_registrations = registrations.len(), "new request"); - if let Err(err) = A::register_validator(registrations, req_headers, state).await { + if let Err(err) = mev_boost::register_validator(registrations, req_headers, state).await { error!(%err, "all relays failed registration"); let err = PbsClientError::NoResponse; diff --git a/crates/pbs/src/routes/reload.rs b/crates/pbs/src/routes/reload.rs index aa031d47..9474cbde 100644 --- a/crates/pbs/src/routes/reload.rs +++ b/crates/pbs/src/routes/reload.rs @@ -4,13 +4,14 @@ use reqwest::StatusCode; use tracing::{error, info}; use crate::{ - BuilderApi, RELOAD_ENDPOINT_TAG, + RELOAD_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, + mev_boost, state::{BuilderApiState, PbsStateGuard}, }; -pub async fn handle_reload>( +pub async fn handle_reload( req_headers: HeaderMap, State(state): State>, ) -> Result { @@ -20,7 +21,7 @@ pub async fn handle_reload>( info!(ua, relay_check = prev_state.config.pbs_config.relay_check); - match A::reload(prev_state).await { + match mev_boost::reload(prev_state).await { Ok(new_state) => { info!("config reload successful"); diff --git a/crates/pbs/src/routes/router.rs b/crates/pbs/src/routes/router.rs index e98c89c1..87e2eeae 100644 --- 
a/crates/pbs/src/routes/router.rs +++ b/crates/pbs/src/routes/router.rs @@ -21,37 +21,36 @@ use super::{ }; use crate::{ MAX_SIZE_REGISTER_VALIDATOR_REQUEST, MAX_SIZE_SUBMIT_BLOCK_RESPONSE, - api::BuilderApi, routes::submit_block::handle_submit_block_v2, state::{BuilderApiState, PbsStateGuard}, }; -pub fn create_app_router>(state: PbsStateGuard) -> Router { +pub fn create_app_router(state: PbsStateGuard) -> Router { // DefaultBodyLimit is 2Mib by default, so we only increase it for a few routes // that may need more let v1_builder_routes = Router::new() - .route(GET_HEADER_PATH, get(handle_get_header::)) - .route(GET_STATUS_PATH, get(handle_get_status::)) + .route(GET_HEADER_PATH, get(handle_get_header::)) + .route(GET_STATUS_PATH, get(handle_get_status::)) .route( REGISTER_VALIDATOR_PATH, - post(handle_register_validator::) + post(handle_register_validator::) .route_layer(DefaultBodyLimit::max(MAX_SIZE_REGISTER_VALIDATOR_REQUEST)), ) .route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v1::) + post(handle_submit_block_v1::) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); // header is smaller than the response but err on the safe side let v2_builder_routes = Router::new().route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v2::) + post(handle_submit_block_v2::) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); let v1_builder_router = Router::new().nest(BUILDER_V1_API_PATH, v1_builder_routes); let v2_builder_router = Router::new().nest(BUILDER_V2_API_PATH, v2_builder_routes); - let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload::)); - let builder_api = + let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload::)); + let app = Router::new().merge(v1_builder_router).merge(v2_builder_router).merge(reload_router).layer( TraceLayer::new_for_http().on_response( |response: &Response, latency: std::time::Duration, _: &tracing::Span| { @@ -60,12 +59,6 @@ pub fn create_app_router>(state: 
PbsStateGu ), ); - let app = if let Some(extra_routes) = A::extra_routes() { - builder_api.merge(extra_routes) - } else { - builder_api - }; - app.layer(middleware::from_fn(tracing_middleware)).with_state(state) } diff --git a/crates/pbs/src/routes/status.rs b/crates/pbs/src/routes/status.rs index 52fd3e2f..8ac6fe86 100644 --- a/crates/pbs/src/routes/status.rs +++ b/crates/pbs/src/routes/status.rs @@ -4,14 +4,14 @@ use reqwest::StatusCode; use tracing::{error, info}; use crate::{ - api::BuilderApi, constants::STATUS_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, + mev_boost, state::{BuilderApiState, PbsStateGuard}, }; -pub async fn handle_get_status>( +pub async fn handle_get_status( req_headers: HeaderMap, State(state): State>, ) -> Result { @@ -21,7 +21,7 @@ pub async fn handle_get_status>( info!(ua, relay_check = state.config.pbs_config.relay_check, "new request"); - match A::get_status(req_headers, state).await { + match mev_boost::get_status(req_headers, state).await { Ok(_) => { info!("relay check successful"); diff --git a/crates/pbs/src/routes/submit_block.rs b/crates/pbs/src/routes/submit_block.rs index 1cf442e0..2332eb0c 100644 --- a/crates/pbs/src/routes/submit_block.rs +++ b/crates/pbs/src/routes/submit_block.rs @@ -18,30 +18,30 @@ use tracing::{error, info, trace}; use crate::{ CompoundSubmitBlockResponse, - api::BuilderApi, constants::SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, + mev_boost, state::{BuilderApiState, PbsStateGuard}, }; -pub async fn handle_submit_block_v1>( +pub async fn handle_submit_block_v1( state: State>, req_headers: HeaderMap, raw_request: RawRequest, ) -> Result { - handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V1).await + handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V1).await } -pub async fn handle_submit_block_v2>( +pub async fn handle_submit_block_v2( state: State>, req_headers: HeaderMap, 
raw_request: RawRequest, ) -> Result { - handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V2).await + handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V2).await } -async fn handle_submit_block_impl>( +async fn handle_submit_block_impl( State(state): State>, req_headers: HeaderMap, raw_request: RawRequest, @@ -72,7 +72,14 @@ async fn handle_submit_block_impl>( info!(ua, ms_into_slot = now.saturating_sub(slot_start_ms), "new request"); - match A::submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await + match mev_boost::submit_block( + signed_blinded_block, + req_headers, + state, + api_version, + accept_types, + ) + .await { Ok(res) => match res { crate::CompoundSubmitBlockResponse::EmptyBody => { diff --git a/crates/pbs/src/service.rs b/crates/pbs/src/service.rs index 8be422ca..6a738379 100644 --- a/crates/pbs/src/service.rs +++ b/crates/pbs/src/service.rs @@ -20,7 +20,6 @@ use tracing::{debug, info, warn}; use url::Url; use crate::{ - api::BuilderApi, metrics::PBS_METRICS_REGISTRY, routes::create_app_router, state::{BuilderApiState, PbsState, PbsStateGuard}, @@ -29,7 +28,7 @@ use crate::{ pub struct PbsService; impl PbsService { - pub async fn run>(state: PbsState) -> Result<()> { + pub async fn run(state: PbsState) -> Result<()> { let addr = state.config.endpoint; info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, ?addr, chain =? 
state.config.chain, "starting PBS service"); @@ -43,7 +42,7 @@ impl PbsService { let config_path = state.config_path.clone(); let state: Arc>> = RwLock::new(state).into(); - let app = create_app_router::(state.clone()); + let app = create_app_router::(state.clone()); let listener = TcpListener::bind(addr).await?; let task = diff --git a/docs/docs/developing/custom-modules.md b/docs/docs/developing/custom-modules.md index cf224448..22044a58 100644 --- a/docs/docs/developing/custom-modules.md +++ b/docs/docs/developing/custom-modules.md @@ -4,9 +4,7 @@ sidebar_position: 1 # Custom Modules -Commit-Boost aims to provide an open platform for developers to create and distribute commitment protocols sidecars. +Commit-Boost aims to provide an open platform for developers to create and distribute commitment protocol sidecars. -There are two ways to leverage Commit-Boost modularity: -1. Commit Modules, which request signatures from the proposer, e.g. for preconfirmations ([example](https://github.com/Commit-Boost/commit-boost-client/tree/78bdc47bf89082f4d1ea302f9a3f86f609966b28/examples/da_commit)). -2. PBS Modules, which tweak the default PBS Module with additional logic, e.g. verifying additional constraints in `get_header` ([example](https://github.com/Commit-Boost/commit-boost-client/tree/78bdc47bf89082f4d1ea302f9a3f86f609966b28/examples/status_api)). +This happens through Commit Modules, which request signatures from the proposer, e.g. for preconfirmations ([example](https://github.com/Commit-Boost/commit-boost-client/tree/main/examples/da_commit)). 
diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 60e55515..aac4a762 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -550,8 +550,7 @@ Parameters that are not provided will not be updated; they will be regenerated u ### Notes -- The hot reload feature is available for both the PBS service (both default and custom) and Signer service. +- The hot reload feature is available for both the PBS service and Signer service. - Changes related to listening hosts and ports will not been applied, as it requires the server to be restarted. - If running in Docker containers, changes in `volumes` will not be applied, as it requires the container to be recreated. Be careful if changing a path to a local file as it may not be accessible from the container. -- Custom PBS modules may override the default behaviour of the hot reload feature to parse extra configuration fields. Check the [examples](https://github.com/Commit-Boost/commit-boost-client/blob/main/examples/status_api/src/main.rs) for more details. - In case the reload fails (most likely because of some misconfigured option), the server will return a 500 error and the previous configuration will be kept. 
diff --git a/examples/status_api/Cargo.toml b/examples/status_api/Cargo.toml deleted file mode 100644 index f2be040e..00000000 --- a/examples/status_api/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -edition.workspace = true -name = "status_api" -rust-version.workspace = true -version.workspace = true - -[dependencies] -async-trait.workspace = true -axum.workspace = true -color-eyre.workspace = true -commit-boost = { path = "../../bin" } -eyre.workspace = true -lazy_static.workspace = true -prometheus.workspace = true -reqwest.workspace = true -serde.workspace = true -tokio.workspace = true -tracing.workspace = true diff --git a/examples/status_api/Dockerfile b/examples/status_api/Dockerfile deleted file mode 100644 index dd20f000..00000000 --- a/examples/status_api/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef -WORKDIR /app - -FROM chef AS planner -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - -FROM chef AS builder -COPY --from=planner /app/recipe.json recipe.json - -RUN cargo chef cook --release --recipe-path recipe.json - -RUN apt-get update && apt-get install -y protobuf-compiler - -COPY . . 
-RUN cargo build --release --bin status_api - - -FROM ubuntu AS runtime -WORKDIR /app - -RUN apt-get update -RUN apt-get install -y openssl ca-certificates libssl3 libssl-dev - -COPY --from=builder /app/target/release/status_api /usr/local/bin -ENTRYPOINT ["/usr/local/bin/status_api"] - - - diff --git a/examples/status_api/src/main.rs b/examples/status_api/src/main.rs deleted file mode 100644 index aa65f4d6..00000000 --- a/examples/status_api/src/main.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::{ - path::PathBuf, - sync::{ - Arc, - atomic::{AtomicU64, Ordering}, - }, -}; - -use async_trait::async_trait; -use axum::{ - Router, - extract::State, - response::{IntoResponse, Response}, - routing::get, -}; -use commit_boost::prelude::*; -use eyre::Result; -use lazy_static::lazy_static; -use prometheus::IntCounter; -use reqwest::{StatusCode, header::HeaderMap}; -use serde::Deserialize; -use tracing::info; - -lazy_static! { - pub static ref CHECK_RECEIVED_COUNTER: IntCounter = - IntCounter::new("checks", "successful /check requests received").unwrap(); -} - -/// Extra config loaded from the config file -/// You should add an `inc_amount` field to the config file in the `pbs` -/// section. Be sure also to change the `pbs.docker_image` field, -/// `test_status_api` in this case (from scripts/build_local_modules.sh). 
-#[derive(Debug, Deserialize)] -struct ExtraConfig { - inc_amount: u64, -} - -// Extra state available at runtime -#[derive(Clone)] -struct MyBuilderState { - inc_amount: u64, - counter: Arc, -} - -impl BuilderApiState for MyBuilderState {} - -impl MyBuilderState { - fn from_config(extra: ExtraConfig) -> Self { - Self { inc_amount: extra.inc_amount, counter: Arc::new(AtomicU64::new(0)) } - } - - fn inc(&self) { - self.counter.fetch_add(self.inc_amount, Ordering::Relaxed); - } - fn get(&self) -> u64 { - self.counter.load(Ordering::Relaxed) - } -} - -struct MyBuilderApi; - -#[async_trait] -impl BuilderApi for MyBuilderApi { - async fn get_status(req_headers: HeaderMap, state: PbsState) -> Result<()> { - state.data.inc(); - info!("THIS IS A CUSTOM LOG"); - CHECK_RECEIVED_COUNTER.inc(); - get_status(req_headers, state).await - } - - async fn reload(state: PbsState) -> Result> { - let (pbs_config, extra_config) = load_pbs_custom_config::().await?; - let mut data = state.data.clone(); - data.inc_amount = extra_config.inc_amount; - - let empty_config_path = PathBuf::new(); - Ok(PbsState::new(pbs_config, empty_config_path).with_data(data)) - } - - fn extra_routes() -> Option>> { - let mut router = Router::new(); - router = router.route("/check", get(handle_check)); - Some(router) - } -} - -async fn handle_check(State(state): State>) -> Response { - (StatusCode::OK, format!("Received {count} status requests!", count = state.read().data.get())) - .into_response() -} - -#[tokio::main] -async fn main() -> Result<()> { - color_eyre::install()?; - - let (pbs_config, extra) = load_pbs_custom_config::().await?; - let chain = pbs_config.chain; - let _guard = initialize_tracing_log(PBS_SERVICE_NAME, LogsSettings::from_env_config()?)?; - - let custom_state = MyBuilderState::from_config(extra); - let empty_config_path = PathBuf::new(); - let state = PbsState::new(pbs_config, empty_config_path).with_data(custom_state); - - 
PbsService::register_metric(Box::new(CHECK_RECEIVED_COUNTER.clone())); - PbsService::init_metrics(chain)?; - - PbsService::run::(state).await -} diff --git a/tests/tests/pbs_cfg_file_update.rs b/tests/tests/pbs_cfg_file_update.rs index b70ab47a..a9d2ff7d 100644 --- a/tests/tests/pbs_cfg_file_update.rs +++ b/tests/tests/pbs_cfg_file_update.rs @@ -10,7 +10,7 @@ use cb_common::{ signer::random_secret, types::Chain, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_validator::MockValidator, @@ -109,7 +109,7 @@ async fn test_cfg_file_update() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![relay1.clone()]); let state = PbsState::new(config, config_path.clone()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers - extra time for the file watcher tokio::time::sleep(Duration::from_millis(1000)).await; diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index b7f3c4a5..c366c15e 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -12,7 +12,7 @@ use cb_common::{ get_consensus_version_header, timestamp_of_slot_start_sec, }, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_validator::MockValidator, @@ -252,7 +252,7 @@ async fn test_get_header_impl( pbs_config.rpc_url = rpc_url; let config = to_pbs_config(chain, pbs_config, vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -323,7 +323,7 @@ async fn 
test_get_header_returns_204_if_relay_down() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -355,7 +355,7 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -460,7 +460,7 @@ async fn test_get_header_ssz_bid_value_round_trip() -> Result<()> { pbs_config.min_bid_wei = U256::ZERO; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); tokio::time::sleep(Duration::from_millis(100)).await; @@ -671,7 +671,7 @@ async fn test_get_header_none_mode_bypasses_pubkey_validation() -> Result<()> { pbs_config.header_validation_mode = mode; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index cd2ab51d..f9a0d485 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc, time::Duration}; use cb_common::{signer::random_secret, 
types::Chain}; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_validator::MockValidator, @@ -32,7 +32,7 @@ async fn test_get_status() -> Result<()> { let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -65,7 +65,7 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index 93731aa5..0703e49a 100644 --- a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -19,7 +19,7 @@ use cb_common::{ types::Chain, utils::{EncodingType, ForkName, ResponseReadError, set_ignore_content_length}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_ssv_node::{SsvNodeMockState, create_mock_ssv_node_server}, @@ -235,7 +235,7 @@ async fn test_mux() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -379,7 +379,7 @@ async fn test_ssv_multi_with_node() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - 
let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run::<()>(state)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start @@ -477,7 +477,7 @@ async fn test_ssv_multi_with_public() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run::<()>(state)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start diff --git a/tests/tests/pbs_mux_refresh.rs b/tests/tests/pbs_mux_refresh.rs index 11a96712..28642d2c 100644 --- a/tests/tests/pbs_mux_refresh.rs +++ b/tests/tests/pbs_mux_refresh.rs @@ -6,7 +6,7 @@ use cb_common::{ signer::random_secret, types::Chain, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_ssv_public::{PublicSsvMockState, create_mock_public_ssv_server}, @@ -100,7 +100,7 @@ async fn test_auto_refresh() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run::<()>(state)); info!("Started PBS server with pubkey {default_pubkey}"); // Wait for the server to start diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index 36214c15..b711a38a 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -7,7 +7,7 @@ use cb_common::{ types::Chain, utils::{EncodingType, ForkName}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_validator::{MockValidator, load_test_signed_blinded_block}, @@ -453,7 
+453,7 @@ async fn test_submit_block_too_large() -> Result<()> { let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -513,7 +513,7 @@ async fn submit_block_impl( pbs_config.block_validation_mode = mode; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_post_validators.rs b/tests/tests/pbs_post_validators.rs index 12601cda..7ffabf72 100644 --- a/tests/tests/pbs_post_validators.rs +++ b/tests/tests/pbs_post_validators.rs @@ -5,7 +5,7 @@ use cb_common::{ signer::random_secret, types::{BlsPublicKey, Chain}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service}, mock_validator::MockValidator, @@ -32,7 +32,7 @@ async fn test_register_validators() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run::<()>(state)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -81,7 +81,7 @@ async fn test_register_validators_does_not_retry_on_429() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state.clone())); + 
tokio::spawn(PbsService::run::<()>(state.clone())); // Leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -136,7 +136,7 @@ async fn test_register_validators_retries_on_500() -> Result<()> { let config = to_pbs_config(chain, pbs_config, relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state.clone())); + tokio::spawn(PbsService::run::<()>(state.clone())); tokio::time::sleep(Duration::from_millis(100)).await; From eb90215dfa979f0a10705d327144bbcab1394250 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 11:42:08 -0700 Subject: [PATCH 13/25] Removed BuilderApiState trait to complete simplifying abstraction and consolidated mev_boost module into routes module. Also stop using hardcoded ports to prevent clashing --- Cargo.lock | 1 - benches/microbench/src/get_header.rs | 37 +- bin/src/lib.rs | 5 +- crates/pbs/Cargo.toml | 1 - crates/pbs/src/lib.rs | 8 +- crates/pbs/src/mev_boost/get_header.rs | 1019 ---------------- crates/pbs/src/mev_boost/mod.rs | 76 -- .../pbs/src/mev_boost/register_validator.rs | 211 ---- crates/pbs/src/mev_boost/reload.rs | 27 - crates/pbs/src/mev_boost/status.rs | 90 -- crates/pbs/src/mev_boost/submit_block.rs | 732 ------------ crates/pbs/src/routes/get_header.rs | 1032 ++++++++++++++++- crates/pbs/src/routes/mod.rs | 66 ++ crates/pbs/src/routes/register_validator.rs | 232 +++- crates/pbs/src/routes/reload.rs | 38 +- crates/pbs/src/routes/router.rs | 17 +- crates/pbs/src/routes/status.rs | 98 +- crates/pbs/src/routes/submit_block.rs | 760 +++++++++++- crates/pbs/src/service.rs | 21 +- crates/pbs/src/state.rs | 27 +- tests/src/mock_relay.rs | 10 +- tests/src/mock_ssv_public.rs | 14 +- tests/src/utils.rs | 12 + tests/tests/pbs_cfg_file_update.rs | 21 +- tests/tests/pbs_get_header.rs | 90 +- tests/tests/pbs_get_status.rs | 33 +- tests/tests/pbs_mux.rs | 96 +- tests/tests/pbs_mux_refresh.rs | 37 +- 
tests/tests/pbs_post_blinded_blocks.rs | 45 +- tests/tests/pbs_post_validators.rs | 39 +- 30 files changed, 2449 insertions(+), 2446 deletions(-) delete mode 100644 crates/pbs/src/mev_boost/get_header.rs delete mode 100644 crates/pbs/src/mev_boost/mod.rs delete mode 100644 crates/pbs/src/mev_boost/register_validator.rs delete mode 100644 crates/pbs/src/mev_boost/reload.rs delete mode 100644 crates/pbs/src/mev_boost/status.rs delete mode 100644 crates/pbs/src/mev_boost/submit_block.rs diff --git a/Cargo.lock b/Cargo.lock index 91bd9143..aab44380 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1880,7 +1880,6 @@ name = "cb-pbs" version = "0.9.3" dependencies = [ "alloy", - "async-trait", "axum 0.8.8", "axum-extra", "cb-common", diff --git a/benches/microbench/src/get_header.rs b/benches/microbench/src/get_header.rs index 44eff329..4059242e 100644 --- a/benches/microbench/src/get_header.rs +++ b/benches/microbench/src/get_header.rs @@ -36,20 +36,18 @@ //! - `HeaderMap` allocation (created once in setup, cloned cheaply per //! iteration) -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{collections::HashSet, path::PathBuf, sync::Arc}; use alloy::primitives::B256; use axum::http::HeaderMap; -use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain}; +use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain, utils::EncodingType}; use cb_pbs::{PbsState, get_header}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, - utils::{generate_mock_relay, get_pbs_static_config, to_pbs_config}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, + utils::{generate_mock_relay, get_free_listener, get_pbs_config, to_pbs_config}, }; use criterion::{Criterion, black_box, criterion_group, criterion_main}; -// Ports 19201–19205 are reserved for the microbenchmark mock relays. 
-const BASE_PORT: u16 = 19200; const CHAIN: Chain = Chain::Hoodi; const MAX_RELAYS: usize = 5; const RELAY_COUNTS: [usize; 3] = [1, 3, MAX_RELAYS]; @@ -77,34 +75,34 @@ fn bench_get_header(c: &mut Criterion) { // Start all mock relays once and build one PbsState per relay-count variant. // All relays share the same MockRelayState (and therefore the same signing - // key). + // key). Each relay gets its own OS-assigned port via get_free_listener() so + // there is no TOCTOU race and no hardcoded port reservations. let (states, params) = rt.block_on(async { let signer = random_secret(); let pubkey = signer.public_key(); let mock_state = Arc::new(MockRelayState::new(CHAIN, signer)); - let relay_clients: Vec<_> = (0..MAX_RELAYS) - .map(|i| { - let port = BASE_PORT + 1 + i as u16; - tokio::spawn(start_mock_relay_service(mock_state.clone(), port)); - generate_mock_relay(port, pubkey.clone()).expect("relay client") - }) - .collect(); + let mut relay_clients = Vec::with_capacity(MAX_RELAYS); + for _ in 0..MAX_RELAYS { + let listener = get_free_listener().await; + let port = listener.local_addr().unwrap().port(); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), listener)); + relay_clients.push(generate_mock_relay(port, pubkey.clone()).expect("relay client")); + } - // Give all servers time to bind before benchmarking starts. - tokio::time::sleep(Duration::from_millis(200)).await; + // Give all servers time to start accepting before benchmarking begins. + tokio::time::sleep(std::time::Duration::from_millis(200)).await; let params = GetHeaderParams { slot: 0, parent_hash: B256::ZERO, pubkey }; // Port 0 here is the port the PBS service itself would bind to for incoming // validator requests. We call get_header() as a function directly, so no // PBS server is started and this port is never used. The actual relay - // endpoints are carried inside the RelayClient objects (ports 19201–19205). + // endpoints are carried inside the RelayClient objects. 
let states: Vec = RELAY_COUNTS .iter() .map(|&n| { - let config = - to_pbs_config(CHAIN, get_pbs_static_config(0), relay_clients[..n].to_vec()); + let config = to_pbs_config(CHAIN, get_pbs_config(0), relay_clients[..n].to_vec()); PbsState::new(config, PathBuf::new()) }) .collect(); @@ -138,6 +136,7 @@ fn bench_get_header(c: &mut Criterion) { black_box(params.clone()), black_box(headers.clone()), black_box(state.clone()), + black_box(HashSet::from([EncodingType::Json, EncodingType::Ssz])), )) .expect("get_header failed") }) diff --git a/bin/src/lib.rs b/bin/src/lib.rs index 24d4d8f2..365815a6 100644 --- a/bin/src/lib.rs +++ b/bin/src/lib.rs @@ -17,10 +17,7 @@ pub mod prelude { utils::{initialize_tracing_log, utcnow_ms, utcnow_ns, utcnow_sec, utcnow_us}, }; pub use cb_metrics::provider::MetricsProvider; - pub use cb_pbs::{ - BuilderApiState, PbsService, PbsState, PbsStateGuard, get_header, get_status, - register_validator, submit_block, - }; + pub use cb_pbs::{PbsService, PbsState, PbsStateGuard}; // The TreeHash derive macro requires tree_hash as import pub mod tree_hash { pub use tree_hash::*; diff --git a/crates/pbs/Cargo.toml b/crates/pbs/Cargo.toml index 9d9df214..9cfe9bc9 100644 --- a/crates/pbs/Cargo.toml +++ b/crates/pbs/Cargo.toml @@ -7,7 +7,6 @@ version.workspace = true [dependencies] alloy.workspace = true -async-trait.workspace = true axum.workspace = true axum-extra.workspace = true cb-common.workspace = true diff --git a/crates/pbs/src/lib.rs b/crates/pbs/src/lib.rs index 270a7f47..92d026c3 100644 --- a/crates/pbs/src/lib.rs +++ b/crates/pbs/src/lib.rs @@ -1,13 +1,15 @@ mod constants; mod error; mod metrics; -mod mev_boost; mod routes; mod service; mod state; mod utils; pub use constants::*; -pub use mev_boost::*; +pub use routes::{ + CompoundGetHeaderResponse, CompoundSubmitBlockResponse, LightGetHeaderResponse, + LightSubmitBlockResponse, get_header, +}; pub use service::PbsService; -pub use state::{BuilderApiState, PbsState, PbsStateGuard}; +pub use 
state::{PbsState, PbsStateGuard}; diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs deleted file mode 100644 index 751987af..00000000 --- a/crates/pbs/src/mev_boost/get_header.rs +++ /dev/null @@ -1,1019 +0,0 @@ -use std::{ - collections::HashSet, - sync::Arc, - time::{Duration, Instant}, -}; - -use alloy::{ - primitives::{B256, U256, aliases::B32, utils::format_ether}, - providers::Provider, - rpc::types::Block, -}; -use axum::http::{HeaderMap, HeaderValue}; -use cb_common::{ - config::HeaderValidationMode, - constants::APPLICATION_BUILDER_DOMAIN, - pbs::{ - EMPTY_TX_ROOT_HASH, ExecutionPayloadHeaderRef, ForkName, ForkVersionDecode, GetHeaderInfo, - GetHeaderParams, GetHeaderResponse, HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, - RelayClient, SignedBuilderBid, - error::{PbsError, ValidationError}, - }, - signature::verify_signed_message, - types::{BlsPublicKey, BlsPublicKeyBytes, BlsSignature, Chain}, - utils::{ - EncodingType, get_bid_value_from_signed_builder_bid_ssz, get_consensus_version_header, - get_user_agent_with_version, ms_into_slot, read_chunked_body_with_max, - timestamp_of_slot_start_sec, utcnow_ms, - }, -}; -use futures::future::join_all; -use parking_lot::RwLock; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; -use tokio::time::sleep; -use tracing::{Instrument, debug, error, warn}; -use tree_hash::TreeHash; -use url::Url; - -use crate::{ - constants::{ - GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, - TIMEOUT_ERROR_CODE_STR, - }, - metrics::{RELAY_HEADER_VALUE, RELAY_LAST_SLOT, RELAY_LATENCY, RELAY_STATUS_CODE}, - mev_boost::{CompoundGetHeaderResponse, LightGetHeaderResponse}, - state::{BuilderApiState, PbsState}, - utils::check_gas_limit, -}; - -/// Info about an incoming get_header request. -/// Sent from get_header to each send_timed_get_header call. 
-#[derive(Clone)] -struct RequestInfo { - /// The blockchain parameters of the get_header request (what slot it's for, - /// which pubkey is requesting it, etc) - params: GetHeaderParams, - - /// Common baseline of headers to send with each request - headers: Arc, - - /// The chain the request is for - chain: Chain, - - /// Context for validating the header returned by the relay - validation: ValidationContext, - - /// The accepted encoding types from the original request - accepted_types: HashSet, -} - -/// Used interally to provide info and context about a get_header request and -/// its response -struct GetHeaderResponseInfo { - /// ID of the relay the response came from - relay_id: Arc, - - /// The raw body of the response - response_bytes: Vec, - - /// The content type the response is encoded with - content_type: EncodingType, - - /// Which fork the response bid is for (if provided as a header, rather than - /// part of the body) - fork: Option, - - /// The status code of the response, for logging - code: StatusCode, - - /// The round-trip latency of the request - request_latency: Duration, -} - -/// Context for validating the header -#[derive(Clone)] -struct ValidationContext { - /// Whether to skip signature verification - skip_sigverify: bool, - - /// Minimum acceptable bid, in wei - min_bid_wei: U256, - - /// The mode used for response validation - mode: HeaderValidationMode, - - /// The parent block, if fetched - parent_block: Arc>>, -} - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader -/// Returns 200 if at least one relay returns 200, else 204 -pub async fn get_header( - params: GetHeaderParams, - req_headers: HeaderMap, - state: PbsState, - accepted_types: HashSet, -) -> eyre::Result> { - let parent_block = Arc::new(RwLock::new(None)); - let extra_validation_enabled = - state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; - if extra_validation_enabled && let Some(rpc_url) = 
state.pbs_config().rpc_url.clone() { - tokio::spawn( - fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()).in_current_span(), - ); - } - - let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); - - if let Some(mux_id) = maybe_mux_id { - debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); - } else { - debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); - } - - let max_timeout_ms = pbs_config - .timeout_get_header_ms - .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); - - if max_timeout_ms == 0 { - warn!( - ms_into_slot, - threshold = pbs_config.late_in_slot_time_ms, - "late in slot, skipping relay requests" - ); - - return Ok(None); - } - - // Use the minimum of the time left and the user provided timeout header - let max_timeout_ms = req_headers - .get(HEADER_TIMEOUT_MS) - .map(|header| match header.to_str().ok().and_then(|v| v.parse::().ok()) { - None | Some(0) => { - // Header can't be stringified, or parsed, or it's set to 0 - warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); - max_timeout_ms - } - Some(user_timeout) => user_timeout.min(max_timeout_ms), - }) - .unwrap_or(max_timeout_ms); - - // prepare headers, except for start time which is set in `send_one_get_header` - let mut send_headers = HeaderMap::new(); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().header_validation_mode; - let accept_types = match mode { - HeaderValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - 
[EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert( - ACCEPT, - HeaderValue::from_str(&accept_types) - .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, - ); - - // Send requests to all relays concurrently - let slot = params.slot as i64; - let request_info = Arc::new(RequestInfo { - params, - headers: Arc::new(send_headers), - chain: state.config.chain, - validation: ValidationContext { - skip_sigverify: state.pbs_config().skip_sigverify, - min_bid_wei: state.pbs_config().min_bid_wei, - mode, - parent_block, - }, - accepted_types, - }); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays.iter() { - handles.push( - send_timed_get_header( - request_info.clone(), - relay.clone(), - ms_into_slot, - max_timeout_ms, - ) - .in_current_span(), - ); - } - - let results = join_all(handles).await; - let mut relay_bids = Vec::with_capacity(relays.len()); - for (i, res) in results.into_iter().enumerate() { - let relay_id = relays[i].id.as_str(); - - match res { - Ok(Some(res)) => { - let value = match &res { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }; - RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); - let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); - RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); - - relay_bids.push(res) - } - Ok(_) => {} - Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), - Err(err) => error!(%err, relay_id), - } - } - - let max_bid = relay_bids.into_iter().max_by_key(|bid| match bid { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }); - - Ok(max_bid) -} - -/// Fetch the parent block from the RPC URL for extra validation of the header. 
-/// Extra validation will be skipped if: -/// - relay returns header before parent block is fetched -/// - parent block is not found, eg because of a RPC delay -async fn fetch_parent_block( - rpc_url: Url, - parent_hash: B256, - parent_block: Arc>>, -) { - let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); - - debug!(%parent_hash, "fetching parent block"); - - match provider.get_block_by_hash(parent_hash).await { - Ok(maybe_block) => { - debug!(block_found = maybe_block.is_some(), "fetched parent block"); - let mut guard = parent_block.write(); - *guard = maybe_block; - } - Err(err) => { - error!(%err, "fetch failed"); - } - } -} - -async fn send_timed_get_header( - request_info: Arc, - relay: RelayClient, - ms_into_slot: u64, - mut timeout_left_ms: u64, -) -> Result, PbsError> { - let params = &request_info.params; - let url = Arc::new(relay.get_header_url(params.slot, ¶ms.parent_hash, ¶ms.pubkey)?); - - if relay.config.enable_timing_games { - if let Some(target_ms) = relay.config.target_first_request_ms { - // sleep until target time in slot - - let delay = target_ms.saturating_sub(ms_into_slot); - if delay > 0 { - debug!( - relay_id = relay.id.as_ref(), - target_ms, ms_into_slot, "TG: waiting to send first header request" - ); - timeout_left_ms = timeout_left_ms.saturating_sub(delay); - sleep(Duration::from_millis(delay)).await; - } else { - debug!( - relay_id = relay.id.as_ref(), - target_ms, ms_into_slot, "TG: request already late enough in slot" - ); - } - } - - if let Some(send_freq_ms) = relay.config.frequency_get_header_ms { - let mut handles = Vec::new(); - - debug!( - relay_id = relay.id.as_ref(), - send_freq_ms, timeout_left_ms, "TG: sending multiple header requests" - ); - - loop { - handles.push(tokio::spawn( - send_one_get_header( - request_info.clone(), - relay.clone(), - url.clone(), - timeout_left_ms, - ) - .in_current_span(), - )); - - if timeout_left_ms > send_freq_ms { - // enough time for one more - 
timeout_left_ms = timeout_left_ms.saturating_sub(send_freq_ms); - sleep(Duration::from_millis(send_freq_ms)).await; - } else { - break; - } - } - - let results = join_all(handles).await; - let mut n_headers = 0; - - if let Some((_, maybe_header)) = results - .into_iter() - .filter_map(|res| { - // ignore join error and timeouts, log other errors - res.ok().and_then(|inner_res| match inner_res { - Ok(maybe_header) => { - if maybe_header.1.is_some() { - n_headers += 1; - Some(maybe_header) - } else { - // filter out 204 responses that are returned if the request - // is after the relay cutoff - None - } - } - Err(err) if err.is_timeout() => None, - Err(err) => { - error!(relay_id = relay.id.as_ref(),%err, "TG: error sending header request"); - None - } - }) - }) - .max_by_key(|(start_time, _)| *start_time) - { - debug!(relay_id = relay.id.as_ref(), n_headers, "TG: received headers from relay"); - return Ok(maybe_header); - } else { - // all requests failed - warn!(relay_id = relay.id.as_ref(), "TG: no headers received"); - - return Err(PbsError::RelayResponse { - error_msg: "no headers received".to_string(), - code: TIMEOUT_ERROR_CODE, - }); - } - } - } - - // if no timing games or no repeated send, just send one request - send_one_get_header(request_info, relay, url, timeout_left_ms) - .await - .map(|(_, maybe_header)| maybe_header) -} - -/// Handles requesting a header from a relay, decoding, and validation. -/// Used by send_timed_get_header to handle each individual request. -async fn send_one_get_header( - request_info: Arc, - relay: RelayClient, - url: Arc, - timeout_left_ms: u64, -) -> Result<(u64, Option), PbsError> { - match request_info.validation.mode { - HeaderValidationMode::None => { - // Minimal processing: extract fork and value, forward response bytes directly. - // Expensive crypto/structural validation is skipped (sigverify, parent hash, - // timestamp), but the min_bid check is applied. 
- let (start_request_time, get_header_response) = send_get_header_light( - &relay, - url, - timeout_left_ms, - (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the - * impl - * will - * modify it */ - ) - .await?; - match get_header_response { - None => Ok((start_request_time, None)), - Some(res) => { - let min_bid = request_info.validation.min_bid_wei; - if res.value < min_bid { - return Err(PbsError::Validation(ValidationError::BidTooLow { - min: min_bid, - got: res.value, - })); - } - - // Make sure the response is encoded in one of the accepted - // types since we're passing the raw response directly to the client - if !request_info.accepted_types.contains(&res.encoding_type) { - return Err(PbsError::RelayResponse { - error_msg: format!( - "relay returned unsupported encoding type for get_header in no-validation mode: {:?}", - res.encoding_type - ), - code: 406, // Not Acceptable - }); - } - Ok((start_request_time, Some(CompoundGetHeaderResponse::Light(res)))) - } - } - } - _ => { - // Full processing: decode full response and validate - let (start_request_time, get_header_response) = send_get_header_full( - &relay, - url, - timeout_left_ms, - (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the - * impl - * will - * modify it */ - ) - .await?; - let get_header_response = match get_header_response { - None => { - // Break if there's no header - return Ok((start_request_time, None)); - } - Some(res) => res, - }; - - // Extract the basic header data needed for validation - let header_data = match &get_header_response.data.message.header() { - ExecutionPayloadHeaderRef::Bellatrix(_) | - ExecutionPayloadHeaderRef::Capella(_) | - ExecutionPayloadHeaderRef::Deneb(_) | - ExecutionPayloadHeaderRef::Gloas(_) => { - Err(PbsError::Validation(ValidationError::UnsupportedFork)) - } - ExecutionPayloadHeaderRef::Electra(res) => Ok(HeaderData { - block_hash: res.block_hash.0, - parent_hash: res.parent_hash.0, - tx_root: 
res.transactions_root, - value: *get_header_response.value(), - timestamp: res.timestamp, - }), - ExecutionPayloadHeaderRef::Fulu(res) => Ok(HeaderData { - block_hash: res.block_hash.0, - parent_hash: res.parent_hash.0, - tx_root: res.transactions_root, - value: *get_header_response.value(), - timestamp: res.timestamp, - }), - }?; - - // Validate the header - let chain = request_info.chain; - let params = &request_info.params; - let validation = &request_info.validation; - validate_header_data( - &header_data, - chain, - params.parent_hash, - validation.min_bid_wei, - params.slot, - )?; - - // Validate the relay signature - if !validation.skip_sigverify { - validate_signature( - chain, - relay.pubkey(), - get_header_response.data.message.pubkey(), - &get_header_response.data.message, - &get_header_response.data.signature, - )?; - } - - // Validate the parent block if enabled - if validation.mode == HeaderValidationMode::Extra { - let parent_block = validation.parent_block.read(); - if let Some(parent_block) = parent_block.as_ref() { - extra_validation(parent_block, &get_header_response)?; - } else { - warn!( - relay_id = relay.id.as_ref(), - "parent block not found, skipping extra validation" - ); - } - } - - Ok(( - start_request_time, - Some(CompoundGetHeaderResponse::Full(Box::new(get_header_response))), - )) - } - } -} - -/// Send and decode a full get_header response, will all of the fields. 
-async fn send_get_header_full( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // Send the request - let (start_request_time, info) = - send_get_header_impl(relay, url, timeout_left_ms, headers).await?; - let info = match info { - Some(info) => info, - None => { - return Ok((start_request_time, None)); - } - }; - - // Decode the response - let get_header_response = match info.content_type { - EncodingType::Json => decode_json_payload(&info.response_bytes)?, - EncodingType::Ssz => { - let fork = info.fork.ok_or(PbsError::RelayResponse { - error_msg: "relay did not provide consensus version header for ssz payload" - .to_string(), - code: info.code.as_u16(), - })?; - decode_ssz_payload(&info.response_bytes, fork)? - } - }; - - // Log and return - debug!( - relay_id = info.relay_id.as_ref(), - header_size_bytes = info.response_bytes.len(), - latency = ?info.request_latency, - version =? get_header_response.version, - value_eth = format_ether(*get_header_response.value()), - block_hash = %get_header_response.block_hash(), - content_type = ?info.content_type, - "received new header" - ); - Ok((start_request_time, Some(get_header_response))) -} - -/// Send a get_header request and decode only the fork and bid value from the -/// response, leaving the raw bytes intact for direct forwarding to the caller. -/// Used in `HeaderValidationMode::None` where expensive crypto/structural -/// checks are skipped. 
-async fn send_get_header_light( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // Send the request - let (start_request_time, info) = - send_get_header_impl(relay, url, timeout_left_ms, headers).await?; - let info = match info { - Some(info) => info, - None => { - return Ok((start_request_time, None)); - } - }; - - // Decode the value / fork from the response - let (fork, value) = match info.content_type { - EncodingType::Json => get_light_info_from_json(&info.response_bytes)?, - EncodingType::Ssz => { - let fork = info.fork.ok_or(PbsError::RelayResponse { - error_msg: "relay did not provide consensus version header for ssz payload" - .to_string(), - code: info.code.as_u16(), - })?; - (fork, get_bid_value_from_signed_builder_bid_ssz(&info.response_bytes, fork)?) - } - }; - - // Log and return - debug!( - relay_id = info.relay_id.as_ref(), - header_size_bytes = info.response_bytes.len(), - latency = ?info.request_latency, - version =? fork, - value_eth = format_ether(value), - content_type = ?info.content_type, - "received new header (light processing)" - ); - Ok(( - start_request_time, - Some(LightGetHeaderResponse { - version: fork, - value, - raw_bytes: info.response_bytes, - encoding_type: info.content_type, - }), - )) -} - -/// Sends a get_header request to a relay, returning the response, the time the -/// request was started, and the encoding type of the response (if any). -/// Used by send_one_get_header to perform the actual request submission. 
-async fn send_get_header_impl( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - mut headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // the timestamp in the header is the consensus block time which is fixed, - // use the beginning of the request as proxy to make sure we use only the - // last one received - let start_request = Instant::now(); - let start_request_time = utcnow_ms(); - headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(start_request_time)); - - // The timeout header indicating how long a relay has to respond, so they can - // minimize timing games without losing the bid - headers.insert(HEADER_TIMEOUT_MS, HeaderValue::from(timeout_left_ms)); - - let res = match relay - .client - .get(url.as_ref().clone()) - .timeout(Duration::from_millis(timeout_left_ms)) - .headers(headers) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[TIMEOUT_ERROR_CODE_STR, GET_HEADER_ENDPOINT_TAG, &relay.id]) - .inc(); - return Err(err.into()); - } - }; - - // Log the response code and latency - let code = res.status(); - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[GET_HEADER_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), GET_HEADER_ENDPOINT_TAG, &relay.id]).inc(); - - // According to the spec, OK is the only allowed success code so this can break - // early - if code != StatusCode::OK { - if code == StatusCode::NO_CONTENT { - let response_bytes = - read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; - debug!( - relay_id = relay.id.as_ref(), - ?code, - latency = ?request_latency, - response = ?response_bytes, - "no header from relay" - ); - return Ok((start_request_time, None)); - } else { - return Err(PbsError::RelayResponse { - error_msg: format!("unexpected status code from relay: {code}"), - code: code.as_u16(), - }); - } - } - - // Get the content type 
- let content_type = match res.headers().get(CONTENT_TYPE) { - None => { - // Assume a missing content type means JSON; shouldn't happen in practice with - // any respectable HTTP server but just in case - EncodingType::Json - } - Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { - error_msg: format!("cannot decode content-type header: {e}").to_string(), - code: (code.as_u16()), - })? { - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { - EncodingType::Ssz - } - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { - EncodingType::Json - } - header_str => { - return Err(PbsError::RelayResponse { - error_msg: format!("unsupported content type: {header_str}"), - code: code.as_u16(), - }) - } - }, - }; - - // Decode the body - let fork = get_consensus_version_header(res.headers()); - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; - Ok(( - start_request_time, - Some(GetHeaderResponseInfo { - relay_id: relay.id.clone(), - response_bytes, - content_type, - fork, - code, - request_latency, - }), - )) -} - -/// Decode a JSON-encoded get_header response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the value of a builder bid and the fork name from a get_header JSON -/// response (used for light-level processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { - #[derive(Deserialize)] - struct LightBuilderBid { - #[serde(with = "serde_utils::quoted_u256")] - pub value: U256, - } - - #[derive(Deserialize)] - struct LightSignedBuilderBid { - pub message: LightBuilderBid, - } - - #[derive(Deserialize)] - struct LightHeaderResponse { - version: ForkName, - data: 
LightSignedBuilderBid, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded get_header response -fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) -} - -struct HeaderData { - block_hash: B256, - parent_hash: B256, - tx_root: B256, - value: U256, - timestamp: u64, -} - -fn validate_header_data( - header_data: &HeaderData, - chain: Chain, - expected_parent_hash: B256, - minimum_bid_wei: U256, - slot: u64, -) -> Result<(), ValidationError> { - if header_data.block_hash == B256::ZERO { - return Err(ValidationError::EmptyBlockhash); - } - - if expected_parent_hash != header_data.parent_hash { - return Err(ValidationError::ParentHashMismatch { - expected: expected_parent_hash, - got: header_data.parent_hash, - }); - } - - if header_data.tx_root == EMPTY_TX_ROOT_HASH { - return Err(ValidationError::EmptyTxRoot); - } - - if header_data.value < minimum_bid_wei { - return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); - } - - let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); - if expected_timestamp != header_data.timestamp { - return Err(ValidationError::TimestampMismatch { - expected: expected_timestamp, - got: header_data.timestamp, - }); - } - - Ok(()) -} - -fn validate_signature( - chain: Chain, - expected_relay_pubkey: &BlsPublicKey, - received_relay_pubkey: &BlsPublicKeyBytes, - message: &T, - signature: &BlsSignature, -) -> Result<(), ValidationError> { - if 
expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { - return Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(expected_relay_pubkey), - got: *received_relay_pubkey, - }); - } - - if !verify_signed_message( - chain, - expected_relay_pubkey, - &message, - signature, - None, - &B32::from(APPLICATION_BUILDER_DOMAIN), - ) { - return Err(ValidationError::Sigverify); - } - - Ok(()) -} - -fn extra_validation( - parent_block: &Block, - signed_header: &GetHeaderResponse, -) -> Result<(), ValidationError> { - if signed_header.block_number() != parent_block.header.number + 1 { - return Err(ValidationError::BlockNumberMismatch { - parent: parent_block.header.number, - header: signed_header.block_number(), - }); - } - - if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { - return Err(ValidationError::GasLimit { - parent: parent_block.header.gas_limit, - header: signed_header.gas_limit(), - }); - }; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use std::{fs, path::Path}; - - use alloy::primitives::{B256, U256}; - use cb_common::{ - pbs::*, - signature::sign_builder_message, - types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, - utils::{TestRandomSeed, timestamp_of_slot_start_sec}, - }; - use ssz::Encode; - - use super::{validate_header_data, *}; - - #[test] - fn test_validate_header() { - let slot = 5; - let parent_hash = B256::from_slice(&[1; 32]); - let chain = Chain::Holesky; - let min_bid = U256::from(10); - - let mut mock_header_data = HeaderData { - block_hash: B256::default(), - parent_hash: B256::default(), - tx_root: EMPTY_TX_ROOT_HASH, - value: U256::default(), - timestamp: 0, - }; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyBlockhash) - ); - - mock_header_data.block_hash.0[1] = 1; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - 
Err(ValidationError::ParentHashMismatch { - expected: parent_hash, - got: B256::default() - }) - ); - - mock_header_data.parent_hash = parent_hash; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyTxRoot) - ); - - mock_header_data.tx_root = Default::default(); - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) - ); - - mock_header_data.value = U256::from(11); - - let expected = timestamp_of_slot_start_sec(slot, chain); - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::TimestampMismatch { expected, got: 0 }) - ); - - mock_header_data.timestamp = expected; - - assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); - } - - #[test] - fn test_validate_signature() { - let secret_key = BlsSecretKey::test_random(); - let pubkey = secret_key.public_key(); - let wrong_pubkey = BlsPublicKeyBytes::test_random(); - let wrong_signature = BlsSignature::test_random(); - - let message = B256::random(); - - let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); - - assert_eq!( - validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), - Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(&pubkey), - got: wrong_pubkey - }) - ); - - assert!(matches!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &wrong_signature - ), - Err(ValidationError::Sigverify) - )); - - assert!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &signature - ) - .is_ok() - ); - } - - #[test] - fn test_ssz_value_extraction() { - for fork_name in ForkName::list_all() { - match fork_name { - // Handle forks that didn't have builder bids yet - 
ForkName::Altair | ForkName::Base => continue, - - // Handle supported forks - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Electra | - ForkName::Fulu => {} - - // Skip unsupported forks - ForkName::Gloas => continue, - } - - // Load get_header JSON from test data - let fork_name_str = fork_name.to_string().to_lowercase(); - let path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); - let path = Path::new(path_str.as_str()); - let json_bytes = fs::read(path).expect("file not found"); - let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); - - // Extract the bid value from the SSZ - let encoded = decoded.data.as_ssz_bytes(); - let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) - .expect("failed to extract bid value from SSZ"); - - // Compare to the original value - println!("Testing fork: {}", fork_name); - println!("Original value: {}", decoded.value()); - println!("Extracted value: {}", bid_value); - assert_eq!(*decoded.value(), bid_value); - } - } -} diff --git a/crates/pbs/src/mev_boost/mod.rs b/crates/pbs/src/mev_boost/mod.rs deleted file mode 100644 index 81dc4bf6..00000000 --- a/crates/pbs/src/mev_boost/mod.rs +++ /dev/null @@ -1,76 +0,0 @@ -mod get_header; -mod register_validator; -mod reload; -mod status; -mod submit_block; - -use alloy::primitives::U256; -use cb_common::{ - pbs::{GetHeaderResponse, SubmitBlindedBlockResponse}, - utils::EncodingType, -}; -pub use get_header::get_header; -use lh_types::ForkName; -pub use register_validator::register_validator; -pub use reload::reload; -pub use status::get_status; -pub use submit_block::submit_block; - -/// Enum that handles different GetHeader response types based on the level of -/// validation required -pub enum CompoundGetHeaderResponse { - /// Standard response type, fully parsing the response from a relay into a - /// complete response struct - Full(Box), - - /// Light response type, only extracting the 
fork and value from the builder - /// bid with the entire (undecoded) payload for forwarding - Light(LightGetHeaderResponse), -} - -/// Core details of a GetHeaderResponse, used for light processing when -/// validation mode is set to none. -#[derive(Clone)] -pub struct LightGetHeaderResponse { - /// The fork name for the bid - pub version: ForkName, - - /// The bid value in wei - pub value: U256, - - /// The raw bytes of the response, for forwarding to the caller - pub raw_bytes: Vec, - - /// The format the response bytes are encoded with - pub encoding_type: EncodingType, -} - -/// Enum that handles different SubmitBlock response types based on the level of -/// validation required -pub enum CompoundSubmitBlockResponse { - /// Standard response type, fully parsing the response from a relay into a - /// complete response struct - Full(Box), - - /// Light response type, only extracting the fork from the response with the - /// entire (undecoded) payload for forwarding - Light(LightSubmitBlockResponse), - - /// Response with no body, used for v2 requests when the relay does not - /// return any content intentionally - EmptyBody, -} - -/// Core details of a SubmitBlockResponse, used for light processing when -/// validation mode is set to none. 
-#[derive(Clone, Debug)] -pub struct LightSubmitBlockResponse { - /// The fork name for the bid - pub version: ForkName, - - /// The raw bytes of the response, for forwarding to the caller - pub raw_bytes: Vec, - - /// The format the response bytes are encoded with - pub encoding_type: EncodingType, -} diff --git a/crates/pbs/src/mev_boost/register_validator.rs b/crates/pbs/src/mev_boost/register_validator.rs deleted file mode 100644 index 15f68416..00000000 --- a/crates/pbs/src/mev_boost/register_validator.rs +++ /dev/null @@ -1,211 +0,0 @@ -use std::time::{Duration, Instant}; - -use alloy::primitives::Bytes; -use axum::http::{HeaderMap, HeaderValue}; -use cb_common::{ - pbs::{HEADER_START_TIME_UNIX_MS, RelayClient, error::PbsError}, - utils::{get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, -}; -use eyre::bail; -use futures::{ - FutureExt, - future::{join_all, select_ok}, -}; -use reqwest::header::{CONTENT_TYPE, USER_AGENT}; -use tracing::{Instrument, debug, error}; -use url::Url; - -use crate::{ - constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, -}; - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator -/// Returns 200 if at least one relay returns 200, else 503 -pub async fn register_validator( - registrations: Vec, - req_headers: HeaderMap, - state: PbsState, -) -> eyre::Result<()> { - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers - .insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from_str(&utcnow_ms().to_string())?); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // prepare the body in advance, ugly dyn - let bodies: Box> = - if let Some(batch_size) = state.config.pbs_config.validator_registration_batch_size { - Box::new(registrations.chunks(batch_size).map(|batch| { - // SAFETY: unwrap is ok because we're serializing a 
&[serde_json::Value] - let body = serde_json::to_vec(batch).unwrap(); - (batch.len(), Bytes::from(body)) - })) - } else { - let body = serde_json::to_vec(®istrations).unwrap(); - Box::new(std::iter::once((registrations.len(), Bytes::from(body)))) - }; - send_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - - let mut handles = Vec::with_capacity(state.all_relays().len()); - - for (n_regs, body) in bodies { - for relay in state.all_relays().iter().cloned() { - handles.push( - tokio::spawn( - send_register_validator_with_timeout( - n_regs, - body.clone(), - relay, - send_headers.clone(), - state.pbs_config().timeout_register_validator_ms, - state.pbs_config().register_validator_retry_limit, - ) - .in_current_span(), - ) - .map(|join_result| match join_result { - Ok(res) => res, - Err(err) => Err(PbsError::TokioJoinError(err)), - }), - ); - } - } - - if state.pbs_config().wait_all_registrations { - // wait for all relays registrations to complete - let results = join_all(handles).await; - if results.into_iter().any(|res| res.is_ok()) { - Ok(()) - } else { - bail!("No relay passed register_validator successfully") - } - } else { - // return once first completes, others proceed in background - let result = select_ok(handles).await; - match result { - Ok(_) => Ok(()), - Err(_) => bail!("No relay passed register_validator successfully"), - } - } -} - -/// Register validator to relay, retry connection errors until the -/// given timeout has passed -async fn send_register_validator_with_timeout( - n_regs: usize, - body: Bytes, - relay: RelayClient, - headers: HeaderMap, - timeout_ms: u64, - retry_limit: u32, -) -> Result<(), PbsError> { - let url = relay.register_validator_url()?; - let mut remaining_timeout_ms = timeout_ms; - let mut retry = 0; - let mut backoff = Duration::from_millis(250); - - loop { - let start_request = Instant::now(); - match send_register_validator( - url.clone(), - n_regs, - body.clone(), - &relay, - headers.clone(), - 
remaining_timeout_ms, - retry, - ) - .await - { - Ok(_) => return Ok(()), - - Err(err) if err.should_retry() => { - retry += 1; - if retry >= retry_limit { - error!( - relay_id = relay.id.as_str(), - retry, "reached retry limit for validator registration" - ); - return Err(err); - } - tokio::time::sleep(backoff).await; - backoff += Duration::from_millis(250); - - remaining_timeout_ms = - timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); - - if remaining_timeout_ms == 0 { - return Err(err); - } - } - - Err(err) => return Err(err), - }; - } -} - -async fn send_register_validator( - url: Url, - n_regs: usize, - body: Bytes, - relay: &RelayClient, - headers: HeaderMap, - timeout_ms: u64, - retry: u32, -) -> Result<(), PbsError> { - let start_request = Instant::now(); - let res = match relay - .client - .post(url) - .timeout(Duration::from_millis(timeout_ms)) - .headers(headers) - .body(body.0) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[ - TIMEOUT_ERROR_CODE_STR, - REGISTER_VALIDATOR_ENDPOINT_TAG, - &relay.id, - ]) - .inc(); - return Err(err.into()); - } - }; - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - - let code = res.status(); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .inc(); - - if !code.is_success() { - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; - let err = PbsError::RelayResponse { - error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), - code: code.as_u16(), - }; - - // error here since we check if any success above - error!(relay_id = relay.id.as_ref(), retry, %err, "failed registration"); - return Err(err); - }; - - debug!( - relay_id = relay.id.as_ref(), - retry, - ?code, - latency = ?request_latency, - num_registrations = n_regs, - 
"registration successful" - ); - - Ok(()) -} diff --git a/crates/pbs/src/mev_boost/reload.rs b/crates/pbs/src/mev_boost/reload.rs deleted file mode 100644 index adfab89f..00000000 --- a/crates/pbs/src/mev_boost/reload.rs +++ /dev/null @@ -1,27 +0,0 @@ -use cb_common::config::load_pbs_config; -use tracing::warn; - -use crate::{BuilderApiState, PbsState}; - -/// Reload the PBS state with the latest configuration in the config file -/// Returns 200 if successful or 500 if failed -pub async fn reload(state: PbsState) -> eyre::Result> { - let (pbs_config, config_path) = load_pbs_config(None).await?; - let new_state = PbsState::new(pbs_config, config_path).with_data(state.data); - - if state.config.pbs_config.host != new_state.config.pbs_config.host { - warn!( - "Host change for PBS module require a full restart. Old: {}, New: {}", - state.config.pbs_config.host, new_state.config.pbs_config.host - ); - } - - if state.config.pbs_config.port != new_state.config.pbs_config.port { - warn!( - "Port change for PBS module require a full restart. 
Old: {}, New: {}", - state.config.pbs_config.port, new_state.config.pbs_config.port - ); - } - - Ok(new_state) -} diff --git a/crates/pbs/src/mev_boost/status.rs b/crates/pbs/src/mev_boost/status.rs deleted file mode 100644 index c4a8cfed..00000000 --- a/crates/pbs/src/mev_boost/status.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::time::{Duration, Instant}; - -use axum::http::HeaderMap; -use cb_common::{ - pbs::{RelayClient, error::PbsError}, - utils::{get_user_agent_with_version, read_chunked_body_with_max}, -}; -use futures::future::select_ok; -use reqwest::header::USER_AGENT; -use tracing::{debug, error}; - -use crate::{ - constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, -}; - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/status -/// Broadcasts a status check to all relays and returns 200 if at least one -/// relay returns 200 -pub async fn get_status( - req_headers: HeaderMap, - state: PbsState, -) -> eyre::Result<()> { - // If no relay check, return early - if !state.config.pbs_config.relay_check { - Ok(()) - } else { - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - let relays = state.all_relays(); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays { - handles.push(Box::pin(send_relay_check(relay, send_headers.clone()))); - } - - // return ok if at least one relay returns 200 - let results = select_ok(handles).await; - match results { - Ok(_) => Ok(()), - Err(err) => Err(err.into()), - } - } -} - -async fn send_relay_check(relay: &RelayClient, headers: HeaderMap) -> Result<(), PbsError> { - let url = relay.get_status_url()?; - - let start_request = Instant::now(); - let res = match relay - .client - .get(url) - .timeout(Duration::from_secs(30)) - .headers(headers) - .send() - .await - { - Ok(res) => res, - 
Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[TIMEOUT_ERROR_CODE_STR, STATUS_ENDPOINT_TAG, &relay.id]) - .inc(); - return Err(err.into()); - } - }; - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[STATUS_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - - let code = res.status(); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), STATUS_ENDPOINT_TAG, &relay.id]).inc(); - - if !code.is_success() { - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; - let err = PbsError::RelayResponse { - error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), - code: code.as_u16(), - }; - - error!(relay_id = relay.id.as_ref(),%err, "status failed"); - return Err(err); - }; - - debug!(relay_id = relay.id.as_ref(),?code, latency = ?request_latency, "status passed"); - - Ok(()) -} diff --git a/crates/pbs/src/mev_boost/submit_block.rs b/crates/pbs/src/mev_boost/submit_block.rs deleted file mode 100644 index 11e0e289..00000000 --- a/crates/pbs/src/mev_boost/submit_block.rs +++ /dev/null @@ -1,732 +0,0 @@ -use std::{ - collections::HashSet, - sync::Arc, - time::{Duration, Instant}, -}; - -use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; -use axum::http::{HeaderMap, HeaderValue}; -use cb_common::{ - config::BlockValidationMode, - pbs::{ - BlindedBeaconBlock, BlobsBundle, BuilderApiVersion, ForkName, ForkVersionDecode, - HEADER_START_TIME_UNIX_MS, KzgCommitments, PayloadAndBlobs, RelayClient, - SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, - error::{PbsError, ValidationError}, - }, - utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, get_consensus_version_header, - get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms, - }, -}; -use futures::{FutureExt, future::select_ok}; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; -use ssz::Encode; -use tracing::{debug, warn}; -use url::Url; - 
-use crate::{ - CompoundSubmitBlockResponse, LightSubmitBlockResponse, TIMEOUT_ERROR_CODE_STR, - constants::{MAX_SIZE_SUBMIT_BLOCK_RESPONSE, SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, -}; - -/// Info about a proposal submission request. -/// Sent from submit_block to the submit_block_with_timeout function. -#[derive(Clone)] -struct ProposalInfo { - /// The signed blinded block to submit - signed_blinded_block: Arc, - - /// Common baseline of headers to send with each request - headers: Arc, - - /// The version of the submit_block route being used - api_version: BuilderApiVersion, - - /// How to validate the block returned by the relay - validation_mode: BlockValidationMode, - - /// The accepted encoding types from the original request - accepted_types: HashSet, -} - -/// Used interally to provide info and context about a submit_block request and -/// its response -struct SubmitBlockResponseInfo { - /// The raw body of the response - response_bytes: Vec, - - /// The content type the response is encoded with - content_type: EncodingType, - - /// Which fork the response bid is for (if provided as a header, rather than - /// part of the body) - fork: Option, - - /// The status code of the response, for logging - code: StatusCode, - - /// The round-trip latency of the request - request_latency: Duration, -} - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and -/// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to -/// distinguish between the two. 
-pub async fn submit_block( - signed_blinded_block: Arc, - req_headers: HeaderMap, - state: PbsState, - api_version: BuilderApiVersion, - accepted_types: HashSet, -) -> eyre::Result { - debug!(?req_headers, "received headers"); - - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().block_validation_mode; - let accept_types = match mode { - BlockValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types).unwrap()); - - // Send requests to all relays concurrently - let proposal_info = Arc::new(ProposalInfo { - signed_blinded_block, - headers: Arc::new(send_headers), - api_version, - validation_mode: mode, - accepted_types, - }); - let mut handles = Vec::with_capacity(state.all_relays().len()); - for relay in state.all_relays().iter() { - handles.push( - tokio::spawn(submit_block_with_timeout( - proposal_info.clone(), - relay.clone(), - state.pbs_config().timeout_get_payload_ms, - )) - .map(|join_result| match join_result { - Ok(res) => res, - Err(err) => Err(PbsError::TokioJoinError(err)), - }), - ); - } - - let results = select_ok(handles).await; - match results { - Ok((res, _)) => Ok(res), - Err(err) => Err(err.into()), - } -} - -/// Submit blinded block to relay, retry connection errors until the -/// given timeout has passed -async fn submit_block_with_timeout( - proposal_info: Arc, - relay: RelayClient, - timeout_ms: u64, 
-) -> Result { - let mut url = Arc::new(relay.submit_block_url(proposal_info.api_version)?); - let mut remaining_timeout_ms = timeout_ms; - let mut retry = 0; - let mut backoff = Duration::from_millis(250); - let mut request_api_version = proposal_info.api_version; - - loop { - let start_request = Instant::now(); - match send_submit_block( - proposal_info.clone(), - url.clone(), - &relay, - remaining_timeout_ms, - retry, - request_api_version, - ) - .await - { - Ok(response) => { - // If the original request was for v2 but we had to fall back to v1, return a v2 - // response - if request_api_version == BuilderApiVersion::V1 && - proposal_info.api_version != request_api_version - { - return Ok(CompoundSubmitBlockResponse::EmptyBody); - } - return Ok(response); - } - - Err(err) if err.should_retry() => { - tokio::time::sleep(backoff).await; - backoff += Duration::from_millis(250); - - remaining_timeout_ms = - timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); - - if remaining_timeout_ms == 0 { - return Err(err); - } - } - - Err(err) - if err.is_not_found() && matches!(request_api_version, BuilderApiVersion::V2) => - { - warn!( - relay_id = relay.id.as_ref(), - "relay does not support v2 endpoint, retrying with v1" - ); - url = Arc::new(relay.submit_block_url(BuilderApiVersion::V1)?); - request_api_version = BuilderApiVersion::V1; - } - - Err(err) => return Err(err), - }; - - retry += 1; - } -} - -// submits blinded signed block and expects the execution payload + blobs bundle -// back -#[allow(clippy::too_many_arguments)] -async fn send_submit_block( - proposal_info: Arc, - url: Arc, - relay: &RelayClient, - timeout_ms: u64, - retry: u32, - api_version: BuilderApiVersion, -) -> Result { - match proposal_info.validation_mode { - BlockValidationMode::None => { - // No validation so do some light processing and forward the response directly - let response = - send_submit_block_light(proposal_info.clone(), url, relay, timeout_ms, retry) - .await?; - 
match response { - None => Ok(CompoundSubmitBlockResponse::EmptyBody), - Some(res) => { - // Make sure the response is encoded in one of the accepted - // types since we're passing the raw response directly to the client - if !proposal_info.accepted_types.contains(&res.encoding_type) { - return Err(PbsError::RelayResponse { - error_msg: format!( - "relay returned unsupported encoding type for submit_block in no-validation mode: {:?}", - res.encoding_type - ), - code: 406, // Not Acceptable - }); - } - Ok(CompoundSubmitBlockResponse::Light(res)) - } - } - } - _ => { - // Full processing: decode full response and validate - let response = send_submit_block_full( - proposal_info.clone(), - url, - relay, - timeout_ms, - retry, - api_version, - ) - .await?; - let response = match response { - None => { - // v2 request with no body - return Ok(CompoundSubmitBlockResponse::EmptyBody); - } - Some(res) => res, - }; - // Extract the info needed for validation - let got_block_hash = response.data.execution_payload.block_hash().0; - - // request has different type so cant be deserialized in the wrong version, - // response has a "version" field - match &proposal_info.signed_blinded_block.message() { - BlindedBeaconBlock::Electra(blinded_block) => { - let expected_block_hash = - blinded_block.body.execution_payload.execution_payload_header.block_hash.0; - let expected_commitments = &blinded_block.body.blob_kzg_commitments; - - validate_unblinded_block( - expected_block_hash, - got_block_hash, - expected_commitments, - &response.data.blobs_bundle, - response.version, - ) - } - - BlindedBeaconBlock::Fulu(blinded_block) => { - let expected_block_hash = - blinded_block.body.execution_payload.execution_payload_header.block_hash.0; - let expected_commitments = &blinded_block.body.blob_kzg_commitments; - - validate_unblinded_block( - expected_block_hash, - got_block_hash, - expected_commitments, - &response.data.blobs_bundle, - response.version, - ) - } - - _ => return 
Err(PbsError::Validation(ValidationError::UnsupportedFork)), - }?; - Ok(CompoundSubmitBlockResponse::Full(Box::new(response))) - } - } -} - -/// Send and fully process a submit_block request, returning a complete decoded -/// response -async fn send_submit_block_full( - proposal_info: Arc, - url: Arc, - relay: &RelayClient, - timeout_ms: u64, - retry: u32, - api_version: BuilderApiVersion, -) -> Result, PbsError> { - // Send the request - let block_response = send_submit_block_impl( - relay, - url, - timeout_ms, - (*proposal_info.headers).clone(), - &proposal_info.signed_blinded_block, - retry, - api_version, - ) - .await?; - - // If this is not v1, there's no body to decode - if api_version != BuilderApiVersion::V1 { - return Ok(None); - } - - // Decode the payload based on content type - let decoded_response = match block_response.content_type { - EncodingType::Json => decode_json_payload(&block_response.response_bytes)?, - EncodingType::Ssz => { - let fork = match block_response.fork { - Some(fork) => fork, - None => { - return Err(PbsError::RelayResponse { - error_msg: "missing fork version header in SSZ submit_block response" - .to_string(), - code: block_response.code.as_u16(), - }); - } - }; - decode_ssz_payload(&block_response.response_bytes, fork)? 
- } - }; - - // Log and return - debug!( - relay_id = relay.id.as_ref(), - retry, - latency = ?block_response.request_latency, - version =% decoded_response.version, - "received unblinded block" - ); - - Ok(Some(decoded_response)) -} - -/// Send and lightly process a submit_block request, minimizing the amount of -/// decoding and validation done -async fn send_submit_block_light( - proposal_info: Arc, - url: Arc, - relay: &RelayClient, - timeout_ms: u64, - retry: u32, -) -> Result, PbsError> { - // Send the request - let block_response = send_submit_block_impl( - relay, - url, - timeout_ms, - (*proposal_info.headers).clone(), - &proposal_info.signed_blinded_block, - retry, - proposal_info.api_version, - ) - .await?; - - // If this is not v1, there's no body to decode - if proposal_info.api_version != BuilderApiVersion::V1 { - return Ok(None); - } - - // Decode the payload based on content type - let fork = match block_response.content_type { - EncodingType::Json => get_light_info_from_json(&block_response.response_bytes)?, - EncodingType::Ssz => match block_response.fork { - Some(fork) => fork, - None => { - return Err(PbsError::RelayResponse { - error_msg: "missing fork version header in SSZ submit_block response" - .to_string(), - code: block_response.code.as_u16(), - }); - } - }, - }; - - // Log and return - debug!( - relay_id = relay.id.as_ref(), - retry, - latency = ?block_response.request_latency, - version =% fork, - "received unblinded block (light processing)" - ); - - Ok(Some(LightSubmitBlockResponse { - version: fork, - encoding_type: block_response.content_type, - raw_bytes: block_response.response_bytes, - })) -} - -/// Sends the actual HTTP request to the relay's submit_block endpoint, -/// returning the response (if applicable), the round-trip time, and the -/// encoding type used for the body (if any). Used by send_submit_block. 
-async fn send_submit_block_impl( - relay: &RelayClient, - url: Arc, - timeout_ms: u64, - headers: HeaderMap, - signed_blinded_block: &SignedBlindedBeaconBlock, - retry: u32, - api_version: BuilderApiVersion, -) -> Result { - let start_request = Instant::now(); - - // Try SSZ first - let mut res = match relay - .client - .post(url.as_ref().clone()) - .timeout(Duration::from_millis(timeout_ms)) - .headers(headers.clone()) - .body(signed_blinded_block.as_ssz_bytes()) - .header(CONTENT_TYPE, EncodingType::Ssz.to_string()) - .header(CONSENSUS_VERSION_HEADER, signed_blinded_block.fork_name_unchecked().to_string()) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[ - TIMEOUT_ERROR_CODE_STR, - SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, - &relay.id, - ]) - .inc(); - return Err(err.into()); - } - }; - - // If we got a client error, retry with JSON - the spec says that this should be - // a 406 or 415, but we're a little more permissive here - if res.status().is_client_error() { - warn!( - relay_id = relay.id.as_ref(), - "relay does not support SSZ, resubmitting block with JSON content-type" - ); - res = match relay - .client - .post(url.as_ref().clone()) - .timeout(Duration::from_millis(timeout_ms)) - .headers(headers) - .body(serde_json::to_vec(&signed_blinded_block).unwrap()) - .header(CONTENT_TYPE, EncodingType::Json.to_string()) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[ - TIMEOUT_ERROR_CODE_STR, - SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, - &relay.id, - ]) - .inc(); - return Err(err.into()); - } - }; - } - - // Log the response code and latency - let code = res.status(); - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .inc(); - - // If this was 
API v2 and succeeded then we can just return here - if api_version != BuilderApiVersion::V1 { - debug!( - relay_id = relay.id.as_ref(), - retry, - latency = ?request_latency, - "received 202 Accepted for v2 submit_block" - ); - - match code { - StatusCode::ACCEPTED => { - return Ok(SubmitBlockResponseInfo { - response_bytes: Vec::new(), - content_type: EncodingType::Json, // dummy value - fork: None, - code, - request_latency, - }); - } - StatusCode::OK => { - warn!( - relay_id = relay.id.as_ref(), - "relay sent OK response for v2 submit_block, expected 202 Accepted" - ); - return Ok(SubmitBlockResponseInfo { - response_bytes: Vec::new(), - content_type: EncodingType::Json, // dummy value - fork: None, - code, - request_latency, - }); - } - _ => { - return Err(PbsError::RelayResponse { - error_msg: format!( - "relay sent unexpected code for builder route v2 {}: {code}", - relay.id.as_ref() - ), - code: code.as_u16(), - }); - } - } - } - - // If the code is not OK, return early - if code != StatusCode::OK { - let response_bytes = - read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; - let err = PbsError::RelayResponse { - error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), - code: code.as_u16(), - }; - - // we requested the payload from all relays, but some may have not received it - warn!(relay_id = relay.id.as_ref(), %err, "failed to get payload (this might be ok if other relays have it)"); - return Err(err); - } - - // We're on v1 so decode the payload normally - get the content type - let content_type = match res.headers().get(CONTENT_TYPE) { - None => { - // Assume a missing content type means JSON; shouldn't happen in practice with - // any respectable HTTP server but just in case - EncodingType::Json - } - Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { - error_msg: format!("cannot decode content-type header: {e}").to_string(), - code: (code.as_u16()), - })? 
{ - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { - EncodingType::Ssz - } - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { - EncodingType::Json - } - header_str => { - return Err(PbsError::RelayResponse { - error_msg: format!("unsupported content type: {header_str}"), - code: code.as_u16(), - }) - } - }, - }; - - // Decode the body - let fork = get_consensus_version_header(res.headers()); - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; - Ok(SubmitBlockResponseInfo { response_bytes, content_type, fork, code, request_latency }) -} - -/// Decode a JSON-encoded submit_block response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the fork name from a submit_block JSON response (used for light -/// processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result { - #[derive(Deserialize)] - struct LightSubmitBlockResponse { - version: ForkName, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed.version), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded submit_block response -fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) -} - -fn validate_unblinded_block( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - 
blobs_bundle: &BlobsBundle, - fork_name: ForkName, -) -> Result<(), PbsError> { - match fork_name { - ForkName::Base | - ForkName::Altair | - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), - ForkName::Electra => validate_unblinded_block_electra( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - ForkName::Fulu => validate_unblinded_block_fulu( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - } -} - -fn validate_unblinded_block_electra( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() != blobs_bundle.proofs.len() - { - return Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} - -fn validate_unblinded_block_fulu( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - 
return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() * CELLS_PER_EXT_BLOB != blobs_bundle.proofs.len() - { - return Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} diff --git a/crates/pbs/src/routes/get_header.rs b/crates/pbs/src/routes/get_header.rs index 249b869d..f448552b 100644 --- a/crates/pbs/src/routes/get_header.rs +++ b/crates/pbs/src/routes/get_header.rs @@ -1,30 +1,66 @@ -use alloy::primitives::utils::format_ether; +use std::{ + collections::HashSet, + sync::Arc, + time::{Duration, Instant}, +}; + +use alloy::{ + primitives::{B256, U256, aliases::B32, utils::format_ether}, + providers::Provider, + rpc::types::Block, +}; use axum::{ extract::{Path, State}, http::{HeaderMap, HeaderValue}, response::IntoResponse, }; use cb_common::{ - pbs::{GetHeaderInfo, GetHeaderParams}, + config::HeaderValidationMode, + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + EMPTY_TX_ROOT_HASH, ExecutionPayloadHeaderRef, ForkName, ForkVersionDecode, GetHeaderInfo, + GetHeaderParams, GetHeaderResponse, HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, + RelayClient, SignedBuilderBid, + error::{PbsError, ValidationError}, + }, + signature::verify_signed_message, + types::{BlsPublicKey, 
BlsPublicKeyBytes, BlsSignature, Chain}, utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, get_user_agent, ms_into_slot, + CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, + get_bid_value_from_signed_builder_bid_ssz, get_consensus_version_header, get_user_agent, + get_user_agent_with_version, ms_into_slot, read_chunked_body_with_max, + timestamp_of_slot_start_sec, utcnow_ms, }, }; -use reqwest::{StatusCode, header::CONTENT_TYPE}; +use futures::future::join_all; +use parking_lot::RwLock; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; +use serde::Deserialize; use ssz::Encode; -use tracing::{error, info}; +use tokio::time::sleep; +use tracing::{Instrument, debug, error, info, warn}; +use tree_hash::TreeHash; +use url::Url; +use super::{CompoundGetHeaderResponse, LightGetHeaderResponse}; use crate::{ - CompoundGetHeaderResponse, - constants::GET_HEADER_ENDPOINT_TAG, + constants::{ + GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, + TIMEOUT_ERROR_CODE_STR, + }, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - mev_boost, - state::{BuilderApiState, PbsStateGuard}, + metrics::{ + BEACON_NODE_STATUS, RELAY_HEADER_VALUE, RELAY_LAST_SLOT, RELAY_LATENCY, RELAY_STATUS_CODE, + }, + state::{PbsState, PbsStateGuard}, + utils::check_gas_limit, }; -pub async fn handle_get_header( - State(state): State>, +pub async fn handle_get_header( + State(state): State, req_headers: HeaderMap, Path(params): Path, ) -> Result { @@ -45,7 +81,7 @@ pub async fn handle_get_header( info!(ua, ms_into_slot, "new request"); - match mev_boost::get_header(params, req_headers, state, accept_types).await { + match get_header(params, req_headers, state, accept_types).await { Ok(res) => { if let Some(max_bid) = res { BEACON_NODE_STATUS.with_label_values(&["200", GET_HEADER_ENDPOINT_TAG]).inc(); @@ -148,3 +184,973 @@ pub async fn handle_get_header( } } } + +// ── Relay logic 
────────────────────────────────────────────────────────────── + +/// Info about an incoming get_header request. +/// Sent from get_header to each send_timed_get_header call. +#[derive(Clone)] +struct RequestInfo { + /// The blockchain parameters of the get_header request (what slot it's for, + /// which pubkey is requesting it, etc) + params: GetHeaderParams, + + /// Common baseline of headers to send with each request + headers: Arc, + + /// The chain the request is for + chain: Chain, + + /// Context for validating the header returned by the relay + validation: ValidationContext, + + /// The accepted encoding types from the original request + accepted_types: HashSet, +} + +/// Used interally to provide info and context about a get_header request and +/// its response +struct GetHeaderResponseInfo { + /// ID of the relay the response came from + relay_id: Arc, + + /// The raw body of the response + response_bytes: Vec, + + /// The content type the response is encoded with + content_type: EncodingType, + + /// Which fork the response bid is for (if provided as a header, rather than + /// part of the body) + fork: Option, + + /// The status code of the response, for logging + code: StatusCode, + + /// The round-trip latency of the request + request_latency: Duration, +} + +/// Context for validating the header +#[derive(Clone)] +struct ValidationContext { + /// Whether to skip signature verification + skip_sigverify: bool, + + /// Minimum acceptable bid, in wei + min_bid_wei: U256, + + /// The mode used for response validation + mode: HeaderValidationMode, + + /// The parent block, if fetched + parent_block: Arc>>, +} + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader +/// Returns 200 if at least one relay returns 200, else 204 +pub async fn get_header( + params: GetHeaderParams, + req_headers: HeaderMap, + state: PbsState, + accepted_types: HashSet, +) -> eyre::Result> { + let parent_block = Arc::new(RwLock::new(None)); + let 
extra_validation_enabled = + state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; + if extra_validation_enabled && let Some(rpc_url) = state.pbs_config().rpc_url.clone() { + tokio::spawn( + fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()).in_current_span(), + ); + } + + let ms_into_slot = ms_into_slot(params.slot, state.config.chain); + let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); + + if let Some(mux_id) = maybe_mux_id { + debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); + } else { + debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); + } + + let max_timeout_ms = pbs_config + .timeout_get_header_ms + .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); + + if max_timeout_ms == 0 { + warn!( + ms_into_slot, + threshold = pbs_config.late_in_slot_time_ms, + "late in slot, skipping relay requests" + ); + + return Ok(None); + } + + // Use the minimum of the time left and the user provided timeout header + let max_timeout_ms = req_headers + .get(HEADER_TIMEOUT_MS) + .map(|header| match header.to_str().ok().and_then(|v| v.parse::().ok()) { + None | Some(0) => { + // Header can't be stringified, or parsed, or it's set to 0 + warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); + max_timeout_ms + } + Some(user_timeout) => user_timeout.min(max_timeout_ms), + }) + .unwrap_or(max_timeout_ms); + + // prepare headers, except for start time which is set in `send_one_get_header` + let mut send_headers = HeaderMap::new(); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().header_validation_mode; + let accept_types = match mode { + HeaderValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + 
accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert( + ACCEPT, + HeaderValue::from_str(&accept_types) + .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, + ); + + // Send requests to all relays concurrently + let slot = params.slot as i64; + let request_info = Arc::new(RequestInfo { + params, + headers: Arc::new(send_headers), + chain: state.config.chain, + validation: ValidationContext { + skip_sigverify: state.pbs_config().skip_sigverify, + min_bid_wei: state.pbs_config().min_bid_wei, + mode, + parent_block, + }, + accepted_types, + }); + let mut handles = Vec::with_capacity(relays.len()); + for relay in relays.iter() { + handles.push( + send_timed_get_header( + request_info.clone(), + relay.clone(), + ms_into_slot, + max_timeout_ms, + ) + .in_current_span(), + ); + } + + let results = join_all(handles).await; + let mut relay_bids = Vec::with_capacity(relays.len()); + for (i, res) in results.into_iter().enumerate() { + let relay_id = relays[i].id.as_str(); + + match res { + Ok(Some(res)) => { + let value = match &res { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }; + RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); + let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); + RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); + + relay_bids.push(res) + } + Ok(_) => {} + Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), + Err(err) => error!(%err, relay_id), + } + } + + let max_bid = relay_bids.into_iter().max_by_key(|bid| match bid { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }); + + 
Ok(max_bid) +} + +/// Fetch the parent block from the RPC URL for extra validation of the header. +/// Extra validation will be skipped if: +/// - relay returns header before parent block is fetched +/// - parent block is not found, eg because of a RPC delay +async fn fetch_parent_block( + rpc_url: Url, + parent_hash: B256, + parent_block: Arc>>, +) { + let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); + + debug!(%parent_hash, "fetching parent block"); + + match provider.get_block_by_hash(parent_hash).await { + Ok(maybe_block) => { + debug!(block_found = maybe_block.is_some(), "fetched parent block"); + let mut guard = parent_block.write(); + *guard = maybe_block; + } + Err(err) => { + error!(%err, "fetch failed"); + } + } +} + +async fn send_timed_get_header( + request_info: Arc, + relay: RelayClient, + ms_into_slot: u64, + mut timeout_left_ms: u64, +) -> Result, PbsError> { + let params = &request_info.params; + let url = Arc::new(relay.get_header_url(params.slot, ¶ms.parent_hash, ¶ms.pubkey)?); + + if relay.config.enable_timing_games { + if let Some(target_ms) = relay.config.target_first_request_ms { + // sleep until target time in slot + + let delay = target_ms.saturating_sub(ms_into_slot); + if delay > 0 { + debug!( + relay_id = relay.id.as_ref(), + target_ms, ms_into_slot, "TG: waiting to send first header request" + ); + timeout_left_ms = timeout_left_ms.saturating_sub(delay); + sleep(Duration::from_millis(delay)).await; + } else { + debug!( + relay_id = relay.id.as_ref(), + target_ms, ms_into_slot, "TG: request already late enough in slot" + ); + } + } + + if let Some(send_freq_ms) = relay.config.frequency_get_header_ms { + let mut handles = Vec::new(); + + debug!( + relay_id = relay.id.as_ref(), + send_freq_ms, timeout_left_ms, "TG: sending multiple header requests" + ); + + loop { + handles.push(tokio::spawn( + send_one_get_header( + request_info.clone(), + relay.clone(), + url.clone(), + timeout_left_ms, + ) + 
.in_current_span(), + )); + + if timeout_left_ms > send_freq_ms { + // enough time for one more + timeout_left_ms = timeout_left_ms.saturating_sub(send_freq_ms); + sleep(Duration::from_millis(send_freq_ms)).await; + } else { + break; + } + } + + let results = join_all(handles).await; + let mut n_headers = 0; + + if let Some((_, maybe_header)) = results + .into_iter() + .filter_map(|res| { + // ignore join error and timeouts, log other errors + res.ok().and_then(|inner_res| match inner_res { + Ok(maybe_header) => { + if maybe_header.1.is_some() { + n_headers += 1; + Some(maybe_header) + } else { + // filter out 204 responses that are returned if the request + // is after the relay cutoff + None + } + } + Err(err) if err.is_timeout() => None, + Err(err) => { + error!(relay_id = relay.id.as_ref(),%err, "TG: error sending header request"); + None + } + }) + }) + .max_by_key(|(start_time, _)| *start_time) + { + debug!(relay_id = relay.id.as_ref(), n_headers, "TG: received headers from relay"); + return Ok(maybe_header); + } else { + // all requests failed + warn!(relay_id = relay.id.as_ref(), "TG: no headers received"); + + return Err(PbsError::RelayResponse { + error_msg: "no headers received".to_string(), + code: TIMEOUT_ERROR_CODE, + }); + } + } + } + + // if no timing games or no repeated send, just send one request + send_one_get_header(request_info, relay, url, timeout_left_ms) + .await + .map(|(_, maybe_header)| maybe_header) +} + +/// Handles requesting a header from a relay, decoding, and validation. +/// Used by send_timed_get_header to handle each individual request. +async fn send_one_get_header( + request_info: Arc, + relay: RelayClient, + url: Arc, + timeout_left_ms: u64, +) -> Result<(u64, Option), PbsError> { + match request_info.validation.mode { + HeaderValidationMode::None => { + // Minimal processing: extract fork and value, forward response bytes directly. 
+ // Expensive crypto/structural validation is skipped (sigverify, parent hash, + // timestamp), but the min_bid check is applied. + let (start_request_time, get_header_response) = send_get_header_light( + &relay, + url, + timeout_left_ms, + (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the + * impl + * will + * modify it */ + ) + .await?; + match get_header_response { + None => Ok((start_request_time, None)), + Some(res) => { + let min_bid = request_info.validation.min_bid_wei; + if res.value < min_bid { + return Err(PbsError::Validation(ValidationError::BidTooLow { + min: min_bid, + got: res.value, + })); + } + + // Make sure the response is encoded in one of the accepted + // types since we're passing the raw response directly to the client + if !request_info.accepted_types.contains(&res.encoding_type) { + return Err(PbsError::RelayResponse { + error_msg: format!( + "relay returned unsupported encoding type for get_header in no-validation mode: {:?}", + res.encoding_type + ), + code: 406, // Not Acceptable + }); + } + Ok((start_request_time, Some(CompoundGetHeaderResponse::Light(res)))) + } + } + } + _ => { + // Full processing: decode full response and validate + let (start_request_time, get_header_response) = send_get_header_full( + &relay, + url, + timeout_left_ms, + (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the + * impl + * will + * modify it */ + ) + .await?; + let get_header_response = match get_header_response { + None => { + // Break if there's no header + return Ok((start_request_time, None)); + } + Some(res) => res, + }; + + // Extract the basic header data needed for validation + let header_data = match &get_header_response.data.message.header() { + ExecutionPayloadHeaderRef::Bellatrix(_) | + ExecutionPayloadHeaderRef::Capella(_) | + ExecutionPayloadHeaderRef::Deneb(_) | + ExecutionPayloadHeaderRef::Gloas(_) => { + Err(PbsError::Validation(ValidationError::UnsupportedFork)) + } + 
ExecutionPayloadHeaderRef::Electra(res) => Ok(HeaderData { + block_hash: res.block_hash.0, + parent_hash: res.parent_hash.0, + tx_root: res.transactions_root, + value: *get_header_response.value(), + timestamp: res.timestamp, + }), + ExecutionPayloadHeaderRef::Fulu(res) => Ok(HeaderData { + block_hash: res.block_hash.0, + parent_hash: res.parent_hash.0, + tx_root: res.transactions_root, + value: *get_header_response.value(), + timestamp: res.timestamp, + }), + }?; + + // Validate the header + let chain = request_info.chain; + let params = &request_info.params; + let validation = &request_info.validation; + validate_header_data( + &header_data, + chain, + params.parent_hash, + validation.min_bid_wei, + params.slot, + )?; + + // Validate the relay signature + if !validation.skip_sigverify { + validate_signature( + chain, + relay.pubkey(), + get_header_response.data.message.pubkey(), + &get_header_response.data.message, + &get_header_response.data.signature, + )?; + } + + // Validate the parent block if enabled + if validation.mode == HeaderValidationMode::Extra { + let parent_block = validation.parent_block.read(); + if let Some(parent_block) = parent_block.as_ref() { + extra_validation(parent_block, &get_header_response)?; + } else { + warn!( + relay_id = relay.id.as_ref(), + "parent block not found, skipping extra validation" + ); + } + } + + Ok(( + start_request_time, + Some(CompoundGetHeaderResponse::Full(Box::new(get_header_response))), + )) + } + } +} + +/// Send and decode a full get_header response, will all of the fields. 
+async fn send_get_header_full( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // Send the request + let (start_request_time, info) = + send_get_header_impl(relay, url, timeout_left_ms, headers).await?; + let info = match info { + Some(info) => info, + None => { + return Ok((start_request_time, None)); + } + }; + + // Decode the response + let get_header_response = match info.content_type { + EncodingType::Json => decode_json_payload(&info.response_bytes)?, + EncodingType::Ssz => { + let fork = info.fork.ok_or(PbsError::RelayResponse { + error_msg: "relay did not provide consensus version header for ssz payload" + .to_string(), + code: info.code.as_u16(), + })?; + decode_ssz_payload(&info.response_bytes, fork)? + } + }; + + // Log and return + debug!( + relay_id = info.relay_id.as_ref(), + header_size_bytes = info.response_bytes.len(), + latency = ?info.request_latency, + version =? get_header_response.version, + value_eth = format_ether(*get_header_response.value()), + block_hash = %get_header_response.block_hash(), + content_type = ?info.content_type, + "received new header" + ); + Ok((start_request_time, Some(get_header_response))) +} + +/// Send a get_header request and decode only the fork and bid value from the +/// response, leaving the raw bytes intact for direct forwarding to the caller. +/// Used in `HeaderValidationMode::None` where expensive crypto/structural +/// checks are skipped. 
+async fn send_get_header_light( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // Send the request + let (start_request_time, info) = + send_get_header_impl(relay, url, timeout_left_ms, headers).await?; + let info = match info { + Some(info) => info, + None => { + return Ok((start_request_time, None)); + } + }; + + // Decode the value / fork from the response + let (fork, value) = match info.content_type { + EncodingType::Json => get_light_info_from_json(&info.response_bytes)?, + EncodingType::Ssz => { + let fork = info.fork.ok_or(PbsError::RelayResponse { + error_msg: "relay did not provide consensus version header for ssz payload" + .to_string(), + code: info.code.as_u16(), + })?; + (fork, get_bid_value_from_signed_builder_bid_ssz(&info.response_bytes, fork)?) + } + }; + + // Log and return + debug!( + relay_id = info.relay_id.as_ref(), + header_size_bytes = info.response_bytes.len(), + latency = ?info.request_latency, + version =? fork, + value_eth = format_ether(value), + content_type = ?info.content_type, + "received new header (light processing)" + ); + Ok(( + start_request_time, + Some(LightGetHeaderResponse { + version: fork, + value, + raw_bytes: info.response_bytes, + encoding_type: info.content_type, + }), + )) +} + +/// Sends a get_header request to a relay, returning the response, the time the +/// request was started, and the encoding type of the response (if any). +/// Used by send_one_get_header to perform the actual request submission. 
+async fn send_get_header_impl( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + mut headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // the timestamp in the header is the consensus block time which is fixed, + // use the beginning of the request as proxy to make sure we use only the + // last one received + let start_request = Instant::now(); + let start_request_time = utcnow_ms(); + headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(start_request_time)); + + // The timeout header indicating how long a relay has to respond, so they can + // minimize timing games without losing the bid + headers.insert(HEADER_TIMEOUT_MS, HeaderValue::from(timeout_left_ms)); + + let res = match relay + .client + .get(url.as_ref().clone()) + .timeout(Duration::from_millis(timeout_left_ms)) + .headers(headers) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[TIMEOUT_ERROR_CODE_STR, GET_HEADER_ENDPOINT_TAG, &relay.id]) + .inc(); + return Err(err.into()); + } + }; + + // Log the response code and latency + let code = res.status(); + let request_latency = start_request.elapsed(); + RELAY_LATENCY + .with_label_values(&[GET_HEADER_ENDPOINT_TAG, &relay.id]) + .observe(request_latency.as_secs_f64()); + RELAY_STATUS_CODE.with_label_values(&[code.as_str(), GET_HEADER_ENDPOINT_TAG, &relay.id]).inc(); + + // According to the spec, OK is the only allowed success code so this can break + // early + if code != StatusCode::OK { + if code == StatusCode::NO_CONTENT { + let response_bytes = + read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; + debug!( + relay_id = relay.id.as_ref(), + ?code, + latency = ?request_latency, + response = ?response_bytes, + "no header from relay" + ); + return Ok((start_request_time, None)); + } else { + return Err(PbsError::RelayResponse { + error_msg: format!("unexpected status code from relay: {code}"), + code: code.as_u16(), + }); + } + } + + // Get the content type 
+ let content_type = match res.headers().get(CONTENT_TYPE) { + None => { + // Assume a missing content type means JSON; shouldn't happen in practice with + // any respectable HTTP server but just in case + EncodingType::Json + } + Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { + error_msg: format!("cannot decode content-type header: {e}").to_string(), + code: (code.as_u16()), + })? { + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { + EncodingType::Ssz + } + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { + EncodingType::Json + } + header_str => { + return Err(PbsError::RelayResponse { + error_msg: format!("unsupported content type: {header_str}"), + code: code.as_u16(), + }) + } + }, + }; + + // Decode the body + let fork = get_consensus_version_header(res.headers()); + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; + Ok(( + start_request_time, + Some(GetHeaderResponseInfo { + relay_id: relay.id.clone(), + response_bytes, + content_type, + fork, + code, + request_latency, + }), + )) +} + +/// Decode a JSON-encoded get_header response +fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Get the value of a builder bid and the fork name from a get_header JSON +/// response (used for light-level processing) +fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { + #[derive(Deserialize)] + struct LightBuilderBid { + #[serde(with = "serde_utils::quoted_u256")] + pub value: U256, + } + + #[derive(Deserialize)] + struct LightSignedBuilderBid { + pub message: LightBuilderBid, + } + + #[derive(Deserialize)] + struct LightHeaderResponse { + version: ForkName, + data: 
LightSignedBuilderBid, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Decode an SSZ-encoded get_header response +fn decode_ssz_payload( + response_bytes: &[u8], + fork: ForkName, +) -> Result { + let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) +} + +struct HeaderData { + block_hash: B256, + parent_hash: B256, + tx_root: B256, + value: U256, + timestamp: u64, +} + +fn validate_header_data( + header_data: &HeaderData, + chain: Chain, + expected_parent_hash: B256, + minimum_bid_wei: U256, + slot: u64, +) -> Result<(), ValidationError> { + if header_data.block_hash == B256::ZERO { + return Err(ValidationError::EmptyBlockhash); + } + + if expected_parent_hash != header_data.parent_hash { + return Err(ValidationError::ParentHashMismatch { + expected: expected_parent_hash, + got: header_data.parent_hash, + }); + } + + if header_data.tx_root == EMPTY_TX_ROOT_HASH { + return Err(ValidationError::EmptyTxRoot); + } + + if header_data.value < minimum_bid_wei { + return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); + } + + let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); + if expected_timestamp != header_data.timestamp { + return Err(ValidationError::TimestampMismatch { + expected: expected_timestamp, + got: header_data.timestamp, + }); + } + + Ok(()) +} + +fn validate_signature( + chain: Chain, + expected_relay_pubkey: &BlsPublicKey, + received_relay_pubkey: &BlsPublicKeyBytes, + message: &T, + signature: &BlsSignature, +) -> Result<(), ValidationError> { + if 
expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { + return Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(expected_relay_pubkey), + got: *received_relay_pubkey, + }); + } + + if !verify_signed_message( + chain, + expected_relay_pubkey, + &message, + signature, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + ) { + return Err(ValidationError::Sigverify); + } + + Ok(()) +} + +fn extra_validation( + parent_block: &Block, + signed_header: &GetHeaderResponse, +) -> Result<(), ValidationError> { + if signed_header.block_number() != parent_block.header.number + 1 { + return Err(ValidationError::BlockNumberMismatch { + parent: parent_block.header.number, + header: signed_header.block_number(), + }); + } + + if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { + return Err(ValidationError::GasLimit { + parent: parent_block.header.gas_limit, + header: signed_header.gas_limit(), + }); + }; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::{fs, path::Path}; + + use alloy::primitives::{B256, U256}; + use cb_common::{ + pbs::*, + signature::sign_builder_message, + types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, + utils::{TestRandomSeed, timestamp_of_slot_start_sec}, + }; + use ssz::Encode; + + use super::{validate_header_data, *}; + + #[test] + fn test_validate_header() { + let slot = 5; + let parent_hash = B256::from_slice(&[1; 32]); + let chain = Chain::Holesky; + let min_bid = U256::from(10); + + let mut mock_header_data = HeaderData { + block_hash: B256::default(), + parent_hash: B256::default(), + tx_root: EMPTY_TX_ROOT_HASH, + value: U256::default(), + timestamp: 0, + }; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyBlockhash) + ); + + mock_header_data.block_hash.0[1] = 1; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + 
Err(ValidationError::ParentHashMismatch { + expected: parent_hash, + got: B256::default() + }) + ); + + mock_header_data.parent_hash = parent_hash; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyTxRoot) + ); + + mock_header_data.tx_root = Default::default(); + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) + ); + + mock_header_data.value = U256::from(11); + + let expected = timestamp_of_slot_start_sec(slot, chain); + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::TimestampMismatch { expected, got: 0 }) + ); + + mock_header_data.timestamp = expected; + + assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); + } + + #[test] + fn test_validate_signature() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + let wrong_pubkey = BlsPublicKeyBytes::test_random(); + let wrong_signature = BlsSignature::test_random(); + + let message = B256::random(); + + let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); + + assert_eq!( + validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), + Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(&pubkey), + got: wrong_pubkey + }) + ); + + assert!(matches!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &wrong_signature + ), + Err(ValidationError::Sigverify) + )); + + assert!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &signature + ) + .is_ok() + ); + } + + #[test] + fn test_ssz_value_extraction() { + for fork_name in ForkName::list_all() { + match fork_name { + // Handle forks that didn't have builder bids yet + 
ForkName::Altair | ForkName::Base => continue, + + // Handle supported forks + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Electra | + ForkName::Fulu => {} + + // Skip unsupported forks + ForkName::Gloas => continue, + } + + // Load get_header JSON from test data + let fork_name_str = fork_name.to_string().to_lowercase(); + let path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); + let path = Path::new(path_str.as_str()); + let json_bytes = fs::read(path).expect("file not found"); + let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); + + // Extract the bid value from the SSZ + let encoded = decoded.data.as_ssz_bytes(); + let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) + .expect("failed to extract bid value from SSZ"); + + // Compare to the original value + println!("Testing fork: {}", fork_name); + println!("Original value: {}", decoded.value()); + println!("Extracted value: {}", bid_value); + assert_eq!(*decoded.value(), bid_value); + } + } +} diff --git a/crates/pbs/src/routes/mod.rs b/crates/pbs/src/routes/mod.rs index 84853d9e..0a9e856c 100644 --- a/crates/pbs/src/routes/mod.rs +++ b/crates/pbs/src/routes/mod.rs @@ -5,8 +5,74 @@ mod router; mod status; mod submit_block; +use alloy::primitives::U256; +use cb_common::{ + pbs::{GetHeaderResponse, SubmitBlindedBlockResponse}, + utils::EncodingType, +}; +pub use get_header::get_header; use get_header::handle_get_header; +use lh_types::ForkName; use register_validator::handle_register_validator; pub use router::create_app_router; use status::handle_get_status; use submit_block::handle_submit_block_v1; + +/// Enum that handles different GetHeader response types based on the level of +/// validation required +pub enum CompoundGetHeaderResponse { + /// Standard response type, fully parsing the response from a relay into a + /// complete response struct + Full(Box), + + /// Light response type, only extracting 
the fork and value from the builder + /// bid with the entire (undecoded) payload for forwarding + Light(LightGetHeaderResponse), +} + +/// Core details of a GetHeaderResponse, used for light processing when +/// validation mode is set to none. +#[derive(Clone)] +pub struct LightGetHeaderResponse { + /// The fork name for the bid + pub version: ForkName, + + /// The bid value in wei + pub value: U256, + + /// The raw bytes of the response, for forwarding to the caller + pub raw_bytes: Vec, + + /// The format the response bytes are encoded with + pub encoding_type: EncodingType, +} + +/// Enum that handles different SubmitBlock response types based on the level of +/// validation required +pub enum CompoundSubmitBlockResponse { + /// Standard response type, fully parsing the response from a relay into a + /// complete response struct + Full(Box), + + /// Light response type, only extracting the fork from the response with the + /// entire (undecoded) payload for forwarding + Light(LightSubmitBlockResponse), + + /// Response with no body, used for v2 requests when the relay does not + /// return any content intentionally + EmptyBody, +} + +/// Core details of a SubmitBlockResponse, used for light processing when +/// validation mode is set to none. 
+#[derive(Clone, Debug)] +pub struct LightSubmitBlockResponse { + /// The fork name for the bid + pub version: ForkName, + + /// The raw bytes of the response, for forwarding to the caller + pub raw_bytes: Vec, + + /// The format the response bytes are encoded with + pub encoding_type: EncodingType, +} diff --git a/crates/pbs/src/routes/register_validator.rs b/crates/pbs/src/routes/register_validator.rs index 2998b360..d267ddf6 100644 --- a/crates/pbs/src/routes/register_validator.rs +++ b/crates/pbs/src/routes/register_validator.rs @@ -1,18 +1,37 @@ -use axum::{Json, extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; -use reqwest::StatusCode; -use tracing::{error, info, trace}; +use std::time::{Duration, Instant}; + +use alloy::primitives::Bytes; +use axum::{ + Json, + extract::State, + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use cb_common::{ + pbs::{HEADER_START_TIME_UNIX_MS, RelayClient, error::PbsError}, + utils::{get_user_agent, get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, +}; +use eyre::bail; +use futures::{ + FutureExt, + future::{join_all, select_ok}, +}; +use reqwest::{ + StatusCode, + header::{CONTENT_TYPE, USER_AGENT}, +}; +use tracing::{Instrument, debug, error, info, trace}; +use url::Url; use crate::{ - constants::REGISTER_VALIDATOR_ENDPOINT_TAG, + constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - mev_boost, - state::{BuilderApiState, PbsStateGuard}, + metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_register_validator( - State(state): State>, +pub async fn handle_register_validator( + State(state): State, req_headers: HeaderMap, Json(registrations): Json>, ) -> Result { @@ -24,7 +43,7 @@ pub async fn handle_register_validator( info!(ua, num_registrations = registrations.len(), "new request"); 
- if let Err(err) = mev_boost::register_validator(registrations, req_headers, state).await { + if let Err(err) = register_validator(registrations, req_headers, state).await { error!(%err, "all relays failed registration"); let err = PbsClientError::NoResponse; @@ -39,3 +58,194 @@ pub async fn handle_register_validator( Ok(StatusCode::OK) } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator +/// Returns 200 if at least one relay returns 200, else 503 +pub(crate) async fn register_validator( + registrations: Vec, + req_headers: HeaderMap, + state: PbsState, +) -> eyre::Result<()> { + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers + .insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from_str(&utcnow_ms().to_string())?); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // prepare the body in advance, ugly dyn + let bodies: Box> = + if let Some(batch_size) = state.config.pbs_config.validator_registration_batch_size { + Box::new(registrations.chunks(batch_size).map(|batch| { + // SAFETY: unwrap is ok because we're serializing a &[serde_json::Value] + let body = serde_json::to_vec(batch).unwrap(); + (batch.len(), Bytes::from(body)) + })) + } else { + let body = serde_json::to_vec(®istrations).unwrap(); + Box::new(std::iter::once((registrations.len(), Bytes::from(body)))) + }; + send_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + + let mut handles = Vec::with_capacity(state.all_relays().len()); + + for (n_regs, body) in bodies { + for relay in state.all_relays().iter().cloned() { + handles.push( + tokio::spawn( + send_register_validator_with_timeout( + n_regs, + body.clone(), + relay, + send_headers.clone(), + state.pbs_config().timeout_register_validator_ms, + state.pbs_config().register_validator_retry_limit, + ) + .in_current_span(), + ) + .map(|join_result| 
match join_result { + Ok(res) => res, + Err(err) => Err(PbsError::TokioJoinError(err)), + }), + ); + } + } + + if state.pbs_config().wait_all_registrations { + // wait for all relays registrations to complete + let results = join_all(handles).await; + if results.into_iter().any(|res| res.is_ok()) { + Ok(()) + } else { + bail!("No relay passed register_validator successfully") + } + } else { + // return once first completes, others proceed in background + let result = select_ok(handles).await; + match result { + Ok(_) => Ok(()), + Err(_) => bail!("No relay passed register_validator successfully"), + } + } +} + +/// Register validator to relay, retry connection errors until the +/// given timeout has passed +async fn send_register_validator_with_timeout( + n_regs: usize, + body: Bytes, + relay: RelayClient, + headers: HeaderMap, + timeout_ms: u64, + retry_limit: u32, +) -> Result<(), PbsError> { + let url = relay.register_validator_url()?; + let mut remaining_timeout_ms = timeout_ms; + let mut retry = 0; + let mut backoff = Duration::from_millis(250); + + loop { + let start_request = Instant::now(); + match send_register_validator( + url.clone(), + n_regs, + body.clone(), + &relay, + headers.clone(), + remaining_timeout_ms, + retry, + ) + .await + { + Ok(_) => return Ok(()), + + Err(err) if err.should_retry() => { + retry += 1; + if retry >= retry_limit { + error!( + relay_id = relay.id.as_str(), + retry, "reached retry limit for validator registration" + ); + return Err(err); + } + tokio::time::sleep(backoff).await; + backoff += Duration::from_millis(250); + + remaining_timeout_ms = + timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); + + if remaining_timeout_ms == 0 { + return Err(err); + } + } + + Err(err) => return Err(err), + }; + } +} + +async fn send_register_validator( + url: Url, + n_regs: usize, + body: Bytes, + relay: &RelayClient, + headers: HeaderMap, + timeout_ms: u64, + retry: u32, +) -> Result<(), PbsError> { + let start_request = 
Instant::now(); + let res = match relay + .client + .post(url) + .timeout(Duration::from_millis(timeout_ms)) + .headers(headers) + .body(body.0) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[ + TIMEOUT_ERROR_CODE_STR, + REGISTER_VALIDATOR_ENDPOINT_TAG, + &relay.id, + ]) + .inc(); + return Err(err.into()); + } + }; + let request_latency = start_request.elapsed(); + RELAY_LATENCY + .with_label_values(&[REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) + .observe(request_latency.as_secs_f64()); + + let code = res.status(); + RELAY_STATUS_CODE + .with_label_values(&[code.as_str(), REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) + .inc(); + + if !code.is_success() { + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; + let err = PbsError::RelayResponse { + error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), + code: code.as_u16(), + }; + + // error here since we check if any success above + error!(relay_id = relay.id.as_ref(), retry, %err, "failed registration"); + return Err(err); + }; + + debug!( + relay_id = relay.id.as_ref(), + retry, + ?code, + latency = ?request_latency, + num_registrations = n_regs, + "registration successful" + ); + + Ok(()) +} diff --git a/crates/pbs/src/routes/reload.rs b/crates/pbs/src/routes/reload.rs index 9474cbde..6e4d5f07 100644 --- a/crates/pbs/src/routes/reload.rs +++ b/crates/pbs/src/routes/reload.rs @@ -1,19 +1,18 @@ use axum::{extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; +use cb_common::{config::load_pbs_config, utils::get_user_agent}; use reqwest::StatusCode; -use tracing::{error, info}; +use tracing::{error, info, warn}; use crate::{ RELOAD_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, - mev_boost, - state::{BuilderApiState, PbsStateGuard}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_reload( +pub async fn handle_reload( req_headers: HeaderMap, - 
State(state): State>, + State(state): State, ) -> Result { let prev_state = state.read().clone(); @@ -21,7 +20,7 @@ pub async fn handle_reload( info!(ua, relay_check = prev_state.config.pbs_config.relay_check); - match mev_boost::reload(prev_state).await { + match reload(prev_state).await { Ok(new_state) => { info!("config reload successful"); @@ -41,3 +40,28 @@ pub async fn handle_reload( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Reload the PBS state with the latest configuration in the config file +/// Returns 200 if successful or 500 if failed +pub(crate) async fn reload(state: PbsState) -> eyre::Result { + let (pbs_config, config_path) = load_pbs_config(None).await?; + let new_state = PbsState::new(pbs_config, config_path); + + if state.config.pbs_config.host != new_state.config.pbs_config.host { + warn!( + "Host change for PBS module require a full restart. Old: {}, New: {}", + state.config.pbs_config.host, new_state.config.pbs_config.host + ); + } + + if state.config.pbs_config.port != new_state.config.pbs_config.port { + warn!( + "Port change for PBS module require a full restart. 
Old: {}, New: {}", + state.config.pbs_config.port, new_state.config.pbs_config.port + ); + } + + Ok(new_state) +} diff --git a/crates/pbs/src/routes/router.rs b/crates/pbs/src/routes/router.rs index 87e2eeae..a92da735 100644 --- a/crates/pbs/src/routes/router.rs +++ b/crates/pbs/src/routes/router.rs @@ -21,35 +21,34 @@ use super::{ }; use crate::{ MAX_SIZE_REGISTER_VALIDATOR_REQUEST, MAX_SIZE_SUBMIT_BLOCK_RESPONSE, - routes::submit_block::handle_submit_block_v2, - state::{BuilderApiState, PbsStateGuard}, + routes::submit_block::handle_submit_block_v2, state::PbsStateGuard, }; -pub fn create_app_router(state: PbsStateGuard) -> Router { +pub fn create_app_router(state: PbsStateGuard) -> Router { // DefaultBodyLimit is 2Mib by default, so we only increase it for a few routes // that may need more let v1_builder_routes = Router::new() - .route(GET_HEADER_PATH, get(handle_get_header::)) - .route(GET_STATUS_PATH, get(handle_get_status::)) + .route(GET_HEADER_PATH, get(handle_get_header)) + .route(GET_STATUS_PATH, get(handle_get_status)) .route( REGISTER_VALIDATOR_PATH, - post(handle_register_validator::) + post(handle_register_validator) .route_layer(DefaultBodyLimit::max(MAX_SIZE_REGISTER_VALIDATOR_REQUEST)), ) .route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v1::) + post(handle_submit_block_v1) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); // header is smaller than the response but err on the safe side let v2_builder_routes = Router::new().route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v2::) + post(handle_submit_block_v2) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); let v1_builder_router = Router::new().nest(BUILDER_V1_API_PATH, v1_builder_routes); let v2_builder_router = Router::new().nest(BUILDER_V2_API_PATH, v2_builder_routes); - let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload::)); + let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload)); let app = 
Router::new().merge(v1_builder_router).merge(v2_builder_router).merge(reload_router).layer( TraceLayer::new_for_http().on_response( diff --git a/crates/pbs/src/routes/status.rs b/crates/pbs/src/routes/status.rs index 8ac6fe86..ef0a8c6f 100644 --- a/crates/pbs/src/routes/status.rs +++ b/crates/pbs/src/routes/status.rs @@ -1,19 +1,24 @@ +use std::time::{Duration, Instant}; + use axum::{extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; -use reqwest::StatusCode; -use tracing::{error, info}; +use cb_common::{ + pbs::{RelayClient, error::PbsError}, + utils::{get_user_agent, get_user_agent_with_version, read_chunked_body_with_max}, +}; +use futures::future::select_ok; +use reqwest::{StatusCode, header::USER_AGENT}; +use tracing::{debug, error, info}; use crate::{ - constants::STATUS_ENDPOINT_TAG, + constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - mev_boost, - state::{BuilderApiState, PbsStateGuard}, + metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_get_status( +pub async fn handle_get_status( req_headers: HeaderMap, - State(state): State>, + State(state): State, ) -> Result { let state = state.read().clone(); @@ -21,7 +26,7 @@ pub async fn handle_get_status( info!(ua, relay_check = state.config.pbs_config.relay_check, "new request"); - match mev_boost::get_status(req_headers, state).await { + match get_status(req_headers, state).await { Ok(_) => { info!("relay check successful"); @@ -39,3 +44,76 @@ pub async fn handle_get_status( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/status +/// Broadcasts a status check to all relays and returns 200 if at least one +/// relay returns 200 +pub(crate) async fn get_status(req_headers: HeaderMap, state: PbsState) -> 
eyre::Result<()> { + // If no relay check, return early + if !state.config.pbs_config.relay_check { + Ok(()) + } else { + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + let relays = state.all_relays(); + let mut handles = Vec::with_capacity(relays.len()); + for relay in relays { + handles.push(Box::pin(send_relay_check(relay, send_headers.clone()))); + } + + // return ok if at least one relay returns 200 + let results = select_ok(handles).await; + match results { + Ok(_) => Ok(()), + Err(err) => Err(err.into()), + } + } +} + +async fn send_relay_check(relay: &RelayClient, headers: HeaderMap) -> Result<(), PbsError> { + let url = relay.get_status_url()?; + + let start_request = Instant::now(); + let res = match relay + .client + .get(url) + .timeout(Duration::from_secs(30)) + .headers(headers) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[TIMEOUT_ERROR_CODE_STR, STATUS_ENDPOINT_TAG, &relay.id]) + .inc(); + return Err(err.into()); + } + }; + let request_latency = start_request.elapsed(); + RELAY_LATENCY + .with_label_values(&[STATUS_ENDPOINT_TAG, &relay.id]) + .observe(request_latency.as_secs_f64()); + + let code = res.status(); + RELAY_STATUS_CODE.with_label_values(&[code.as_str(), STATUS_ENDPOINT_TAG, &relay.id]).inc(); + + if !code.is_success() { + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; + let err = PbsError::RelayResponse { + error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), + code: code.as_u16(), + }; + + error!(relay_id = relay.id.as_ref(),%err, "status failed"); + return Err(err); + }; + + debug!(relay_id = relay.id.as_ref(),?code, latency = ?request_latency, "status passed"); + + Ok(()) +} diff --git a/crates/pbs/src/routes/submit_block.rs b/crates/pbs/src/routes/submit_block.rs index 2332eb0c..48b26db9 100644 --- a/crates/pbs/src/routes/submit_block.rs 
+++ b/crates/pbs/src/routes/submit_block.rs @@ -1,48 +1,65 @@ -use std::sync::Arc; +use std::{ + collections::HashSet, + sync::Arc, + time::{Duration, Instant}, +}; +use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; use axum::{ extract::State, http::{HeaderMap, HeaderValue}, response::IntoResponse, }; use cb_common::{ - pbs::{BuilderApiVersion, GetPayloadInfo}, + config::BlockValidationMode, + pbs::{ + BlindedBeaconBlock, BlobsBundle, BuilderApiVersion, ForkName, ForkVersionDecode, + GetPayloadInfo, HEADER_START_TIME_UNIX_MS, KzgCommitments, PayloadAndBlobs, RelayClient, + SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, + error::{PbsError, ValidationError}, + }, utils::{ CONSENSUS_VERSION_HEADER, EncodingType, RawRequest, deserialize_body, get_accept_types, - get_user_agent, timestamp_of_slot_start_millis, utcnow_ms, + get_consensus_version_header, get_user_agent, get_user_agent_with_version, + read_chunked_body_with_max, timestamp_of_slot_start_millis, utcnow_ms, }, }; -use reqwest::{StatusCode, header::CONTENT_TYPE}; +use futures::{FutureExt, future::select_ok}; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; +use serde::Deserialize; use ssz::Encode; -use tracing::{error, info, trace}; +use tracing::{debug, error, info, trace, warn}; +use url::Url; use crate::{ - CompoundSubmitBlockResponse, - constants::SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + CompoundSubmitBlockResponse, LightSubmitBlockResponse, TIMEOUT_ERROR_CODE_STR, + constants::{MAX_SIZE_SUBMIT_BLOCK_RESPONSE, SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG}, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - mev_boost, - state::{BuilderApiState, PbsStateGuard}, + metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_submit_block_v1( - state: State>, +pub async fn handle_submit_block_v1( + state: State, req_headers: HeaderMap, raw_request: RawRequest, ) -> Result { - 
handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V1).await + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V1).await } -pub async fn handle_submit_block_v2( - state: State>, +pub async fn handle_submit_block_v2( + state: State, req_headers: HeaderMap, raw_request: RawRequest, ) -> Result { - handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V2).await + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V2).await } -async fn handle_submit_block_impl( - State(state): State>, +async fn handle_submit_block_impl( + State(state): State, req_headers: HeaderMap, raw_request: RawRequest, api_version: BuilderApiVersion, @@ -72,15 +89,7 @@ async fn handle_submit_block_impl( info!(ua, ms_into_slot = now.saturating_sub(slot_start_ms), "new request"); - match mev_boost::submit_block( - signed_blinded_block, - req_headers, - state, - api_version, - accept_types, - ) - .await - { + match submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await { Ok(res) => match res { crate::CompoundSubmitBlockResponse::EmptyBody => { info!("received unblinded block (v2)"); @@ -169,3 +178,700 @@ async fn handle_submit_block_impl( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Info about a proposal submission request. +/// Sent from submit_block to the submit_block_with_timeout function. 
+#[derive(Clone)] +struct ProposalInfo { + /// The signed blinded block to submit + signed_blinded_block: Arc, + + /// Common baseline of headers to send with each request + headers: Arc, + + /// The version of the submit_block route being used + api_version: BuilderApiVersion, + + /// How to validate the block returned by the relay + validation_mode: BlockValidationMode, + + /// The accepted encoding types from the original request + accepted_types: HashSet, +} + +/// Used interally to provide info and context about a submit_block request and +/// its response +struct SubmitBlockResponseInfo { + /// The raw body of the response + response_bytes: Vec, + + /// The content type the response is encoded with + content_type: EncodingType, + + /// Which fork the response bid is for (if provided as a header, rather than + /// part of the body) + fork: Option, + + /// The status code of the response, for logging + code: StatusCode, + + /// The round-trip latency of the request + request_latency: Duration, +} + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and +/// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to +/// distinguish between the two. 
+pub(crate) async fn submit_block( + signed_blinded_block: Arc, + req_headers: HeaderMap, + state: PbsState, + api_version: BuilderApiVersion, + accepted_types: HashSet, +) -> eyre::Result { + debug!(?req_headers, "received headers"); + + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().block_validation_mode; + let accept_types = match mode { + BlockValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types).unwrap()); + + // Send requests to all relays concurrently + let proposal_info = Arc::new(ProposalInfo { + signed_blinded_block, + headers: Arc::new(send_headers), + api_version, + validation_mode: mode, + accepted_types, + }); + let mut handles = Vec::with_capacity(state.all_relays().len()); + for relay in state.all_relays().iter() { + handles.push( + tokio::spawn(submit_block_with_timeout( + proposal_info.clone(), + relay.clone(), + state.pbs_config().timeout_get_payload_ms, + )) + .map(|join_result| match join_result { + Ok(res) => res, + Err(err) => Err(PbsError::TokioJoinError(err)), + }), + ); + } + + let results = select_ok(handles).await; + match results { + Ok((res, _)) => Ok(res), + Err(err) => Err(err.into()), + } +} + +/// Submit blinded block to relay, retry connection errors until the +/// given timeout has passed +async fn submit_block_with_timeout( + proposal_info: Arc, + relay: RelayClient, + 
timeout_ms: u64, +) -> Result { + let mut url = Arc::new(relay.submit_block_url(proposal_info.api_version)?); + let mut remaining_timeout_ms = timeout_ms; + let mut retry = 0; + let mut backoff = Duration::from_millis(250); + let mut request_api_version = proposal_info.api_version; + + loop { + let start_request = Instant::now(); + match send_submit_block( + proposal_info.clone(), + url.clone(), + &relay, + remaining_timeout_ms, + retry, + request_api_version, + ) + .await + { + Ok(response) => { + // If the original request was for v2 but we had to fall back to v1, return a v2 + // response + if request_api_version == BuilderApiVersion::V1 && + proposal_info.api_version != request_api_version + { + return Ok(CompoundSubmitBlockResponse::EmptyBody); + } + return Ok(response); + } + + Err(err) if err.should_retry() => { + tokio::time::sleep(backoff).await; + backoff += Duration::from_millis(250); + + remaining_timeout_ms = + timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); + + if remaining_timeout_ms == 0 { + return Err(err); + } + } + + Err(err) + if err.is_not_found() && matches!(request_api_version, BuilderApiVersion::V2) => + { + warn!( + relay_id = relay.id.as_ref(), + "relay does not support v2 endpoint, retrying with v1" + ); + url = Arc::new(relay.submit_block_url(BuilderApiVersion::V1)?); + request_api_version = BuilderApiVersion::V1; + } + + Err(err) => return Err(err), + }; + + retry += 1; + } +} + +// submits blinded signed block and expects the execution payload + blobs bundle +// back +#[allow(clippy::too_many_arguments)] +async fn send_submit_block( + proposal_info: Arc, + url: Arc, + relay: &RelayClient, + timeout_ms: u64, + retry: u32, + api_version: BuilderApiVersion, +) -> Result { + match proposal_info.validation_mode { + BlockValidationMode::None => { + // No validation so do some light processing and forward the response directly + let response = + send_submit_block_light(proposal_info.clone(), url, relay, timeout_ms, 
retry) + .await?; + match response { + None => Ok(CompoundSubmitBlockResponse::EmptyBody), + Some(res) => { + // Make sure the response is encoded in one of the accepted + // types since we're passing the raw response directly to the client + if !proposal_info.accepted_types.contains(&res.encoding_type) { + return Err(PbsError::RelayResponse { + error_msg: format!( + "relay returned unsupported encoding type for submit_block in no-validation mode: {:?}", + res.encoding_type + ), + code: 406, // Not Acceptable + }); + } + Ok(CompoundSubmitBlockResponse::Light(res)) + } + } + } + _ => { + // Full processing: decode full response and validate + let response = send_submit_block_full( + proposal_info.clone(), + url, + relay, + timeout_ms, + retry, + api_version, + ) + .await?; + let response = match response { + None => { + // v2 request with no body + return Ok(CompoundSubmitBlockResponse::EmptyBody); + } + Some(res) => res, + }; + // Extract the info needed for validation + let got_block_hash = response.data.execution_payload.block_hash().0; + + // request has different type so cant be deserialized in the wrong version, + // response has a "version" field + match &proposal_info.signed_blinded_block.message() { + BlindedBeaconBlock::Electra(blinded_block) => { + let expected_block_hash = + blinded_block.body.execution_payload.execution_payload_header.block_hash.0; + let expected_commitments = &blinded_block.body.blob_kzg_commitments; + + validate_unblinded_block( + expected_block_hash, + got_block_hash, + expected_commitments, + &response.data.blobs_bundle, + response.version, + ) + } + + BlindedBeaconBlock::Fulu(blinded_block) => { + let expected_block_hash = + blinded_block.body.execution_payload.execution_payload_header.block_hash.0; + let expected_commitments = &blinded_block.body.blob_kzg_commitments; + + validate_unblinded_block( + expected_block_hash, + got_block_hash, + expected_commitments, + &response.data.blobs_bundle, + response.version, + ) + } + + _ => 
return Err(PbsError::Validation(ValidationError::UnsupportedFork)), + }?; + Ok(CompoundSubmitBlockResponse::Full(Box::new(response))) + } + } +} + +/// Send and fully process a submit_block request, returning a complete decoded +/// response +async fn send_submit_block_full( + proposal_info: Arc, + url: Arc, + relay: &RelayClient, + timeout_ms: u64, + retry: u32, + api_version: BuilderApiVersion, +) -> Result, PbsError> { + // Send the request + let block_response = send_submit_block_impl( + relay, + url, + timeout_ms, + (*proposal_info.headers).clone(), + &proposal_info.signed_blinded_block, + retry, + api_version, + ) + .await?; + + // If this is not v1, there's no body to decode + if api_version != BuilderApiVersion::V1 { + return Ok(None); + } + + // Decode the payload based on content type + let decoded_response = match block_response.content_type { + EncodingType::Json => decode_json_payload(&block_response.response_bytes)?, + EncodingType::Ssz => { + let fork = match block_response.fork { + Some(fork) => fork, + None => { + return Err(PbsError::RelayResponse { + error_msg: "missing fork version header in SSZ submit_block response" + .to_string(), + code: block_response.code.as_u16(), + }); + } + }; + decode_ssz_payload(&block_response.response_bytes, fork)? 
+ } + }; + + // Log and return + debug!( + relay_id = relay.id.as_ref(), + retry, + latency = ?block_response.request_latency, + version =% decoded_response.version, + "received unblinded block" + ); + + Ok(Some(decoded_response)) +} + +/// Send and lightly process a submit_block request, minimizing the amount of +/// decoding and validation done +async fn send_submit_block_light( + proposal_info: Arc, + url: Arc, + relay: &RelayClient, + timeout_ms: u64, + retry: u32, +) -> Result, PbsError> { + // Send the request + let block_response = send_submit_block_impl( + relay, + url, + timeout_ms, + (*proposal_info.headers).clone(), + &proposal_info.signed_blinded_block, + retry, + proposal_info.api_version, + ) + .await?; + + // If this is not v1, there's no body to decode + if proposal_info.api_version != BuilderApiVersion::V1 { + return Ok(None); + } + + // Decode the payload based on content type + let fork = match block_response.content_type { + EncodingType::Json => get_light_info_from_json(&block_response.response_bytes)?, + EncodingType::Ssz => match block_response.fork { + Some(fork) => fork, + None => { + return Err(PbsError::RelayResponse { + error_msg: "missing fork version header in SSZ submit_block response" + .to_string(), + code: block_response.code.as_u16(), + }); + } + }, + }; + + // Log and return + debug!( + relay_id = relay.id.as_ref(), + retry, + latency = ?block_response.request_latency, + version =% fork, + "received unblinded block (light processing)" + ); + + Ok(Some(LightSubmitBlockResponse { + version: fork, + encoding_type: block_response.content_type, + raw_bytes: block_response.response_bytes, + })) +} + +/// Sends the actual HTTP request to the relay's submit_block endpoint, +/// returning the response (if applicable), the round-trip time, and the +/// encoding type used for the body (if any). Used by send_submit_block. 
+async fn send_submit_block_impl( + relay: &RelayClient, + url: Arc, + timeout_ms: u64, + headers: HeaderMap, + signed_blinded_block: &SignedBlindedBeaconBlock, + retry: u32, + api_version: BuilderApiVersion, +) -> Result { + let start_request = Instant::now(); + + // Try SSZ first + let mut res = match relay + .client + .post(url.as_ref().clone()) + .timeout(Duration::from_millis(timeout_ms)) + .headers(headers.clone()) + .body(signed_blinded_block.as_ssz_bytes()) + .header(CONTENT_TYPE, EncodingType::Ssz.to_string()) + .header(CONSENSUS_VERSION_HEADER, signed_blinded_block.fork_name_unchecked().to_string()) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[ + TIMEOUT_ERROR_CODE_STR, + SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + &relay.id, + ]) + .inc(); + return Err(err.into()); + } + }; + + // If we got a client error, retry with JSON - the spec says that this should be + // a 406 or 415, but we're a little more permissive here + if res.status().is_client_error() { + warn!( + relay_id = relay.id.as_ref(), + "relay does not support SSZ, resubmitting block with JSON content-type" + ); + res = match relay + .client + .post(url.as_ref().clone()) + .timeout(Duration::from_millis(timeout_ms)) + .headers(headers) + .body(serde_json::to_vec(&signed_blinded_block).unwrap()) + .header(CONTENT_TYPE, EncodingType::Json.to_string()) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[ + TIMEOUT_ERROR_CODE_STR, + SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + &relay.id, + ]) + .inc(); + return Err(err.into()); + } + }; + } + + // Log the response code and latency + let code = res.status(); + let request_latency = start_request.elapsed(); + RELAY_LATENCY + .with_label_values(&[SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) + .observe(request_latency.as_secs_f64()); + RELAY_STATUS_CODE + .with_label_values(&[code.as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) + .inc(); + + // If this was 
API v2 and succeeded then we can just return here + if api_version != BuilderApiVersion::V1 { + debug!( + relay_id = relay.id.as_ref(), + retry, + latency = ?request_latency, + "received 202 Accepted for v2 submit_block" + ); + + match code { + StatusCode::ACCEPTED => { + return Ok(SubmitBlockResponseInfo { + response_bytes: Vec::new(), + content_type: EncodingType::Json, // dummy value + fork: None, + code, + request_latency, + }); + } + StatusCode::OK => { + warn!( + relay_id = relay.id.as_ref(), + "relay sent OK response for v2 submit_block, expected 202 Accepted" + ); + return Ok(SubmitBlockResponseInfo { + response_bytes: Vec::new(), + content_type: EncodingType::Json, // dummy value + fork: None, + code, + request_latency, + }); + } + _ => { + return Err(PbsError::RelayResponse { + error_msg: format!( + "relay sent unexpected code for builder route v2 {}: {code}", + relay.id.as_ref() + ), + code: code.as_u16(), + }); + } + } + } + + // If the code is not OK, return early + if code != StatusCode::OK { + let response_bytes = + read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; + let err = PbsError::RelayResponse { + error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), + code: code.as_u16(), + }; + + // we requested the payload from all relays, but some may have not received it + warn!(relay_id = relay.id.as_ref(), %err, "failed to get payload (this might be ok if other relays have it)"); + return Err(err); + } + + // We're on v1 so decode the payload normally - get the content type + let content_type = match res.headers().get(CONTENT_TYPE) { + None => { + // Assume a missing content type means JSON; shouldn't happen in practice with + // any respectable HTTP server but just in case + EncodingType::Json + } + Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { + error_msg: format!("cannot decode content-type header: {e}").to_string(), + code: (code.as_u16()), + })? 
{ + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { + EncodingType::Ssz + } + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { + EncodingType::Json + } + header_str => { + return Err(PbsError::RelayResponse { + error_msg: format!("unsupported content type: {header_str}"), + code: code.as_u16(), + }) + } + }, + }; + + // Decode the body + let fork = get_consensus_version_header(res.headers()); + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; + Ok(SubmitBlockResponseInfo { response_bytes, content_type, fork, code, request_latency }) +} + +/// Decode a JSON-encoded submit_block response +fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Get the fork name from a submit_block JSON response (used for light +/// processing) +fn get_light_info_from_json(response_bytes: &[u8]) -> Result { + #[derive(Deserialize)] + struct LightSubmitBlockResponse { + version: ForkName, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed.version), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Decode an SSZ-encoded submit_block response +fn decode_ssz_payload( + response_bytes: &[u8], + fork: ForkName, +) -> Result { + let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) +} + +fn validate_unblinded_block( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + 
blobs_bundle: &BlobsBundle, + fork_name: ForkName, +) -> Result<(), PbsError> { + match fork_name { + ForkName::Base | + ForkName::Altair | + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), + ForkName::Electra => validate_unblinded_block_electra( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + ForkName::Fulu => validate_unblinded_block_fulu( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + } +} + +fn validate_unblinded_block_electra( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + if expected_block_hash != got_block_hash { + return Err(PbsError::Validation(ValidationError::BlockHashMismatch { + expected: expected_block_hash, + got: got_block_hash, + })); + } + + if expected_commitments.len() != blobs_bundle.blobs.len() || + expected_commitments.len() != blobs_bundle.commitments.len() || + expected_commitments.len() != blobs_bundle.proofs.len() + { + return Err(PbsError::Validation(ValidationError::KzgCommitments { + expected_blobs: expected_commitments.len(), + got_blobs: blobs_bundle.blobs.len(), + got_commitments: blobs_bundle.commitments.len(), + got_proofs: blobs_bundle.proofs.len(), + })); + } + + for (i, comm) in expected_commitments.iter().enumerate() { + // this is safe since we already know they are the same length + if *comm != blobs_bundle.commitments[i] { + return Err(PbsError::Validation(ValidationError::KzgMismatch { + expected: format!("{comm}"), + got: format!("{}", blobs_bundle.commitments[i]), + index: i, + })); + } + } + + Ok(()) +} + +fn validate_unblinded_block_fulu( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + if expected_block_hash != got_block_hash { + 
return Err(PbsError::Validation(ValidationError::BlockHashMismatch { + expected: expected_block_hash, + got: got_block_hash, + })); + } + + if expected_commitments.len() != blobs_bundle.blobs.len() || + expected_commitments.len() != blobs_bundle.commitments.len() || + expected_commitments.len() * CELLS_PER_EXT_BLOB != blobs_bundle.proofs.len() + { + return Err(PbsError::Validation(ValidationError::KzgCommitments { + expected_blobs: expected_commitments.len(), + got_blobs: blobs_bundle.blobs.len(), + got_commitments: blobs_bundle.commitments.len(), + got_proofs: blobs_bundle.proofs.len(), + })); + } + + for (i, comm) in expected_commitments.iter().enumerate() { + // this is safe since we already know they are the same length + if *comm != blobs_bundle.commitments[i] { + return Err(PbsError::Validation(ValidationError::KzgMismatch { + expected: format!("{comm}"), + got: format!("{}", blobs_bundle.commitments[i]), + index: i, + })); + } + } + + Ok(()) +} diff --git a/crates/pbs/src/service.rs b/crates/pbs/src/service.rs index 6a738379..36b417a7 100644 --- a/crates/pbs/src/service.rs +++ b/crates/pbs/src/service.rs @@ -22,13 +22,23 @@ use url::Url; use crate::{ metrics::PBS_METRICS_REGISTRY, routes::create_app_router, - state::{BuilderApiState, PbsState, PbsStateGuard}, + state::{PbsState, PbsStateGuard}, }; pub struct PbsService; impl PbsService { - pub async fn run(state: PbsState) -> Result<()> { + pub async fn run(state: PbsState) -> Result<()> { + let listener = TcpListener::bind(state.config.endpoint).await?; + Self::run_with_listener(state, listener).await + } + + /// Like [`run`], but accepts a pre-bound [`TcpListener`]. + /// + /// Useful in tests where the caller binds the socket with port 0 to get + /// an OS-assigned port and then passes the listener here, eliminating the + /// TOCTOU race that would otherwise exist between port discovery and bind. 
+ pub async fn run_with_listener(state: PbsState, listener: TcpListener) -> Result<()> { let addr = state.config.endpoint; info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, ?addr, chain =? state.config.chain, "starting PBS service"); @@ -41,9 +51,8 @@ impl PbsService { }); let config_path = state.config_path.clone(); - let state: Arc>> = RwLock::new(state).into(); - let app = create_app_router::(state.clone()); - let listener = TcpListener::bind(addr).await?; + let state: Arc> = RwLock::new(state).into(); + let app = create_app_router(state.clone()); let task = tokio::spawn( @@ -130,7 +139,7 @@ impl PbsService { MetricsProvider::load_and_run(network, PBS_METRICS_REGISTRY.clone()) } - async fn refresh_registry_muxes(state: PbsStateGuard) { + async fn refresh_registry_muxes(state: PbsStateGuard) { // Read-only portion let mut new_pubkeys = HashMap::new(); let mut removed_pubkeys = HashSet::new(); diff --git a/crates/pbs/src/state.rs b/crates/pbs/src/state.rs index cbe86af9..560a0cf3 100644 --- a/crates/pbs/src/state.rs +++ b/crates/pbs/src/state.rs @@ -7,39 +7,22 @@ use cb_common::{ }; use parking_lot::RwLock; -pub trait BuilderApiState: Clone + Sync + Send + 'static {} -impl BuilderApiState for () {} +pub type PbsStateGuard = Arc>; -pub type PbsStateGuard = Arc>>; - -/// Config for the Pbs module. It can be extended by adding extra data to the -/// state for modules that need it -// TODO: consider remove state from the PBS module altogether +/// Config for the Pbs module. 
#[derive(Clone)] -pub struct PbsState { +pub struct PbsState { /// Config data for the Pbs service pub config: Arc, /// Path of the config file, for watching changes pub config_path: Arc, - /// Opaque extra data for library use - pub data: S, } -impl PbsState<()> { +impl PbsState { pub fn new(config: PbsModuleConfig, config_path: PathBuf) -> Self { - Self { config: Arc::new(config), config_path: Arc::new(config_path), data: () } - } - - pub fn with_data(self, data: S) -> PbsState { - PbsState { data, config: self.config, config_path: self.config_path } + Self { config: Arc::new(config), config_path: Arc::new(config_path) } } -} -impl PbsState -where - S: BuilderApiState, -{ - // Getters pub fn pbs_config(&self) -> &PbsConfig { &self.config.pbs_config } diff --git a/tests/src/mock_relay.rs b/tests/src/mock_relay.rs index 21accb34..0eb4eabc 100644 --- a/tests/src/mock_relay.rs +++ b/tests/src/mock_relay.rs @@ -40,11 +40,17 @@ use tracing::{debug, error}; use tree_hash::TreeHash; pub async fn start_mock_relay_service(state: Arc, port: u16) -> eyre::Result<()> { - let app = mock_relay_app_router(state); - let socket = SocketAddr::new("0.0.0.0".parse()?, port); let listener = TcpListener::bind(socket).await?; + start_mock_relay_service_with_listener(state, listener).await +} +/// Like [`start_mock_relay_service`], but accepts a pre-bound [`TcpListener`]. 
+pub async fn start_mock_relay_service_with_listener( + state: Arc, + listener: TcpListener, +) -> eyre::Result<()> { + let app = mock_relay_app_router(state); axum::serve(listener, app).await?; Ok(()) } diff --git a/tests/src/mock_ssv_public.rs b/tests/src/mock_ssv_public.rs index a014db42..dcd62df5 100644 --- a/tests/src/mock_ssv_public.rs +++ b/tests/src/mock_ssv_public.rs @@ -30,6 +30,18 @@ pub async fn create_mock_public_ssv_server( port: u16, state: Option, ) -> Result, axum::Error> { + let address = SocketAddr::from(([127, 0, 0, 1], port)); + let listener = TcpListener::bind(address).await.map_err(axum::Error::new)?; + create_mock_public_ssv_server_with_listener(listener, state).await +} + +/// Like [`create_mock_public_ssv_server`], but accepts a pre-bound +/// [`TcpListener`]. +pub async fn create_mock_public_ssv_server_with_listener( + listener: TcpListener, + state: Option, +) -> Result, axum::Error> { + let port = listener.local_addr().map(|a| a.port()).unwrap_or(0); let data = include_str!("../../tests/data/ssv_valid_public.json"); let response = serde_json::from_str::(data).expect("failed to parse test data"); @@ -46,8 +58,6 @@ pub async fn create_mock_public_ssv_server( .with_state(state) .into_make_service(); - let address = SocketAddr::from(([127, 0, 0, 1], port)); - let listener = TcpListener::bind(address).await.map_err(axum::Error::new)?; let server = axum::serve(listener, router).with_graceful_shutdown(async { tokio::signal::ctrl_c().await.expect("Failed to listen for shutdown signal"); }); diff --git a/tests/src/utils.rs b/tests/src/utils.rs index dd0ba733..74e86f2d 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -27,6 +27,18 @@ pub fn get_local_address(port: u16) -> String { format!("http://0.0.0.0:{port}") } +/// Bind to port 0 and let the OS assign an unused ephemeral port. +/// +/// The returned listener keeps the port reserved. 
Pass it to +/// [`PbsService::run_with_listener`] or +/// [`start_mock_relay_service_with_listener`] so the socket is never released +/// between allocation and use (zero TOCTOU race). Extract the port with +/// `listener.local_addr().unwrap().port()` when you need the number for config +/// or client construction. +pub async fn get_free_listener() -> tokio::net::TcpListener { + tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap() +} + static SYNC_SETUP: Once = Once::new(); pub fn setup_test_env() { SYNC_SETUP.call_once(|| { diff --git a/tests/tests/pbs_cfg_file_update.rs b/tests/tests/pbs_cfg_file_update.rs index a9d2ff7d..a3a01c58 100644 --- a/tests/tests/pbs_cfg_file_update.rs +++ b/tests/tests/pbs_cfg_file_update.rs @@ -12,9 +12,11 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use lh_types::ForkName; @@ -32,20 +34,23 @@ async fn test_cfg_file_update() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3730; + let pbs_listener = get_free_listener().await; + let relay1_listener = get_free_listener().await; + let relay2_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay1_port = relay1_listener.local_addr().unwrap().port(); + let relay2_port = relay2_listener.local_addr().unwrap().port(); // Start relay 1 - let relay1_port = pbs_port + 1; let relay1 = generate_mock_relay(relay1_port, pubkey.clone())?; let relay1_state = Arc::new(MockRelayState::new(chain, signer.clone())); - tokio::spawn(start_mock_relay_service(relay1_state.clone(), relay1_port)); + 
tokio::spawn(start_mock_relay_service_with_listener(relay1_state.clone(), relay1_listener)); // Start relay 2 - let relay2_port = relay1_port + 1; let relay2 = generate_mock_relay(relay2_port, pubkey.clone())?; let relay2_id = relay2.id.clone().to_string(); let relay2_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(relay2_state.clone(), relay2_port)); + tokio::spawn(start_mock_relay_service_with_listener(relay2_state.clone(), relay2_listener)); // Make a config with relay 1 only let pbs_config = PbsConfig { @@ -109,7 +114,7 @@ async fn test_cfg_file_update() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![relay1.clone()]); let state = PbsState::new(config, config_path.clone()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers - extra time for the file watcher tokio::time::sleep(Duration::from_millis(1000)).await; diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index c366c15e..679e6ab5 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -14,9 +14,11 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use lh_types::{ForkVersionDecode, beacon_response::EmptyMetadata}; @@ -29,7 +31,6 @@ use url::Url; #[tokio::test] async fn test_get_header() -> Result<()> { test_get_header_impl( - 3200, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -47,7 +48,6 @@ async fn test_get_header() -> 
Result<()> { #[tokio::test] async fn test_get_header_ssz() -> Result<()> { test_get_header_impl( - 3202, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -67,7 +67,6 @@ async fn test_get_header_ssz() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_into_json() -> Result<()> { test_get_header_impl( - 3204, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), 1, @@ -86,7 +85,6 @@ async fn test_get_header_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_ssz() -> Result<()> { test_get_header_impl( - 3206, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), 1, @@ -105,7 +103,6 @@ async fn test_get_header_multitype_ssz() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_json() -> Result<()> { test_get_header_impl( - 3208, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -125,7 +122,6 @@ async fn test_get_header_multitype_json() -> Result<()> { #[tokio::test] async fn test_get_header_light() -> Result<()> { test_get_header_impl( - 3210, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -143,7 +139,6 @@ async fn test_get_header_light() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_light() -> Result<()> { test_get_header_impl( - 3212, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -163,7 +158,6 @@ async fn test_get_header_ssz_light() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_into_json_light() -> Result<()> { test_get_header_impl( - 3214, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), 1, @@ -182,7 +176,6 @@ async fn test_get_header_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_ssz_light() -> Result<()> { test_get_header_impl( - 3216, 
HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), 1, @@ -201,7 +194,6 @@ async fn test_get_header_multitype_ssz_light() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_json_light() -> Result<()> { test_get_header_impl( - 3218, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -221,7 +213,6 @@ async fn test_get_header_multitype_json_light() -> Result<()> { /// fine; if the parent block fetch fails the relay response is still returned /// (extra validation is skipped with a warning). async fn test_get_header_impl( - pbs_port: u16, accept_types: HashSet, relay_types: HashSet, expected_try_count: u64, @@ -237,13 +228,16 @@ async fn test_get_header_impl( let signer = random_secret(); let pubkey = signer.public_key(); let chain = Chain::Holesky; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mut mock_state = MockRelayState::new(chain, signer).with_bid_value(bid_value); mock_state.supported_content_types = Arc::new(relay_types); let mock_state = Arc::new(mock_state); let mock_relay = generate_mock_relay(relay_port, pubkey)?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let mut pbs_config = get_pbs_config(pbs_port); @@ -252,7 +246,7 @@ async fn test_get_header_impl( pbs_config.rpc_url = rpc_url; let config = to_pbs_config(chain, pbs_config, vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; 
@@ -310,20 +304,24 @@ async fn test_get_header_returns_204_if_relay_down() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3300; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Create a mock relay client let mock_state = Arc::new(MockRelayState::new(chain, signer)); let mock_relay = generate_mock_relay(relay_port, pubkey)?; // Don't start the relay - // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + // tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), + // relay_listener)); + drop(relay_listener); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -344,18 +342,20 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3400; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay let mock_state = Arc::new(MockRelayState::new(chain, signer)); let mock_relay = generate_mock_relay(relay_port, pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, 
get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -394,14 +394,13 @@ async fn test_get_header_all_modes_enforce_min_bid() -> Result<()> { // handled gracefully (extra validation is skipped with a warning). let fake_rpc: Url = "http://127.0.0.1:1".parse()?; - for (pbs_port, mode, rpc_url) in [ - (3500u16, HeaderValidationMode::Standard, None), - (3502u16, HeaderValidationMode::None, None), - (3504u16, HeaderValidationMode::Extra, Some(fake_rpc.clone())), + for (mode, rpc_url) in [ + (HeaderValidationMode::Standard, None), + (HeaderValidationMode::None, None), + (HeaderValidationMode::Extra, Some(fake_rpc.clone())), ] { // Bid below min → all modes reject (204). test_get_header_impl( - pbs_port, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -416,7 +415,6 @@ async fn test_get_header_all_modes_enforce_min_bid() -> Result<()> { // Bid above min → all modes accept (200). test_get_header_impl( - pbs_port + 100, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -447,12 +445,15 @@ async fn test_get_header_ssz_bid_value_round_trip() -> Result<()> { // Use a distinctive value so accidental zero-matches are impossible. 
let relay_bid = U256::from(999_888_777u64); - for (pbs_port, fork_name) in [(3508u16, ForkName::Electra), (3510u16, ForkName::Fulu)] { - let relay_port = pbs_port + 1; + for fork_name in [ForkName::Electra, ForkName::Fulu] { + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone()).with_bid_value(relay_bid)); let mock_relay = generate_mock_relay(relay_port, pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let mut pbs_config = get_pbs_config(pbs_port); // None mode: PBS forwards the raw SSZ bytes without re-encoding. @@ -460,7 +461,7 @@ async fn test_get_header_ssz_bid_value_round_trip() -> Result<()> { pbs_config.min_bid_wei = U256::ZERO; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await; @@ -490,9 +491,10 @@ async fn test_get_header_unsupported_fork_returns_400() -> Result<()> { let signer = random_secret(); let chain = Chain::Holesky; - let relay_port = 3512u16; + let relay_listener = get_free_listener().await; + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone())); - tokio::spawn(start_mock_relay_service(mock_state, relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state, relay_listener)); tokio::time::sleep(Duration::from_millis(100)).await; @@ -625,7 +627,6 @@ async fn test_get_header_bid_validation_matrix() -> Result<()> { for (i, &(fork, encoding, mode, relay_bid, 
expected_status)) in cases.iter().enumerate() { test_get_header_impl( - 3900u16 + (i as u16 * 2), HashSet::from([encoding]), HashSet::from([encoding]), 1, @@ -656,22 +657,25 @@ async fn test_get_header_none_mode_bypasses_pubkey_validation() -> Result<()> { let signer = random_secret(); let wrong_pubkey = random_secret().public_key(); - for (pbs_port, mode, expected_status) in [ - (3504u16, HeaderValidationMode::Standard, StatusCode::NO_CONTENT), - (3506u16, HeaderValidationMode::None, StatusCode::OK), + for (mode, expected_status) in [ + (HeaderValidationMode::Standard, StatusCode::NO_CONTENT), + (HeaderValidationMode::None, StatusCode::OK), ] { - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone())); // Register with `wrong_pubkey` — PBS will expect this key but the relay // embeds `signer.public_key()`, causing a mismatch in Standard mode. 
let mock_relay = generate_mock_relay(relay_port, wrong_pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let mut pbs_config = get_pbs_config(pbs_port); pbs_config.header_validation_mode = mode; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index f9a0d485..f7497b6e 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -3,9 +3,11 @@ use std::{path::PathBuf, sync::Arc, time::Duration}; use cb_common::{signer::random_secret, types::Chain}; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use reqwest::StatusCode; @@ -18,21 +20,24 @@ async fn test_get_status() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3500; - let relay_0_port = pbs_port + 1; - let relay_1_port = pbs_port + 2; + let pbs_listener = get_free_listener().await; + let relay_0_listener = get_free_listener().await; + let relay_1_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_0_port = relay_0_listener.local_addr().unwrap().port(); + let relay_1_port = relay_1_listener.local_addr().unwrap().port(); let relays = vec![ generate_mock_relay(relay_0_port, pubkey.clone())?, 
generate_mock_relay(relay_1_port, pubkey)?, ]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_0_port)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_1_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_0_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_1_listener)); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -54,18 +59,22 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3600; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); // Don't start the relay - // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + // tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), + // relay_listener)); + drop(relay_listener); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index 0703e49a..5d184c04 100644 --- 
a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -21,13 +21,16 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_ssv_node::{SsvNodeMockState, create_mock_ssv_node_server}, - mock_ssv_public::{PublicSsvMockState, TEST_HTTP_TIMEOUT, create_mock_public_ssv_server}, + mock_ssv_public::{ + PublicSsvMockState, TEST_HTTP_TIMEOUT, create_mock_public_ssv_server, + create_mock_public_ssv_server_with_listener, + }, mock_validator::MockValidator, utils::{ - bls_pubkey_from_hex_unchecked, generate_mock_relay, get_pbs_config, setup_test_env, - to_pbs_config, + bls_pubkey_from_hex_unchecked, generate_mock_relay, get_free_listener, get_pbs_config, + setup_test_env, to_pbs_config, }, }; use eyre::Result; @@ -205,17 +208,24 @@ async fn test_mux() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3700; - - let mux_relay_1 = generate_mock_relay(pbs_port + 1, pubkey.clone())?; - let mux_relay_2 = generate_mock_relay(pbs_port + 2, pubkey.clone())?; - let default_relay = generate_mock_relay(pbs_port + 3, pubkey.clone())?; + let pbs_listener = get_free_listener().await; + let relay_1_listener = get_free_listener().await; + let relay_2_listener = get_free_listener().await; + let relay_3_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_1_port = relay_1_listener.local_addr().unwrap().port(); + let relay_2_port = relay_2_listener.local_addr().unwrap().port(); + let relay_3_port = relay_3_listener.local_addr().unwrap().port(); + + let mux_relay_1 = generate_mock_relay(relay_1_port, pubkey.clone())?; + let mux_relay_2 = generate_mock_relay(relay_2_port, pubkey.clone())?; + let default_relay = generate_mock_relay(relay_3_port, pubkey.clone())?; // Run 3 mock relays let mock_state = Arc::new(MockRelayState::new(chain, signer)); 
- tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 2)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 3)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_1_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_2_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_3_listener)); // Register all relays in PBS config let relays = vec![default_relay.clone()]; @@ -235,7 +245,7 @@ async fn test_mux() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -315,10 +325,19 @@ async fn test_ssv_multi_with_node() -> Result<()> { let pubkey2 = signer2.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3711; + let pbs_listener = get_free_listener().await; + let ssv_node_listener = get_free_listener().await; + let ssv_public_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_node_port = ssv_node_listener.local_addr().unwrap().port(); + let ssv_public_port = ssv_public_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); + // Drop the ssv_node_listener so create_mock_ssv_node_server can bind the port + // (no _with_listener variant available for the SSV node mock server). 
+ drop(ssv_node_listener); // Start the mock SSV node - let ssv_node_port = pbs_port + 1; let ssv_node_url = Url::parse(&format!("http://localhost:{ssv_node_port}/v1/"))?; let mock_ssv_node_state = SsvNodeMockState { validators: Arc::new(RwLock::new(vec![ @@ -331,21 +350,23 @@ async fn test_ssv_multi_with_node() -> Result<()> { create_mock_ssv_node_server(ssv_node_port, Some(mock_ssv_node_state.clone())).await?; // Start the mock SSV public API - let ssv_public_port = ssv_node_port + 1; let ssv_public_url = Url::parse(&format!("http://localhost:{ssv_public_port}/api/v4/"))?; let mock_ssv_public_state = PublicSsvMockState { validators: Arc::new(RwLock::new(vec![SSVPublicValidator { pubkey: pubkey.clone() }])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_public_handle = - create_mock_public_ssv_server(ssv_public_port, Some(mock_ssv_public_state.clone())).await?; + let ssv_public_handle = create_mock_public_ssv_server_with_listener( + ssv_public_listener, + Some(mock_ssv_public_state.clone()), + ) + .await?; // Start a mock relay to be used by the mux - let relay_port = ssv_public_port + 1; let relay = generate_mock_relay(relay_port, pubkey.clone())?; let relay_id = relay.id.clone().to_string(); let relay_state = Arc::new(MockRelayState::new(chain, signer)); - let relay_task = tokio::spawn(start_mock_relay_service(relay_state.clone(), relay_port)); + let relay_task = + tokio::spawn(start_mock_relay_service_with_listener(relay_state.clone(), relay_listener)); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -379,7 +400,7 @@ async fn test_ssv_multi_with_node() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<()>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start @@ -393,7 +414,7 @@ async fn test_ssv_multi_with_node() -> 
Result<()> { .do_get_header(Some(pubkey2.clone()), HashSet::new(), ForkName::Electra) .await?; assert_eq!(res.status(), StatusCode::OK); - assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV node + assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV node // Shut down the server handles pbs_server.abort(); @@ -415,10 +436,17 @@ async fn test_ssv_multi_with_public() -> Result<()> { let pubkey2 = signer2.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3720; - - // Start the mock SSV node - let ssv_node_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let ssv_node_listener = get_free_listener().await; + let ssv_public_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_node_port = ssv_node_listener.local_addr().unwrap().port(); + let ssv_public_port = ssv_public_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); + drop(ssv_node_listener); // SSV node is intentionally down — release the reserved port + + // Start the mock SSV node (not started — simulating it being down) let ssv_node_url = Url::parse(&format!("http://localhost:{ssv_node_port}/v1/"))?; // Don't start the SSV node server to simulate it being down @@ -426,7 +454,6 @@ async fn test_ssv_multi_with_public() -> Result<()> { // Some(mock_ssv_node_state.clone())).await?; // Start the mock SSV public API - let ssv_public_port = ssv_node_port + 1; let ssv_public_url = Url::parse(&format!("http://localhost:{ssv_public_port}/api/v4/"))?; let mock_ssv_public_state = PublicSsvMockState { validators: Arc::new(RwLock::new(vec![ @@ -435,15 +462,18 @@ async fn test_ssv_multi_with_public() -> Result<()> { ])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_public_handle = - create_mock_public_ssv_server(ssv_public_port, 
Some(mock_ssv_public_state.clone())).await?; + let ssv_public_handle = create_mock_public_ssv_server_with_listener( + ssv_public_listener, + Some(mock_ssv_public_state.clone()), + ) + .await?; // Start a mock relay to be used by the mux - let relay_port = ssv_public_port + 1; let relay = generate_mock_relay(relay_port, pubkey.clone())?; let relay_id = relay.id.clone().to_string(); let relay_state = Arc::new(MockRelayState::new(chain, signer)); - let relay_task = tokio::spawn(start_mock_relay_service(relay_state.clone(), relay_port)); + let relay_task = + tokio::spawn(start_mock_relay_service_with_listener(relay_state.clone(), relay_listener)); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -477,7 +507,7 @@ async fn test_ssv_multi_with_public() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<()>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start @@ -491,7 +521,7 @@ async fn test_ssv_multi_with_public() -> Result<()> { .do_get_header(Some(pubkey2.clone()), HashSet::new(), ForkName::Electra) .await?; assert_eq!(res.status(), StatusCode::OK); - assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV public API + assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV public API // Shut down the server handles pbs_server.abort(); diff --git a/tests/tests/pbs_mux_refresh.rs b/tests/tests/pbs_mux_refresh.rs index 28642d2c..aff8300b 100644 --- a/tests/tests/pbs_mux_refresh.rs +++ b/tests/tests/pbs_mux_refresh.rs @@ -8,10 +8,10 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, - mock_ssv_public::{PublicSsvMockState, create_mock_public_ssv_server}, + mock_relay::{MockRelayState, 
start_mock_relay_service_with_listener}, + mock_ssv_public::{PublicSsvMockState, create_mock_public_ssv_server_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, to_pbs_config}, + utils::{generate_mock_relay, get_free_listener, get_pbs_config, to_pbs_config}, }; use eyre::Result; use lh_types::ForkName; @@ -39,10 +39,15 @@ async fn test_auto_refresh() -> Result<()> { let new_mux_pubkey = new_mux_signer.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3710; + let pbs_listener = get_free_listener().await; + let ssv_listener = get_free_listener().await; + let default_relay_listener = get_free_listener().await; + let mux_relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_api_port = ssv_listener.local_addr().unwrap().port(); + let default_relay_port = default_relay_listener.local_addr().unwrap().port(); + let mux_relay_port = mux_relay_listener.local_addr().unwrap().port(); - // Start the mock SSV API server - let ssv_api_port = pbs_port + 1; // Intentionally missing a trailing slash to ensure this is handled properly let ssv_api_url = Url::parse(&format!("http://localhost:{ssv_api_port}/api/v4"))?; let mock_ssv_state = PublicSsvMockState { @@ -51,23 +56,24 @@ async fn test_auto_refresh() -> Result<()> { }])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_server_handle = - create_mock_public_ssv_server(ssv_api_port, Some(mock_ssv_state.clone())).await?; + create_mock_public_ssv_server_with_listener(ssv_listener, Some(mock_ssv_state.clone())).await?; // Start a default relay for non-mux keys - let default_relay_port = ssv_api_port + 1; let default_relay = generate_mock_relay(default_relay_port, default_pubkey.clone())?; let default_relay_state = Arc::new(MockRelayState::new(chain, default_signer.clone())); - let default_relay_task = - tokio::spawn(start_mock_relay_service(default_relay_state.clone(), default_relay_port)); + let default_relay_task 
= tokio::spawn(start_mock_relay_service_with_listener( + default_relay_state.clone(), + default_relay_listener, + )); // Start a mock relay to be used by the mux - let mux_relay_port = default_relay_port + 1; let mux_relay = generate_mock_relay(mux_relay_port, default_pubkey.clone())?; let mux_relay_id = mux_relay.id.clone().to_string(); let mux_relay_state = Arc::new(MockRelayState::new(chain, default_signer)); - let mux_relay_task = - tokio::spawn(start_mock_relay_service(mux_relay_state.clone(), mux_relay_port)); + let mux_relay_task = tokio::spawn(start_mock_relay_service_with_listener( + mux_relay_state.clone(), + mux_relay_listener, + )); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -100,7 +106,7 @@ async fn test_auto_refresh() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<()>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {default_pubkey}"); // Wait for the server to start @@ -177,7 +183,6 @@ async fn test_auto_refresh() -> Result<()> { // Shut down the server handles pbs_server.abort(); - ssv_server_handle.abort(); default_relay_task.abort(); mux_relay_task.abort(); diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index b711a38a..f921c6c6 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -9,9 +9,11 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::{MockValidator, load_test_signed_blinded_block}, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use 
lh_types::beacon_response::ForkVersionDecode; @@ -21,7 +23,6 @@ use tracing::info; #[tokio::test] async fn test_submit_block_v1() -> Result<()> { let res = submit_block_impl( - 3800, BuilderApiVersion::V1, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -46,7 +47,6 @@ async fn test_submit_block_v1() -> Result<()> { #[tokio::test] async fn test_submit_block_v2() -> Result<()> { let res = submit_block_impl( - 3802, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -67,7 +67,6 @@ async fn test_submit_block_v2() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_without_relay_support() -> Result<()> { let res = submit_block_impl( - 3804, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -88,7 +87,6 @@ async fn test_submit_block_v2_without_relay_support() -> Result<()> { #[tokio::test] async fn test_submit_block_on_broken_relay() -> Result<()> { let _res = submit_block_impl( - 3806, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -106,7 +104,6 @@ async fn test_submit_block_on_broken_relay() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz() -> Result<()> { let res = submit_block_impl( - 3808, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -132,7 +129,6 @@ async fn test_submit_block_v1_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz() -> Result<()> { let res = submit_block_impl( - 3810, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -153,7 +149,6 @@ async fn test_submit_block_v2_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_into_json() -> Result<()> { let res = submit_block_impl( - 3812, 
BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -181,7 +176,6 @@ async fn test_submit_block_v1_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz_into_json() -> Result<()> { let res = submit_block_impl( - 3814, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -202,7 +196,6 @@ async fn test_submit_block_v2_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_ssz() -> Result<()> { let res = submit_block_impl( - 3816, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), @@ -230,7 +223,6 @@ async fn test_submit_block_v1_multitype_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_json() -> Result<()> { let res = submit_block_impl( - 3818, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), @@ -256,7 +248,6 @@ async fn test_submit_block_v1_multitype_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_light() -> Result<()> { let res = submit_block_impl( - 3820, BuilderApiVersion::V1, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -281,7 +272,6 @@ async fn test_submit_block_v1_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_light() -> Result<()> { let res = submit_block_impl( - 3822, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -300,7 +290,6 @@ async fn test_submit_block_v2_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_light() -> Result<()> { let res = submit_block_impl( - 3824, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -326,7 +315,6 @@ async fn test_submit_block_v1_ssz_light() -> Result<()> { 
#[tokio::test] async fn test_submit_block_v2_ssz_light() -> Result<()> { let res = submit_block_impl( - 3826, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -347,7 +335,6 @@ async fn test_submit_block_v2_ssz_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_into_json_light() -> Result<()> { submit_block_impl( - 3828, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -367,7 +354,6 @@ async fn test_submit_block_v1_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz_into_json_light() -> Result<()> { let res = submit_block_impl( - 3830, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -388,7 +374,6 @@ async fn test_submit_block_v2_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_ssz_light() -> Result<()> { let res = submit_block_impl( - 3832, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), @@ -416,7 +401,6 @@ async fn test_submit_block_v1_multitype_ssz_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_json_light() -> Result<()> { let res = submit_block_impl( - 3834, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), @@ -445,15 +429,18 @@ async fn test_submit_block_too_large() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3836; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = 
Arc::new(MockRelayState::new(chain, signer).with_large_body()); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -477,7 +464,6 @@ async fn test_submit_block_too_large() -> Result<()> { #[allow(clippy::too_many_arguments)] async fn submit_block_impl( - pbs_port: u16, api_version: BuilderApiVersion, accept_types: HashSet, relay_types: HashSet, @@ -493,7 +479,10 @@ async fn submit_block_impl( let signer = random_secret(); let pubkey = signer.public_key(); let chain = Chain::Holesky; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay let mock_relay = generate_mock_relay(relay_port, pubkey)?; @@ -506,14 +495,14 @@ async fn submit_block_impl( mock_relay_state = mock_relay_state.with_not_found_for_submit_block(); } let mock_state = Arc::new(mock_relay_state); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let mut pbs_config = get_pbs_config(pbs_port); pbs_config.block_validation_mode = mode; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers 
tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_post_validators.rs b/tests/tests/pbs_post_validators.rs index 7ffabf72..3f4ebf76 100644 --- a/tests/tests/pbs_post_validators.rs +++ b/tests/tests/pbs_post_validators.rs @@ -7,9 +7,11 @@ use cb_common::{ }; use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use reqwest::StatusCode; @@ -22,17 +24,20 @@ async fn test_register_validators() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 4000; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -68,20 +73,23 @@ async fn test_register_validators_does_not_retry_on_429() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let 
chain = Chain::Holesky; - let pbs_port = 4200; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Set up mock relay state and override response to 429 let mock_state = Arc::new(MockRelayState::new(chain, signer)); mock_state.set_response_override(StatusCode::TOO_MANY_REQUESTS); // Run a mock relay - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state.clone())); + tokio::spawn(PbsService::run_with_listener(state.clone(), pbs_listener)); // Leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -121,14 +129,17 @@ async fn test_register_validators_retries_on_500() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 4300; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Set up internal mock relay with 500 response override let mock_state = Arc::new(MockRelayState::new(chain, signer)); mock_state.set_response_override(StatusCode::INTERNAL_SERVER_ERROR); // 500 - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; + 
tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Set retry limit to 3 let mut pbs_config = get_pbs_config(pbs_port); @@ -136,7 +147,7 @@ async fn test_register_validators_retries_on_500() -> Result<()> { let config = to_pbs_config(chain, pbs_config, relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<()>(state.clone())); + tokio::spawn(PbsService::run_with_listener(state.clone(), pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await; From 7e4648f826c5d57403add6d428a8a4c2bbcd563d Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 12:10:05 -0700 Subject: [PATCH 14/25] address Dirk issues in docker_init.rs: - add missing ADMIN_JWT_ENV and SIGNER_TLS_CERTIFICATES_PATH_ENV - support https healthchecks --- crates/cli/src/docker_init.rs | 79 ++++++++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 7976ce17..f2c5e2e4 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -497,6 +497,8 @@ fn create_signer_service_dirk( let mut envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), + get_env_val(SIGNER_TLS_CERTIFICATES_PATH_ENV, SIGNER_TLS_CERTIFICATES_PATH_DEFAULT), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), @@ -548,6 +550,7 @@ fn create_signer_service_dirk( // write jwts to env service_config.envs.insert(JWTS_ENV.into(), format_comma_separated(&service_config.jwts)); + service_config.envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // CA cert volume and env if let Some(ca_cert_path) = ca_cert_path { @@ -589,8 +592,8 @@ fn create_signer_service_dirk( environment: Environment::KvPair(envs), healthcheck: Some(Healthcheck { test: 
Some(HealthcheckTest::Single(format!( - "curl -f http://localhost:{}/status", - signer_config.port, + "curl -k -f {}/status", + cb_config.signer_server_url(SIGNER_PORT_DEFAULT), ))), interval: Some("30s".into()), timeout: Some("5s".into()), @@ -932,6 +935,13 @@ mod tests { service.volumes.iter().any(|v| matches!(v, Volumes::Simple(s) if s.contains(substr))) } + fn get_healthcheck_cmd(service: &Service) -> Option { + service.healthcheck.as_ref().and_then(|hc| match &hc.test { + Some(HealthcheckTest::Single(cmd)) => Some(cmd.clone()), + _ => None, + }) + } + fn has_port(service: &Service, substr: &str) -> bool { match &service.ports { Ports::Short(ports) => ports.iter().any(|p| p.contains(substr)), @@ -1309,12 +1319,33 @@ mod tests { assert!(env_str(&service, DIRK_CERT_ENV).is_some()); assert!(env_str(&service, DIRK_KEY_ENV).is_some()); assert!(env_str(&service, DIRK_DIR_SECRETS_ENV).is_some()); + assert!(has_env_key(&service, ADMIN_JWT_ENV)); + assert!(has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); assert!(has_volume(&service, "client.crt")); assert!(has_volume(&service, "client.key")); assert!(has_volume(&service, "dirk_secrets")); Ok(()) } + #[test] + fn test_create_signer_service_dirk_generates_admin_jwt() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let admin_jwt = sc.envs.get(ADMIN_JWT_ENV).expect("ADMIN_JWT_ENV must be set"); + assert!(!admin_jwt.is_empty(), "admin JWT secret must not be empty"); + Ok(()) + } + #[test] fn test_create_signer_service_dirk_with_ca_cert() -> eyre::Result<()> { let mut sc = minimal_service_config(); @@ -1690,6 +1721,50 @@ mod tests { Ok(()) } + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_https_with_tls() -> eyre::Result<()> { + let dir = tempfile::tempdir()?; + 
let certs_path = dir.path().to_path_buf(); + std::fs::write(certs_path.join(SIGNER_TLS_CERTIFICATE_NAME), b"cert")?; + std::fs::write(certs_path.join(SIGNER_TLS_KEY_NAME), b"key")?; + + let mut sc = service_config_with_tls(certs_path); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("https://"), "healthcheck must use https with TLS: {cmd}"); + assert!(cmd.contains("-k"), "healthcheck must use -k flag for self-signed certs: {cmd}"); + Ok(()) + } + + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_http_without_tls() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("http://"), "healthcheck must use http without TLS: {cmd}"); + Ok(()) + } + // ------------------------------------------------------------------------- // create_module_service – TLS cert env/volume // ------------------------------------------------------------------------- From 84a0899292e7a2abcd2054bb9aac69bee75d1630 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 12:31:49 -0700 Subject: [PATCH 15/25] cleaner error message if TLS CryptoProvider fails --- crates/signer/src/service.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index fdf86879..b056c96c 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -177,15 
+177,14 @@ impl SigningService { break; } Err(e) => { + if attempts >= 3 { + return Err(eyre::eyre!( + "Exceeded maximum attempts to install AWS-LC as default TLS provider: {e:?}" + )); + } error!( "Failed to install AWS-LC as default TLS provider: {e:?}. Retrying..." ); - if attempts >= 3 { - error!( - "Exceeded maximum attempts to install AWS-LC as default TLS provider" - ); - break; - } attempts += 1; } } From b3b325182dc076fa2c2d76f5e4379372f786631b Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 13:03:26 -0700 Subject: [PATCH 16/25] users get error message on missing [signer] section instead of toml deserialization error message --- crates/common/src/config/module.rs | 4 ++-- crates/common/src/config/pbs.rs | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 22884551..aec45289 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -83,7 +83,7 @@ pub fn load_commit_module_config() -> Result { chain: Chain, modules: Vec>, - signer: SignerConfig, + signer: Option, } // load module config including the extra data (if any) @@ -106,7 +106,7 @@ pub fn load_commit_module_config() -> Result None, TlsMode::Certificate(path) => Some( load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 3fb49ee6..487c6af9 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -378,7 +378,7 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC chain: Chain, relays: Vec, pbs: CustomPbsConfig, - signer: SignerConfig, + signer: Option, muxes: Option, } @@ -435,7 +435,11 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC // if custom pbs requires a signer client, load jwt let module_jwt = Jwt(load_env_var(MODULE_JWT_ENV)?); let signer_server_url = load_env_var(SIGNER_URL_ENV)?.parse()?; - let certs_path = match 
cb_config.signer.tls_mode { + let certs_path = match cb_config + .signer + .ok_or_else(|| eyre::eyre!("with_signer = true but no [signer] section in config"))? + .tls_mode + { TlsMode::Insecure => None, TlsMode::Certificate(path) => Some( load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) From 7cb87428373cbc4ba4f015dc837efbe6aa1ac8b4 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:06:20 -0700 Subject: [PATCH 17/25] support partial jwt reloads --- crates/signer/src/service.rs | 180 ++++++++++++++++++++++++++++++++--- 1 file changed, 168 insertions(+), 12 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index b056c96c..b301f8dc 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -632,7 +632,6 @@ async fn handle_reload( ) -> Result { debug!(event = "reload", "New request"); - // Regenerate the config let config = match StartSignerConfig::load_from_env() { Ok(config) => config, Err(err) => { @@ -641,7 +640,6 @@ async fn handle_reload( } }; - // Start a new manager with the updated config let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -650,17 +648,24 @@ async fn handle_reload( } }; - // Update the JWT configs if provided in the request + apply_reload(state, request, new_manager).await +} + +/// Applies a reload request to the signing state. Separated from +/// `handle_reload` so the business logic can be tested without requiring a +/// live environment (config file, env vars, keystore on disk). +async fn apply_reload( + state: SigningState, + request: ReloadRequest, + new_manager: SigningManager, +) -> Result { + // Update the JWT configs if provided in the request. Only the provided + // modules are updated; omitted modules keep their existing secrets. 
if let Some(jwt_secrets) = request.jwt_secrets { let mut jwt_configs = state.jwts.write(); - let mut new_configs = HashMap::new(); for (module_id, jwt_secret) in jwt_secrets { - if let Some(signing_id) = jwt_configs.get(&module_id).map(|cfg| cfg.signing_id) { - new_configs.insert(module_id.clone(), ModuleSigningConfig { - module_name: module_id, - jwt_secret, - signing_id, - }); + if let Some(cfg) = jwt_configs.get_mut(&module_id) { + cfg.jwt_secret = jwt_secret; } else { let error_message = format!( "Module {module_id} signing ID not found in commit-boost config, cannot reload" @@ -669,10 +674,8 @@ async fn handle_reload( return Err(SignerModuleError::RequestError(error_message)); } } - *jwt_configs = new_configs; } - // Update the rest of the state once everything has passed if let Some(admin_secret) = request.admin_secret { *state.admin_secret.write() = admin_secret; } @@ -722,3 +725,156 @@ async fn start_manager(config: StartSignerConfig) -> eyre::Result ModuleSigningConfig { + ModuleSigningConfig { + module_name: ModuleId(module_name.to_string()), + jwt_secret: secret.to_string(), + signing_id, + } + } + + fn make_state(jwts: HashMap) -> SigningState { + SigningState { + manager: Arc::new(RwLock::new(SigningManager::Local( + LocalSigningManager::new(Chain::Holesky, None).unwrap(), + ))), + jwts: Arc::new(ParkingRwLock::new(jwts)), + admin_secret: Arc::new(ParkingRwLock::new("admin".to_string())), + jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), + jwt_auth_fail_limit: 3, + jwt_auth_fail_timeout: Duration::from_secs(60), + reverse_proxy: ReverseProxyHeaderSetup::None, + } + } + + fn empty_manager() -> SigningManager { + SigningManager::Local(LocalSigningManager::new(Chain::Holesky, None).unwrap()) + } + + /// Partial reload must update only the provided modules and leave omitted + /// modules with their existing secrets. 
+ #[tokio::test] + async fn test_partial_reload_preserves_omitted_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([(module_a.clone(), "rotated-secret-a".to_string())])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_ok(), "apply_reload should succeed"); + + let jwts = state.jwts.read(); + assert_eq!( + jwts[&module_a].jwt_secret, "rotated-secret-a", + "module_a secret should be updated" + ); + assert_eq!( + jwts[&module_b].jwt_secret, "secret-b", + "module_b secret must be preserved when omitted" + ); + } + + /// A full reload (all modules provided) should update every module. 
+ #[tokio::test] + async fn test_full_reload_updates_all_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([ + (module_a.clone(), "new-secret-a".to_string()), + (module_b.clone(), "new-secret-b".to_string()), + ])), + admin_secret: None, + }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "new-secret-a"); + assert_eq!(jwts[&module_b].jwt_secret, "new-secret-b"); + } + + /// Reload with an unknown module ID in jwt_secrets should return an error + /// and leave the existing state unchanged. 
+ #[tokio::test] + async fn test_reload_unknown_module_returns_error() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([( + ModuleId("unknown-module".to_string()), + "some-secret".to_string(), + )])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_err(), "unknown module should return an error"); + + // Existing module must be untouched + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } + + /// Reload with no jwt_secrets should leave all module secrets unchanged. + #[tokio::test] + async fn test_reload_without_jwt_secrets_preserves_all() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { jwt_secrets: None, admin_secret: None }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } +} From f0820e61df382f548344b6214d8bf3714a4b4f0c Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:32:54 -0700 Subject: [PATCH 18/25] require the nonce in signer-api.yml - previously was marked optional which conflicted with deserializing `SignConsensusRequest` --- api/signer-api.yml | 15 ++++++++++----- crates/common/src/commit/request.rs | 12 ++++++++++-- docs/docs/developing/prop-commit-signing.md | 4 ++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git 
a/api/signer-api.yml b/api/signer-api.yml index 95897ecd..be44f8fd 100644 --- a/api/signer-api.yml +++ b/api/signer-api.yml @@ -86,7 +86,7 @@ paths: application/json: schema: type: object - required: [pubkey, object_root] + required: [pubkey, object_root, nonce] properties: pubkey: description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. @@ -234,7 +234,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -382,7 +382,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 20-byte Ethereum address, with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -695,7 +695,12 @@ components: $ref: "#/components/schemas/EcdsaSignature" Nonce: type: integer - description: If your module tracks nonces per signature (e.g., to prevent replay attacks), this is the unique nonce to use for the signature. It should be an unsigned 64-bit integer in big-endian format. It must be between 0 and 2^64-2, inclusive. If your module doesn't use nonces, we suggest setting this to 2^64-1 instead of 0 because 0 is a legal nonce and will cause complications with your module if you ever want to use a nonce in the future. + description: | + Replay-protection nonce, always mixed into the signing root via `PropCommitSigningInfo`. It + must be an unsigned 64-bit integer between 0 and 2^64-2 (18446744073709551614), inclusive. + + Modules that track nonces for replay protection should use a monotonically increasing value + per key. Modules that do not use replay protection should always send `0`. 
minimum: 0 - maximum: 18446744073709551614 // 2^64-2 + maximum: 18446744073709551614 example: 1 diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index a64e9a67..cd780446 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -84,6 +84,10 @@ impl fmt::Display for SignedProxyDelegation { pub struct SignConsensusRequest { pub pubkey: BlsPublicKey, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. pub nonce: u64, } @@ -93,7 +97,7 @@ impl SignConsensusRequest { } pub fn builder(pubkey: BlsPublicKey) -> Self { - Self::new(pubkey, B256::ZERO, u64::MAX - 1) + Self::new(pubkey, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { @@ -125,6 +129,10 @@ impl Display for SignConsensusRequest { pub struct SignProxyRequest { pub proxy: T, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. 
pub nonce: u64, } @@ -134,7 +142,7 @@ impl SignProxyRequest { } pub fn builder(proxy: T) -> Self { - Self::new(proxy, B256::ZERO, u64::MAX - 1) + Self::new(proxy, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 1e8bd249..30f70413 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -44,7 +44,7 @@ Your module has the option of using **Nonces** for each of its signature request If you want to use them within your module, your module (or whatever remote backend system it connects to) **will be responsible** for storing, comparing, validating, and otherwise using the nonces. Commit-Boost's signer service by itself **does not** store nonces or track which ones have already been used by a given module. -In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit big-endian integer, with a minimum value of 0 and a maximum value of `2^64-2`. We recommend using `2^64-1` as a signifier indicating that your module doesn't use nonces, rather than using 0 for such a purpose. +In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit integer, with a minimum value of 0 and a maximum value of `2^64-2`. The field is required and is always mixed into the signing root. Modules that do not use nonces for replay protection should always send `0`; modules that do should use a monotonically increasing value per key. ## Structure of a Signature @@ -63,7 +63,7 @@ where, for the sub-tree in blue: - `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. -- `Nonce` is the nonce value for the signature request. 
While this value must be present, it can be effectively ignored by setting it to some arbitrary value if your module does not track nonces. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). +- `Nonce` is the nonce value for the signature request. This field is required. Modules that do not use replay protection should always send `0`; modules that do should use a monotonically increasing value per key. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). - `Chain ID` is the ID of the chain that the Signer service is currently configured to use, as indicated by the [Commit-Boost configuration file](../get_started/configuration.md). This must also be a 256-bit unsigned little-endian integer. 
From e23c3ee12bc42fe5ec5351d1406def11f55fd96d Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:43:05 -0700 Subject: [PATCH 19/25] fix suffix when displaying X-Forwarded-For --- crates/common/src/config/signer.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 343ec213..06d43a01 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -92,10 +92,12 @@ impl Display for ReverseProxyHeaderSetup { write!(f, "\"{header} (unique)\"") } ReverseProxyHeaderSetup::Rightmost { header, trusted_count } => { - let suffix = match trusted_count.get() % 10 { - 1 => "st", - 2 => "nd", - 3 => "rd", + let n = trusted_count.get(); + let suffix = match (n % 100, n % 10) { + (11..=13, _) => "th", + (_, 1) => "st", + (_, 2) => "nd", + (_, 3) => "rd", _ => "th", }; write!(f, "\"{header} ({trusted_count}{suffix} from the right)\"") From 170977b635f511fcef9ecc2d0882c0d7221f56e8 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 15:54:43 -0700 Subject: [PATCH 20/25] only take the `jwt_auth_failures` writelock if strictly necessary, default to readlock --- crates/signer/src/service.rs | 52 +++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index b301f8dc..7edd63a8 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -273,38 +273,40 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures fn check_jwt_rate_limit(state: &SigningState, client_ip: &IpAddr) -> Result<(), SignerModuleError> { - let mut failures = state.jwt_auth_failures.write(); + let failures = state.jwt_auth_failures.read(); // Ignore clients that don't have any failures - if let Some(failure_info) = failures.get(client_ip) { - // If the last failure 
was more than the timeout ago, remove this entry so it's - // eligible again - let elapsed = failure_info.last_failure.elapsed(); - if elapsed > state.jwt_auth_fail_timeout { - debug!("Removing {client_ip} from JWT auth failure list"); - failures.remove(client_ip); - return Ok(()); - } + let Some(failure_info) = failures.get(client_ip) else { + debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); + return Ok(()); + }; - // If the failure threshold hasn't been met yet, don't rate limit - if failure_info.failure_count < state.jwt_auth_fail_limit { - debug!( - "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", - failure_info.failure_count, state.jwt_auth_fail_limit - ); - return Ok(()); - } + let elapsed = failure_info.last_failure.elapsed(); + + // If the last failure was more than the timeout ago, remove this entry so it's + // eligible again + if elapsed > state.jwt_auth_fail_timeout { + drop(failures); + debug!("Removing {client_ip} from JWT auth failure list"); + state.jwt_auth_failures.write().remove(client_ip); + return Ok(()); + } - // Rate limit the request - let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); - warn!( - "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + // If the failure threshold hasn't been met yet, don't rate limit + if failure_info.failure_count < state.jwt_auth_fail_limit { + debug!( + "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", + failure_info.failure_count, state.jwt_auth_fail_limit ); - return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); + return Ok(()); } - debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); - Ok(()) + // Rate limit the request + let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); + warn!( + "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + ); + 
Err(SignerModuleError::RateLimited(remaining.as_secs_f64())) } /// Checks if a request can successfully authenticate with the JWT secret From b48e3fbc1cd34d836427a0b253f2b2d2606e42cc Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 16:03:46 -0700 Subject: [PATCH 21/25] remove mark_jwt_failure() calls from failures unrelated to jwts --- crates/signer/src/service.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 7edd63a8..81729272 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -248,7 +248,6 @@ async fn jwt_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; @@ -360,7 +359,6 @@ async fn admin_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; From 64f962fd2c7a765794bb8d3a2ef59a563f489394 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Wed, 25 Mar 2026 17:35:29 -0700 Subject: [PATCH 22/25] add round-trip sign->verify unit tests --- crates/common/src/signature.rs | 60 +++++++++++++++++++++++++++++++--- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 18c10d4a..41631e33 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -165,10 +165,18 @@ pub fn verify_proposer_commitment_signature_ecdsa( #[cfg(test)] mod tests { - use alloy::primitives::aliases::B32; - - use super::compute_domain; - use crate::{constants::APPLICATION_BUILDER_DOMAIN, types::Chain}; + use alloy::primitives::{U256, aliases::B32}; + + use super::{compute_domain, sign_builder_message, 
verify_signed_message}; + use crate::{ + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + BlindedBeaconBlockElectra, BuilderBid, BuilderBidElectra, + ExecutionPayloadHeaderElectra, ExecutionRequests, + }, + types::{BlsSecretKey, Chain}, + utils::TestRandomSeed, + }; #[test] fn test_builder_domains() { @@ -178,4 +186,48 @@ mod tests { assert_eq!(compute_domain(Chain::Sepolia, domain), Chain::Sepolia.builder_domain()); assert_eq!(compute_domain(Chain::Hoodi, domain), Chain::Hoodi.builder_domain()); } + + #[test] + fn test_builder_bid_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let message = BuilderBid::Electra(BuilderBidElectra { + header: ExecutionPayloadHeaderElectra::test_random(), + blob_kzg_commitments: Default::default(), + execution_requests: ExecutionRequests::default(), + value: U256::from(10), + pubkey: pubkey.clone().into(), + }); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &message); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &message, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } + + #[test] + fn test_blinded_block_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let block = BlindedBeaconBlockElectra::test_random(); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &block); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &block, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } } From d71aff02b921c9a2819b853007c9c1de897deae3 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Thu, 26 Mar 2026 11:46:13 -0700 Subject: [PATCH 23/25] remove optional signer client from PbsModuleConfig as signer should only be used if modules are present --- crates/cli/src/docker_init.rs | 14 ++++---- crates/common/src/config/mod.rs | 7 ++-- crates/common/src/config/pbs.rs | 44 ++------------------------ 
crates/common/src/config/signer.rs | 1 - docs/docs/get_started/building.md | 1 - docs/docs/get_started/configuration.md | 14 -------- tests/data/configs/pbs.happy.toml | 1 - tests/data/configs/signer.happy.toml | 1 - tests/src/utils.rs | 3 +- tests/tests/config.rs | 1 - tests/tests/pbs_cfg_file_update.rs | 2 -- 11 files changed, 14 insertions(+), 75 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index f2c5e2e4..ac451341 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -1520,11 +1520,10 @@ mod tests { config } - /// Returns a `ServiceCreationInfo` whose CB config has `pbs.with_signer = - /// true` and a local signer with `TlsMode::Certificate(certs_path)`. + /// Returns a `ServiceCreationInfo` whose CB config has a local signer with + /// `TlsMode::Certificate(certs_path)`. fn service_config_with_tls(certs_path: PathBuf) -> ServiceCreationInfo { let mut sc = minimal_service_config(); - sc.config_info.cb_config.pbs.with_signer = true; sc.config_info.cb_config.signer = Some(local_signer_config_with_tls(certs_path)); sc } @@ -1620,12 +1619,15 @@ mod tests { // ------------------------------------------------------------------------- #[test] - fn test_create_pbs_service_with_tls_adds_cert_env_and_volume() -> eyre::Result<()> { + fn test_create_pbs_service_with_tls_but_no_commit_module_no_cert() -> eyre::Result<()> { + // PBS no longer connects to the signer directly; only commit modules do. + // Even when the signer is configured with TLS, the cert env/volume must + // NOT be injected into the PBS container unless a Commit module is present. 
let mut sc = service_config_with_tls(PathBuf::from("/my/certs")); let service = create_pbs_service(&mut sc)?; - assert!(has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); - assert!(has_volume(&service, SIGNER_TLS_CERTIFICATE_NAME)); + assert!(!has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); + assert!(!has_volume(&service, SIGNER_TLS_CERTIFICATE_NAME)); Ok(()) } diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index e0958342..1955ad42 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -131,10 +131,9 @@ impl CommitBoostConfig { /// Helper to return if the signer module is needed based on the config pub fn needs_signer_module(&self) -> bool { - self.pbs.with_signer || - self.modules.as_ref().is_some_and(|modules| { - modules.iter().any(|module| matches!(module.kind, ModuleKind::Commit)) - }) + self.modules.as_ref().is_some_and(|modules| { + modules.iter().any(|module| matches!(module.kind, ModuleKind::Commit)) + }) } pub fn signer_uses_tls(&self) -> bool { diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 487c6af9..c9525b23 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -21,17 +21,12 @@ use super::{ load_optional_env_var, }; use crate::{ - commit::client::SignerClient, - config::{ - CONFIG_ENV, MODULE_JWT_ENV, MuxKeysLoader, PBS_IMAGE_DEFAULT, PBS_SERVICE_NAME, PbsMuxes, - SIGNER_TLS_CERTIFICATE_NAME, SIGNER_TLS_CERTIFICATES_PATH_ENV, SIGNER_URL_ENV, - SignerConfig, TlsMode, load_env_var, load_file_from_env, - }, + config::{CONFIG_ENV, MuxKeysLoader, PBS_IMAGE_DEFAULT, PbsMuxes, load_file_from_env}, pbs::{ DEFAULT_PBS_PORT, DEFAULT_REGISTRY_REFRESH_SECONDS, DefaultTimeout, LATE_IN_SLOT_TIME_MS, REGISTER_VALIDATOR_RETRY_LIMIT, RelayClient, RelayEntry, }, - types::{BlsPublicKey, Chain, Jwt, ModuleId}, + types::{BlsPublicKey, Chain}, utils::{ WEI_PER_ETH, as_eth_str, default_bool, default_host, default_u16, 
default_u32, default_u64, default_u256, @@ -244,9 +239,6 @@ pub struct StaticPbsConfig { /// Config of pbs module #[serde(flatten)] pub pbs_config: PbsConfig, - /// Whether to enable the signer client - #[serde(default = "default_bool::")] - pub with_signer: bool, } impl StaticPbsConfig { @@ -279,8 +271,6 @@ pub struct PbsModuleConfig { /// URL) DO NOT use this for get_header calls, use `relays` or `mux_lookup` /// instead pub all_relays: Vec, - /// Signer client to call Signer API - pub signer_client: Option, /// List of raw mux details configured, if any pub registry_muxes: Option>, /// Lookup of pubkey to mux config @@ -355,7 +345,6 @@ pub async fn load_pbs_config(config_path: Option) -> Result<(PbsModuleC pbs_config: Arc::new(config.pbs.pbs_config), relays: relay_clients, all_relays, - signer_client: None, registry_muxes, mux_lookup, }, @@ -378,7 +367,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC chain: Chain, relays: Vec, pbs: CustomPbsConfig, - signer: Option, muxes: Option, } @@ -431,33 +419,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC let all_relays = all_relays.into_values().collect(); - let signer_client = if cb_config.pbs.static_config.with_signer { - // if custom pbs requires a signer client, load jwt - let module_jwt = Jwt(load_env_var(MODULE_JWT_ENV)?); - let signer_server_url = load_env_var(SIGNER_URL_ENV)?.parse()?; - let certs_path = match cb_config - .signer - .ok_or_else(|| eyre::eyre!("with_signer = true but no [signer] section in config"))? - .tls_mode - { - TlsMode::Insecure => None, - TlsMode::Certificate(path) => Some( - load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) - .map(PathBuf::from) - .unwrap_or(path) - .join(SIGNER_TLS_CERTIFICATE_NAME), - ), - }; - Some(SignerClient::new( - signer_server_url, - certs_path, - module_jwt, - ModuleId(PBS_SERVICE_NAME.to_string()), - )?) 
- } else { - None - }; - Ok(( PbsModuleConfig { chain: cb_config.chain, @@ -465,7 +426,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC pbs_config: Arc::new(cb_config.pbs.static_config.pbs_config), relays: relay_clients, all_relays, - signer_client, registry_muxes, mux_lookup, }, diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 06d43a01..13575c8c 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -485,7 +485,6 @@ mod tests { ssv_node_api_url: Url::parse("https://example.net").unwrap(), ssv_public_api_url: Url::parse("https://example.net").unwrap(), }, - with_signer: true, }, muxes: None, modules: Some(vec![]), diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index dd860be2..9d73d6bc 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -108,7 +108,6 @@ chain = "Hoodi" [pbs] port = 18550 -with_signer = true [[relays]] url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index aac4a762..240de972 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -63,8 +63,6 @@ To start a local Signer Service, you need to include its parameters in the confi ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -97,8 +95,6 @@ We currently support Lighthouse, Prysm, Teku, Lodestar, and Nimbus's keystores s ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -129,8 +125,6 @@ secrets_path = "secrets" ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -161,8 +155,6 @@ secrets_path = "secrets/password.txt" ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -192,8 +184,6 @@ secrets_path = "secrets" ```toml [pbs] ... 
-with_signer = true - [signer] port = 20000 @@ -228,8 +218,6 @@ All keys have the same password stored in `secrets/password.txt` ```toml [pbs] ... - with_signer = true - [signer] port = 20000 @@ -397,8 +385,6 @@ Specifying it is done within Commit-Boost's configuration file using the `[signe ```toml [pbs] ... -with_signer = true - [signer] port = 20000 ... diff --git a/tests/data/configs/pbs.happy.toml b/tests/data/configs/pbs.happy.toml index 67b39911..e4695b61 100644 --- a/tests/data/configs/pbs.happy.toml +++ b/tests/data/configs/pbs.happy.toml @@ -14,7 +14,6 @@ timeout_get_header_ms = 950 timeout_get_payload_ms = 4000 timeout_register_validator_ms = 3000 wait_all_registrations = true -with_signer = false [[relays]] diff --git a/tests/data/configs/signer.happy.toml b/tests/data/configs/signer.happy.toml index 6fb76445..8a546401 100644 --- a/tests/data/configs/signer.happy.toml +++ b/tests/data/configs/signer.happy.toml @@ -2,7 +2,6 @@ chain = "Hoodi" [pbs] docker_image = "ghcr.io/commit-boost/pbs:latest" -with_signer = true host = "127.0.0.1" port = 18550 relay_check = true diff --git a/tests/src/utils.rs b/tests/src/utils.rs index 74e86f2d..bfcbccc3 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -107,7 +107,7 @@ pub fn get_pbs_config(port: u16) -> PbsConfig { } pub fn get_pbs_static_config(pbs_config: PbsConfig) -> StaticPbsConfig { - StaticPbsConfig { docker_image: String::from(""), pbs_config, with_signer: true } + StaticPbsConfig { docker_image: String::from(""), pbs_config } } pub fn get_commit_boost_config(pbs_static_config: StaticPbsConfig) -> CommitBoostConfig { @@ -132,7 +132,6 @@ pub fn to_pbs_config( chain, endpoint: SocketAddr::new(pbs_config.host.into(), pbs_config.port), pbs_config: Arc::new(pbs_config), - signer_client: None, all_relays: relays.clone(), relays, registry_muxes: None, diff --git a/tests/tests/config.rs b/tests/tests/config.rs index 27b02318..60e72aec 100644 --- a/tests/tests/config.rs +++ b/tests/tests/config.rs @@ 
-41,7 +41,6 @@ async fn test_load_pbs_happy() -> Result<()> { // Docker and general settings assert_eq!(config.pbs.docker_image, "ghcr.io/commit-boost/pbs:latest"); - assert!(!config.pbs.with_signer); assert_eq!(config.pbs.pbs_config.host, "127.0.0.1".parse::().unwrap()); assert_eq!(config.pbs.pbs_config.port, 18550); assert!(config.pbs.pbs_config.relay_check); diff --git a/tests/tests/pbs_cfg_file_update.rs b/tests/tests/pbs_cfg_file_update.rs index a3a01c58..01f34e2d 100644 --- a/tests/tests/pbs_cfg_file_update.rs +++ b/tests/tests/pbs_cfg_file_update.rs @@ -81,7 +81,6 @@ async fn test_cfg_file_update() -> Result<()> { pbs: StaticPbsConfig { docker_image: "cb-fake-repo/cb-fake-image:latest".to_string(), pbs_config: pbs_config.clone(), - with_signer: false, }, muxes: None, modules: None, @@ -133,7 +132,6 @@ async fn test_cfg_file_update() -> Result<()> { pbs: StaticPbsConfig { docker_image: "cb-fake-repo/cb-fake-image:latest".to_string(), pbs_config, - with_signer: false, }, muxes: None, modules: None, From 2d842f5726dee16d658b01c50d96fef5fc2c9835 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Thu, 26 Mar 2026 13:29:47 -0700 Subject: [PATCH 24/25] refactor get_header and submit_block into modules for readability --- config.example.toml | 5 +- crates/pbs/src/routes/get_header.rs | 1156 ----------------- crates/pbs/src/routes/get_header/mod.rs | 305 +++++ crates/pbs/src/routes/get_header/relay.rs | 543 ++++++++ .../pbs/src/routes/get_header/validation.rs | 349 +++++ crates/pbs/src/routes/mod.rs | 16 + crates/pbs/src/routes/register_validator.rs | 12 +- crates/pbs/src/routes/reload.rs | 2 +- crates/pbs/src/routes/status.rs | 10 +- crates/pbs/src/routes/submit_block/mod.rs | 238 ++++ .../relay.rs} | 425 +----- .../pbs/src/routes/submit_block/validation.rs | 148 +++ 12 files changed, 1639 insertions(+), 1570 deletions(-) delete mode 100644 crates/pbs/src/routes/get_header.rs create mode 100644 crates/pbs/src/routes/get_header/mod.rs create mode 100644 
crates/pbs/src/routes/get_header/relay.rs create mode 100644 crates/pbs/src/routes/get_header/validation.rs create mode 100644 crates/pbs/src/routes/submit_block/mod.rs rename crates/pbs/src/routes/{submit_block.rs => submit_block/relay.rs} (51%) create mode 100644 crates/pbs/src/routes/submit_block/validation.rs diff --git a/config.example.toml b/config.example.toml index f7745df4..0e23a6df 100644 --- a/config.example.toml +++ b/config.example.toml @@ -12,9 +12,6 @@ chain = "Holesky" # Docker image to use for the PBS module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/pbs:latest docker_image = "ghcr.io/commit-boost/pbs:latest" -# Whether to enable the PBS module to request signatures from the Signer module (not used in the default PBS image) -# OPTIONAL, DEFAULT: false -with_signer = false # Host to receive BuilderAPI calls from beacon node # OPTIONAL, DEFAULT: 127.0.0.1 host = "127.0.0.1" @@ -174,7 +171,7 @@ timeout_get_header_ms = 900 id = "mux-relay-1" url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09fe73ccd21f88eab31d6de16194d17782e@def.xyz" -# Configuration for the Signer Module, only required if any `commit` module is present, or if `pbs.with_signer = true` +# Configuration for the Signer Module, only required if any `commit` module is present # Currently three types of Signer modules are supported (only one can be used at a time): # - Remote: a remote Web3Signer instance # - Dirk: a remote Dirk instance diff --git a/crates/pbs/src/routes/get_header.rs b/crates/pbs/src/routes/get_header.rs deleted file mode 100644 index f448552b..00000000 --- a/crates/pbs/src/routes/get_header.rs +++ /dev/null @@ -1,1156 +0,0 @@ -use std::{ - collections::HashSet, - sync::Arc, - time::{Duration, Instant}, -}; - -use alloy::{ - primitives::{B256, U256, aliases::B32, utils::format_ether}, - providers::Provider, - rpc::types::Block, -}; -use axum::{ - extract::{Path, State}, - http::{HeaderMap, HeaderValue}, - response::IntoResponse, -}; -use 
cb_common::{ - config::HeaderValidationMode, - constants::APPLICATION_BUILDER_DOMAIN, - pbs::{ - EMPTY_TX_ROOT_HASH, ExecutionPayloadHeaderRef, ForkName, ForkVersionDecode, GetHeaderInfo, - GetHeaderParams, GetHeaderResponse, HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, - RelayClient, SignedBuilderBid, - error::{PbsError, ValidationError}, - }, - signature::verify_signed_message, - types::{BlsPublicKey, BlsPublicKeyBytes, BlsSignature, Chain}, - utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, - get_bid_value_from_signed_builder_bid_ssz, get_consensus_version_header, get_user_agent, - get_user_agent_with_version, ms_into_slot, read_chunked_body_with_max, - timestamp_of_slot_start_sec, utcnow_ms, - }, -}; -use futures::future::join_all; -use parking_lot::RwLock; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; -use ssz::Encode; -use tokio::time::sleep; -use tracing::{Instrument, debug, error, info, warn}; -use tree_hash::TreeHash; -use url::Url; - -use super::{CompoundGetHeaderResponse, LightGetHeaderResponse}; -use crate::{ - constants::{ - GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, - TIMEOUT_ERROR_CODE_STR, - }, - error::PbsClientError, - metrics::{ - BEACON_NODE_STATUS, RELAY_HEADER_VALUE, RELAY_LAST_SLOT, RELAY_LATENCY, RELAY_STATUS_CODE, - }, - state::{PbsState, PbsStateGuard}, - utils::check_gas_limit, -}; - -pub async fn handle_get_header( - State(state): State, - req_headers: HeaderMap, - Path(params): Path, -) -> Result { - tracing::Span::current().record("slot", params.slot); - tracing::Span::current().record("parent_hash", tracing::field::debug(params.parent_hash)); - tracing::Span::current().record("validator", tracing::field::debug(¶ms.pubkey)); - - let state = state.read().clone(); - - let ua = get_user_agent(&req_headers); - let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let accept_types = 
get_accept_types(&req_headers).map_err(|e| { - error!(%e, "error parsing accept header"); - PbsClientError::DecodeError(format!("error parsing accept header: {e}")) - })?; - let accepts_ssz = accept_types.contains(&EncodingType::Ssz); - let accepts_json = accept_types.contains(&EncodingType::Json); - - info!(ua, ms_into_slot, "new request"); - - match get_header(params, req_headers, state, accept_types).await { - Ok(res) => { - if let Some(max_bid) = res { - BEACON_NODE_STATUS.with_label_values(&["200", GET_HEADER_ENDPOINT_TAG]).inc(); - match max_bid { - CompoundGetHeaderResponse::Light(light_bid) => { - // Light validation mode, so just forward the raw response - info!( - value_eth = format_ether(light_bid.value), - "received header (unvalidated)" - ); - - // Create the headers - let consensus_version_header = - match HeaderValue::from_str(&light_bid.version.to_string()) { - Ok(consensus_version_header) => { - Ok::(consensus_version_header) - } - Err(e) => { - return Err(PbsClientError::RelayError(format!( - "error decoding consensus version from relay payload: {e}" - ))); - } - }?; - let content_type = light_bid.encoding_type.content_type(); - let content_type_header = HeaderValue::from_str(content_type).unwrap(); - - // Build response - let mut res = light_bid.raw_bytes.into_response(); - res.headers_mut() - .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); - res.headers_mut().insert(CONTENT_TYPE, content_type_header); - info!("sending response as {} (light)", content_type); - Ok(res) - } - CompoundGetHeaderResponse::Full(max_bid) => { - // Full validation mode, so respond based on requester accept types - info!(value_eth = format_ether(*max_bid.data.message.value()), block_hash =% max_bid.block_hash(), "received header"); - - // Handle SSZ - if accepts_ssz { - let mut res = max_bid.data.as_ssz_bytes().into_response(); - let consensus_version_header = match HeaderValue::from_str( - &max_bid.version.to_string(), - ) { - Ok(consensus_version_header) => 
{ - Ok::(consensus_version_header) - } - Err(e) => { - if accepts_json { - info!("sending response as JSON"); - return Ok( - (StatusCode::OK, axum::Json(max_bid)).into_response() - ); - } else { - return Err(PbsClientError::RelayError(format!( - "error decoding consensus version from relay payload: {e}" - ))); - } - } - }?; - - // This won't actually fail since the string is a const - let content_type_header = - HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); - - res.headers_mut() - .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); - res.headers_mut().insert(CONTENT_TYPE, content_type_header); - info!("sending response as SSZ"); - return Ok(res); - } - - // Handle JSON - if accepts_json { - Ok((StatusCode::OK, axum::Json(max_bid)).into_response()) - } else { - // This shouldn't ever happen but the compiler needs it - Err(PbsClientError::DecodeError( - "no viable accept types in request".to_string(), - )) - } - } - } - } else { - // spec: return 204 if request is valid but no bid available - info!("no header available for slot"); - - BEACON_NODE_STATUS.with_label_values(&["204", GET_HEADER_ENDPOINT_TAG]).inc(); - Ok(StatusCode::NO_CONTENT.into_response()) - } - } - Err(err) => { - error!(%err, "no header available from relays"); - - let err = PbsClientError::NoPayload; - BEACON_NODE_STATUS - .with_label_values(&[err.status_code().as_str(), GET_HEADER_ENDPOINT_TAG]) - .inc(); - Err(err) - } - } -} - -// ── Relay logic ────────────────────────────────────────────────────────────── - -/// Info about an incoming get_header request. -/// Sent from get_header to each send_timed_get_header call. 
-#[derive(Clone)] -struct RequestInfo { - /// The blockchain parameters of the get_header request (what slot it's for, - /// which pubkey is requesting it, etc) - params: GetHeaderParams, - - /// Common baseline of headers to send with each request - headers: Arc, - - /// The chain the request is for - chain: Chain, - - /// Context for validating the header returned by the relay - validation: ValidationContext, - - /// The accepted encoding types from the original request - accepted_types: HashSet, -} - -/// Used interally to provide info and context about a get_header request and -/// its response -struct GetHeaderResponseInfo { - /// ID of the relay the response came from - relay_id: Arc, - - /// The raw body of the response - response_bytes: Vec, - - /// The content type the response is encoded with - content_type: EncodingType, - - /// Which fork the response bid is for (if provided as a header, rather than - /// part of the body) - fork: Option, - - /// The status code of the response, for logging - code: StatusCode, - - /// The round-trip latency of the request - request_latency: Duration, -} - -/// Context for validating the header -#[derive(Clone)] -struct ValidationContext { - /// Whether to skip signature verification - skip_sigverify: bool, - - /// Minimum acceptable bid, in wei - min_bid_wei: U256, - - /// The mode used for response validation - mode: HeaderValidationMode, - - /// The parent block, if fetched - parent_block: Arc>>, -} - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader -/// Returns 200 if at least one relay returns 200, else 204 -pub async fn get_header( - params: GetHeaderParams, - req_headers: HeaderMap, - state: PbsState, - accepted_types: HashSet, -) -> eyre::Result> { - let parent_block = Arc::new(RwLock::new(None)); - let extra_validation_enabled = - state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; - if extra_validation_enabled && let Some(rpc_url) = 
state.pbs_config().rpc_url.clone() { - tokio::spawn( - fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()).in_current_span(), - ); - } - - let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); - - if let Some(mux_id) = maybe_mux_id { - debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); - } else { - debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); - } - - let max_timeout_ms = pbs_config - .timeout_get_header_ms - .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); - - if max_timeout_ms == 0 { - warn!( - ms_into_slot, - threshold = pbs_config.late_in_slot_time_ms, - "late in slot, skipping relay requests" - ); - - return Ok(None); - } - - // Use the minimum of the time left and the user provided timeout header - let max_timeout_ms = req_headers - .get(HEADER_TIMEOUT_MS) - .map(|header| match header.to_str().ok().and_then(|v| v.parse::().ok()) { - None | Some(0) => { - // Header can't be stringified, or parsed, or it's set to 0 - warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); - max_timeout_ms - } - Some(user_timeout) => user_timeout.min(max_timeout_ms), - }) - .unwrap_or(max_timeout_ms); - - // prepare headers, except for start time which is set in `send_one_get_header` - let mut send_headers = HeaderMap::new(); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().header_validation_mode; - let accept_types = match mode { - HeaderValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - 
[EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert( - ACCEPT, - HeaderValue::from_str(&accept_types) - .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, - ); - - // Send requests to all relays concurrently - let slot = params.slot as i64; - let request_info = Arc::new(RequestInfo { - params, - headers: Arc::new(send_headers), - chain: state.config.chain, - validation: ValidationContext { - skip_sigverify: state.pbs_config().skip_sigverify, - min_bid_wei: state.pbs_config().min_bid_wei, - mode, - parent_block, - }, - accepted_types, - }); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays.iter() { - handles.push( - send_timed_get_header( - request_info.clone(), - relay.clone(), - ms_into_slot, - max_timeout_ms, - ) - .in_current_span(), - ); - } - - let results = join_all(handles).await; - let mut relay_bids = Vec::with_capacity(relays.len()); - for (i, res) in results.into_iter().enumerate() { - let relay_id = relays[i].id.as_str(); - - match res { - Ok(Some(res)) => { - let value = match &res { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }; - RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); - let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); - RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); - - relay_bids.push(res) - } - Ok(_) => {} - Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), - Err(err) => error!(%err, relay_id), - } - } - - let max_bid = relay_bids.into_iter().max_by_key(|bid| match bid { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }); - - Ok(max_bid) -} - -/// Fetch the parent block from the RPC URL for extra validation of the header. 
-/// Extra validation will be skipped if: -/// - relay returns header before parent block is fetched -/// - parent block is not found, eg because of a RPC delay -async fn fetch_parent_block( - rpc_url: Url, - parent_hash: B256, - parent_block: Arc>>, -) { - let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); - - debug!(%parent_hash, "fetching parent block"); - - match provider.get_block_by_hash(parent_hash).await { - Ok(maybe_block) => { - debug!(block_found = maybe_block.is_some(), "fetched parent block"); - let mut guard = parent_block.write(); - *guard = maybe_block; - } - Err(err) => { - error!(%err, "fetch failed"); - } - } -} - -async fn send_timed_get_header( - request_info: Arc, - relay: RelayClient, - ms_into_slot: u64, - mut timeout_left_ms: u64, -) -> Result, PbsError> { - let params = &request_info.params; - let url = Arc::new(relay.get_header_url(params.slot, ¶ms.parent_hash, ¶ms.pubkey)?); - - if relay.config.enable_timing_games { - if let Some(target_ms) = relay.config.target_first_request_ms { - // sleep until target time in slot - - let delay = target_ms.saturating_sub(ms_into_slot); - if delay > 0 { - debug!( - relay_id = relay.id.as_ref(), - target_ms, ms_into_slot, "TG: waiting to send first header request" - ); - timeout_left_ms = timeout_left_ms.saturating_sub(delay); - sleep(Duration::from_millis(delay)).await; - } else { - debug!( - relay_id = relay.id.as_ref(), - target_ms, ms_into_slot, "TG: request already late enough in slot" - ); - } - } - - if let Some(send_freq_ms) = relay.config.frequency_get_header_ms { - let mut handles = Vec::new(); - - debug!( - relay_id = relay.id.as_ref(), - send_freq_ms, timeout_left_ms, "TG: sending multiple header requests" - ); - - loop { - handles.push(tokio::spawn( - send_one_get_header( - request_info.clone(), - relay.clone(), - url.clone(), - timeout_left_ms, - ) - .in_current_span(), - )); - - if timeout_left_ms > send_freq_ms { - // enough time for one more - 
timeout_left_ms = timeout_left_ms.saturating_sub(send_freq_ms); - sleep(Duration::from_millis(send_freq_ms)).await; - } else { - break; - } - } - - let results = join_all(handles).await; - let mut n_headers = 0; - - if let Some((_, maybe_header)) = results - .into_iter() - .filter_map(|res| { - // ignore join error and timeouts, log other errors - res.ok().and_then(|inner_res| match inner_res { - Ok(maybe_header) => { - if maybe_header.1.is_some() { - n_headers += 1; - Some(maybe_header) - } else { - // filter out 204 responses that are returned if the request - // is after the relay cutoff - None - } - } - Err(err) if err.is_timeout() => None, - Err(err) => { - error!(relay_id = relay.id.as_ref(),%err, "TG: error sending header request"); - None - } - }) - }) - .max_by_key(|(start_time, _)| *start_time) - { - debug!(relay_id = relay.id.as_ref(), n_headers, "TG: received headers from relay"); - return Ok(maybe_header); - } else { - // all requests failed - warn!(relay_id = relay.id.as_ref(), "TG: no headers received"); - - return Err(PbsError::RelayResponse { - error_msg: "no headers received".to_string(), - code: TIMEOUT_ERROR_CODE, - }); - } - } - } - - // if no timing games or no repeated send, just send one request - send_one_get_header(request_info, relay, url, timeout_left_ms) - .await - .map(|(_, maybe_header)| maybe_header) -} - -/// Handles requesting a header from a relay, decoding, and validation. -/// Used by send_timed_get_header to handle each individual request. -async fn send_one_get_header( - request_info: Arc, - relay: RelayClient, - url: Arc, - timeout_left_ms: u64, -) -> Result<(u64, Option), PbsError> { - match request_info.validation.mode { - HeaderValidationMode::None => { - // Minimal processing: extract fork and value, forward response bytes directly. - // Expensive crypto/structural validation is skipped (sigverify, parent hash, - // timestamp), but the min_bid check is applied. 
- let (start_request_time, get_header_response) = send_get_header_light( - &relay, - url, - timeout_left_ms, - (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the - * impl - * will - * modify it */ - ) - .await?; - match get_header_response { - None => Ok((start_request_time, None)), - Some(res) => { - let min_bid = request_info.validation.min_bid_wei; - if res.value < min_bid { - return Err(PbsError::Validation(ValidationError::BidTooLow { - min: min_bid, - got: res.value, - })); - } - - // Make sure the response is encoded in one of the accepted - // types since we're passing the raw response directly to the client - if !request_info.accepted_types.contains(&res.encoding_type) { - return Err(PbsError::RelayResponse { - error_msg: format!( - "relay returned unsupported encoding type for get_header in no-validation mode: {:?}", - res.encoding_type - ), - code: 406, // Not Acceptable - }); - } - Ok((start_request_time, Some(CompoundGetHeaderResponse::Light(res)))) - } - } - } - _ => { - // Full processing: decode full response and validate - let (start_request_time, get_header_response) = send_get_header_full( - &relay, - url, - timeout_left_ms, - (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the - * impl - * will - * modify it */ - ) - .await?; - let get_header_response = match get_header_response { - None => { - // Break if there's no header - return Ok((start_request_time, None)); - } - Some(res) => res, - }; - - // Extract the basic header data needed for validation - let header_data = match &get_header_response.data.message.header() { - ExecutionPayloadHeaderRef::Bellatrix(_) | - ExecutionPayloadHeaderRef::Capella(_) | - ExecutionPayloadHeaderRef::Deneb(_) | - ExecutionPayloadHeaderRef::Gloas(_) => { - Err(PbsError::Validation(ValidationError::UnsupportedFork)) - } - ExecutionPayloadHeaderRef::Electra(res) => Ok(HeaderData { - block_hash: res.block_hash.0, - parent_hash: res.parent_hash.0, - tx_root: 
res.transactions_root, - value: *get_header_response.value(), - timestamp: res.timestamp, - }), - ExecutionPayloadHeaderRef::Fulu(res) => Ok(HeaderData { - block_hash: res.block_hash.0, - parent_hash: res.parent_hash.0, - tx_root: res.transactions_root, - value: *get_header_response.value(), - timestamp: res.timestamp, - }), - }?; - - // Validate the header - let chain = request_info.chain; - let params = &request_info.params; - let validation = &request_info.validation; - validate_header_data( - &header_data, - chain, - params.parent_hash, - validation.min_bid_wei, - params.slot, - )?; - - // Validate the relay signature - if !validation.skip_sigverify { - validate_signature( - chain, - relay.pubkey(), - get_header_response.data.message.pubkey(), - &get_header_response.data.message, - &get_header_response.data.signature, - )?; - } - - // Validate the parent block if enabled - if validation.mode == HeaderValidationMode::Extra { - let parent_block = validation.parent_block.read(); - if let Some(parent_block) = parent_block.as_ref() { - extra_validation(parent_block, &get_header_response)?; - } else { - warn!( - relay_id = relay.id.as_ref(), - "parent block not found, skipping extra validation" - ); - } - } - - Ok(( - start_request_time, - Some(CompoundGetHeaderResponse::Full(Box::new(get_header_response))), - )) - } - } -} - -/// Send and decode a full get_header response, will all of the fields. 
-async fn send_get_header_full( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // Send the request - let (start_request_time, info) = - send_get_header_impl(relay, url, timeout_left_ms, headers).await?; - let info = match info { - Some(info) => info, - None => { - return Ok((start_request_time, None)); - } - }; - - // Decode the response - let get_header_response = match info.content_type { - EncodingType::Json => decode_json_payload(&info.response_bytes)?, - EncodingType::Ssz => { - let fork = info.fork.ok_or(PbsError::RelayResponse { - error_msg: "relay did not provide consensus version header for ssz payload" - .to_string(), - code: info.code.as_u16(), - })?; - decode_ssz_payload(&info.response_bytes, fork)? - } - }; - - // Log and return - debug!( - relay_id = info.relay_id.as_ref(), - header_size_bytes = info.response_bytes.len(), - latency = ?info.request_latency, - version =? get_header_response.version, - value_eth = format_ether(*get_header_response.value()), - block_hash = %get_header_response.block_hash(), - content_type = ?info.content_type, - "received new header" - ); - Ok((start_request_time, Some(get_header_response))) -} - -/// Send a get_header request and decode only the fork and bid value from the -/// response, leaving the raw bytes intact for direct forwarding to the caller. -/// Used in `HeaderValidationMode::None` where expensive crypto/structural -/// checks are skipped. 
-async fn send_get_header_light( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // Send the request - let (start_request_time, info) = - send_get_header_impl(relay, url, timeout_left_ms, headers).await?; - let info = match info { - Some(info) => info, - None => { - return Ok((start_request_time, None)); - } - }; - - // Decode the value / fork from the response - let (fork, value) = match info.content_type { - EncodingType::Json => get_light_info_from_json(&info.response_bytes)?, - EncodingType::Ssz => { - let fork = info.fork.ok_or(PbsError::RelayResponse { - error_msg: "relay did not provide consensus version header for ssz payload" - .to_string(), - code: info.code.as_u16(), - })?; - (fork, get_bid_value_from_signed_builder_bid_ssz(&info.response_bytes, fork)?) - } - }; - - // Log and return - debug!( - relay_id = info.relay_id.as_ref(), - header_size_bytes = info.response_bytes.len(), - latency = ?info.request_latency, - version =? fork, - value_eth = format_ether(value), - content_type = ?info.content_type, - "received new header (light processing)" - ); - Ok(( - start_request_time, - Some(LightGetHeaderResponse { - version: fork, - value, - raw_bytes: info.response_bytes, - encoding_type: info.content_type, - }), - )) -} - -/// Sends a get_header request to a relay, returning the response, the time the -/// request was started, and the encoding type of the response (if any). -/// Used by send_one_get_header to perform the actual request submission. 
-async fn send_get_header_impl( - relay: &RelayClient, - url: Arc, - timeout_left_ms: u64, - mut headers: HeaderMap, -) -> Result<(u64, Option), PbsError> { - // the timestamp in the header is the consensus block time which is fixed, - // use the beginning of the request as proxy to make sure we use only the - // last one received - let start_request = Instant::now(); - let start_request_time = utcnow_ms(); - headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(start_request_time)); - - // The timeout header indicating how long a relay has to respond, so they can - // minimize timing games without losing the bid - headers.insert(HEADER_TIMEOUT_MS, HeaderValue::from(timeout_left_ms)); - - let res = match relay - .client - .get(url.as_ref().clone()) - .timeout(Duration::from_millis(timeout_left_ms)) - .headers(headers) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[TIMEOUT_ERROR_CODE_STR, GET_HEADER_ENDPOINT_TAG, &relay.id]) - .inc(); - return Err(err.into()); - } - }; - - // Log the response code and latency - let code = res.status(); - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[GET_HEADER_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), GET_HEADER_ENDPOINT_TAG, &relay.id]).inc(); - - // According to the spec, OK is the only allowed success code so this can break - // early - if code != StatusCode::OK { - if code == StatusCode::NO_CONTENT { - let response_bytes = - read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; - debug!( - relay_id = relay.id.as_ref(), - ?code, - latency = ?request_latency, - response = ?response_bytes, - "no header from relay" - ); - return Ok((start_request_time, None)); - } else { - return Err(PbsError::RelayResponse { - error_msg: format!("unexpected status code from relay: {code}"), - code: code.as_u16(), - }); - } - } - - // Get the content type 
- let content_type = match res.headers().get(CONTENT_TYPE) { - None => { - // Assume a missing content type means JSON; shouldn't happen in practice with - // any respectable HTTP server but just in case - EncodingType::Json - } - Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { - error_msg: format!("cannot decode content-type header: {e}").to_string(), - code: (code.as_u16()), - })? { - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { - EncodingType::Ssz - } - header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { - EncodingType::Json - } - header_str => { - return Err(PbsError::RelayResponse { - error_msg: format!("unsupported content type: {header_str}"), - code: code.as_u16(), - }) - } - }, - }; - - // Decode the body - let fork = get_consensus_version_header(res.headers()); - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; - Ok(( - start_request_time, - Some(GetHeaderResponseInfo { - relay_id: relay.id.clone(), - response_bytes, - content_type, - fork, - code, - request_latency, - }), - )) -} - -/// Decode a JSON-encoded get_header response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the value of a builder bid and the fork name from a get_header JSON -/// response (used for light-level processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { - #[derive(Deserialize)] - struct LightBuilderBid { - #[serde(with = "serde_utils::quoted_u256")] - pub value: U256, - } - - #[derive(Deserialize)] - struct LightSignedBuilderBid { - pub message: LightBuilderBid, - } - - #[derive(Deserialize)] - struct LightHeaderResponse { - version: ForkName, - data: 
LightSignedBuilderBid, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded get_header response -fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) -} - -struct HeaderData { - block_hash: B256, - parent_hash: B256, - tx_root: B256, - value: U256, - timestamp: u64, -} - -fn validate_header_data( - header_data: &HeaderData, - chain: Chain, - expected_parent_hash: B256, - minimum_bid_wei: U256, - slot: u64, -) -> Result<(), ValidationError> { - if header_data.block_hash == B256::ZERO { - return Err(ValidationError::EmptyBlockhash); - } - - if expected_parent_hash != header_data.parent_hash { - return Err(ValidationError::ParentHashMismatch { - expected: expected_parent_hash, - got: header_data.parent_hash, - }); - } - - if header_data.tx_root == EMPTY_TX_ROOT_HASH { - return Err(ValidationError::EmptyTxRoot); - } - - if header_data.value < minimum_bid_wei { - return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); - } - - let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); - if expected_timestamp != header_data.timestamp { - return Err(ValidationError::TimestampMismatch { - expected: expected_timestamp, - got: header_data.timestamp, - }); - } - - Ok(()) -} - -fn validate_signature( - chain: Chain, - expected_relay_pubkey: &BlsPublicKey, - received_relay_pubkey: &BlsPublicKeyBytes, - message: &T, - signature: &BlsSignature, -) -> Result<(), ValidationError> { - if 
expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { - return Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(expected_relay_pubkey), - got: *received_relay_pubkey, - }); - } - - if !verify_signed_message( - chain, - expected_relay_pubkey, - &message, - signature, - None, - &B32::from(APPLICATION_BUILDER_DOMAIN), - ) { - return Err(ValidationError::Sigverify); - } - - Ok(()) -} - -fn extra_validation( - parent_block: &Block, - signed_header: &GetHeaderResponse, -) -> Result<(), ValidationError> { - if signed_header.block_number() != parent_block.header.number + 1 { - return Err(ValidationError::BlockNumberMismatch { - parent: parent_block.header.number, - header: signed_header.block_number(), - }); - } - - if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { - return Err(ValidationError::GasLimit { - parent: parent_block.header.gas_limit, - header: signed_header.gas_limit(), - }); - }; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use std::{fs, path::Path}; - - use alloy::primitives::{B256, U256}; - use cb_common::{ - pbs::*, - signature::sign_builder_message, - types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, - utils::{TestRandomSeed, timestamp_of_slot_start_sec}, - }; - use ssz::Encode; - - use super::{validate_header_data, *}; - - #[test] - fn test_validate_header() { - let slot = 5; - let parent_hash = B256::from_slice(&[1; 32]); - let chain = Chain::Holesky; - let min_bid = U256::from(10); - - let mut mock_header_data = HeaderData { - block_hash: B256::default(), - parent_hash: B256::default(), - tx_root: EMPTY_TX_ROOT_HASH, - value: U256::default(), - timestamp: 0, - }; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyBlockhash) - ); - - mock_header_data.block_hash.0[1] = 1; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - 
Err(ValidationError::ParentHashMismatch { - expected: parent_hash, - got: B256::default() - }) - ); - - mock_header_data.parent_hash = parent_hash; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyTxRoot) - ); - - mock_header_data.tx_root = Default::default(); - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) - ); - - mock_header_data.value = U256::from(11); - - let expected = timestamp_of_slot_start_sec(slot, chain); - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::TimestampMismatch { expected, got: 0 }) - ); - - mock_header_data.timestamp = expected; - - assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); - } - - #[test] - fn test_validate_signature() { - let secret_key = BlsSecretKey::test_random(); - let pubkey = secret_key.public_key(); - let wrong_pubkey = BlsPublicKeyBytes::test_random(); - let wrong_signature = BlsSignature::test_random(); - - let message = B256::random(); - - let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); - - assert_eq!( - validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), - Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(&pubkey), - got: wrong_pubkey - }) - ); - - assert!(matches!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &wrong_signature - ), - Err(ValidationError::Sigverify) - )); - - assert!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &signature - ) - .is_ok() - ); - } - - #[test] - fn test_ssz_value_extraction() { - for fork_name in ForkName::list_all() { - match fork_name { - // Handle forks that didn't have builder bids yet - 
ForkName::Altair | ForkName::Base => continue, - - // Handle supported forks - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Electra | - ForkName::Fulu => {} - - // Skip unsupported forks - ForkName::Gloas => continue, - } - - // Load get_header JSON from test data - let fork_name_str = fork_name.to_string().to_lowercase(); - let path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); - let path = Path::new(path_str.as_str()); - let json_bytes = fs::read(path).expect("file not found"); - let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); - - // Extract the bid value from the SSZ - let encoded = decoded.data.as_ssz_bytes(); - let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) - .expect("failed to extract bid value from SSZ"); - - // Compare to the original value - println!("Testing fork: {}", fork_name); - println!("Original value: {}", decoded.value()); - println!("Extracted value: {}", bid_value); - assert_eq!(*decoded.value(), bid_value); - } - } -} diff --git a/crates/pbs/src/routes/get_header/mod.rs b/crates/pbs/src/routes/get_header/mod.rs new file mode 100644 index 00000000..3d046ae7 --- /dev/null +++ b/crates/pbs/src/routes/get_header/mod.rs @@ -0,0 +1,305 @@ +mod relay; +mod validation; + +use std::{collections::HashSet, sync::Arc}; + +use alloy::primitives::U256; +use axum::{ + extract::{Path, State}, + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use cb_common::{ + config::HeaderValidationMode, + pbs::{GetHeaderInfo, GetHeaderParams, HEADER_TIMEOUT_MS, error::PbsError}, + utils::{ + CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, get_user_agent, + get_user_agent_with_version, ms_into_slot, + }, +}; +use futures::future::join_all; +use parking_lot::RwLock; +use relay::{RequestInfo, ValidationContext, send_timed_get_header}; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; +use 
tracing::{Instrument, debug, error, info, warn}; + +use super::CompoundGetHeaderResponse; +use crate::{ + error::PbsClientError, + metrics::{BEACON_NODE_STATUS, RELAY_HEADER_VALUE, RELAY_LAST_SLOT}, + state::{PbsState, PbsStateGuard}, +}; + +pub async fn handle_get_header( + State(state): State, + req_headers: HeaderMap, + Path(params): Path, +) -> Result { + tracing::Span::current().record("slot", params.slot); + tracing::Span::current().record("parent_hash", tracing::field::debug(params.parent_hash)); + tracing::Span::current().record("validator", tracing::field::debug(¶ms.pubkey)); + + let state = state.read().clone(); + + let ua = get_user_agent(&req_headers); + let ms_into_slot = ms_into_slot(params.slot, state.config.chain); + let accept_types = get_accept_types(&req_headers).map_err(|e| { + error!(%e, "error parsing accept header"); + PbsClientError::DecodeError(format!("error parsing accept header: {e}")) + })?; + let accepts_ssz = accept_types.contains(&EncodingType::Ssz); + let accepts_json = accept_types.contains(&EncodingType::Json); + + info!(ua, ms_into_slot, "new request"); + + match get_header(params, req_headers, state, accept_types).await { + Ok(res) => { + if let Some(max_bid) = res { + BEACON_NODE_STATUS + .with_label_values(&["200", crate::constants::GET_HEADER_ENDPOINT_TAG]) + .inc(); + match max_bid { + CompoundGetHeaderResponse::Light(light_bid) => { + // Light validation mode, so just forward the raw response + info!( + value_eth = alloy::primitives::utils::format_ether(light_bid.value), + "received header (unvalidated)" + ); + + // Create the headers + let consensus_version_header = + match HeaderValue::from_str(&light_bid.version.to_string()) { + Ok(consensus_version_header) => { + Ok::(consensus_version_header) + } + Err(e) => { + return Err(PbsClientError::RelayError(format!( + "error decoding consensus version from relay payload: {e}" + ))); + } + }?; + let content_type = light_bid.encoding_type.content_type(); + let 
content_type_header = HeaderValue::from_str(content_type).unwrap(); + + // Build response + let mut res = light_bid.raw_bytes.into_response(); + res.headers_mut() + .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); + res.headers_mut().insert(CONTENT_TYPE, content_type_header); + info!("sending response as {} (light)", content_type); + Ok(res) + } + CompoundGetHeaderResponse::Full(max_bid) => { + // Full validation mode, so respond based on requester accept types + info!(value_eth = alloy::primitives::utils::format_ether(*max_bid.data.message.value()), block_hash =% max_bid.block_hash(), "received header"); + + // Handle SSZ + if accepts_ssz { + use ssz::Encode; + let mut res = max_bid.data.as_ssz_bytes().into_response(); + let consensus_version_header = match HeaderValue::from_str( + &max_bid.version.to_string(), + ) { + Ok(consensus_version_header) => { + Ok::(consensus_version_header) + } + Err(e) => { + if accepts_json { + info!("sending response as JSON"); + return Ok( + (StatusCode::OK, axum::Json(max_bid)).into_response() + ); + } else { + return Err(PbsClientError::RelayError(format!( + "error decoding consensus version from relay payload: {e}" + ))); + } + } + }?; + + // This won't actually fail since the string is a const + let content_type_header = + HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); + + res.headers_mut() + .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); + res.headers_mut().insert(CONTENT_TYPE, content_type_header); + info!("sending response as SSZ"); + return Ok(res); + } + + // Handle JSON + if accepts_json { + Ok((StatusCode::OK, axum::Json(max_bid)).into_response()) + } else { + // This shouldn't ever happen but the compiler needs it + Err(PbsClientError::DecodeError( + "no viable accept types in request".to_string(), + )) + } + } + } + } else { + // spec: return 204 if request is valid but no bid available + info!("no header available for slot"); + + BEACON_NODE_STATUS + 
.with_label_values(&["204", crate::constants::GET_HEADER_ENDPOINT_TAG]) + .inc(); + Ok(StatusCode::NO_CONTENT.into_response()) + } + } + Err(err) => { + error!(%err, "no header available from relays"); + + let err = PbsClientError::NoPayload; + BEACON_NODE_STATUS + .with_label_values(&[ + err.status_code().as_str(), + crate::constants::GET_HEADER_ENDPOINT_TAG, + ]) + .inc(); + Err(err) + } + } +} + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader +/// Returns 200 if at least one relay returns 200, else 204 +pub async fn get_header( + params: GetHeaderParams, + req_headers: HeaderMap, + state: PbsState, + accepted_types: HashSet, +) -> eyre::Result> { + let parent_block = Arc::new(RwLock::new(None)); + let extra_validation_enabled = + state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; + if extra_validation_enabled && let Some(rpc_url) = state.pbs_config().rpc_url.clone() { + tokio::spawn( + validation::fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()) + .in_current_span(), + ); + } + + let ms_into_slot = ms_into_slot(params.slot, state.config.chain); + let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); + + if let Some(mux_id) = maybe_mux_id { + debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); + } else { + debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); + } + + let max_timeout_ms = pbs_config + .timeout_get_header_ms + .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); + + if max_timeout_ms == 0 { + warn!( + ms_into_slot, + threshold = pbs_config.late_in_slot_time_ms, + "late in slot, skipping relay requests" + ); + + return Ok(None); + } + + // Use the minimum of the time left and the user provided timeout header + let max_timeout_ms = req_headers + .get(HEADER_TIMEOUT_MS) + .map(|header| match 
header.to_str().ok().and_then(|v| v.parse::().ok()) { + None | Some(0) => { + // Header can't be stringified, or parsed, or it's set to 0 + warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); + max_timeout_ms + } + Some(user_timeout) => user_timeout.min(max_timeout_ms), + }) + .unwrap_or(max_timeout_ms); + + // prepare headers, except for start time which is set in `send_one_get_header` + let mut send_headers = HeaderMap::new(); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().header_validation_mode; + let accept_types_str = match mode { + HeaderValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert( + ACCEPT, + HeaderValue::from_str(&accept_types_str) + .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, + ); + + // Send requests to all relays concurrently + let slot = params.slot as i64; + let request_info = Arc::new(RequestInfo { + params, + headers: Arc::new(send_headers), + chain: state.config.chain, + validation: ValidationContext { + skip_sigverify: state.pbs_config().skip_sigverify, + min_bid_wei: state.pbs_config().min_bid_wei, + mode, + parent_block, + }, + accepted_types, + }); + let mut handles = Vec::with_capacity(relays.len()); + for relay in relays.iter() { + handles.push( + send_timed_get_header( + request_info.clone(), + relay.clone(), + ms_into_slot, + max_timeout_ms, + ) + .in_current_span(), + ); + } + + let results = join_all(handles).await; + let mut relay_bids = Vec::with_capacity(relays.len()); + for (i, res) in 
results.into_iter().enumerate() { + let relay_id = relays[i].id.as_str(); + + match res { + Ok(Some(res)) => { + let value = match &res { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }; + RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); + let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); + RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); + + relay_bids.push(res) + } + Ok(_) => {} + Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), + Err(err) => error!(%err, relay_id), + } + } + + let max_bid = relay_bids.into_iter().max_by_key(|bid| match bid { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }); + + Ok(max_bid) +} diff --git a/crates/pbs/src/routes/get_header/relay.rs b/crates/pbs/src/routes/get_header/relay.rs new file mode 100644 index 00000000..17bcd21a --- /dev/null +++ b/crates/pbs/src/routes/get_header/relay.rs @@ -0,0 +1,543 @@ +use std::{ + collections::HashSet, + sync::Arc, + time::{Duration, Instant}, +}; + +use alloy::primitives::{U256, utils::format_ether}; +use axum::http::{HeaderMap, HeaderValue}; +use cb_common::{ + config::HeaderValidationMode, + pbs::{ + ExecutionPayloadHeaderRef, ForkName, GetHeaderInfo, GetHeaderParams, GetHeaderResponse, + HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, RelayClient, + error::{PbsError, ValidationError}, + }, + types::Chain, + utils::{ + EncodingType, get_bid_value_from_signed_builder_bid_ssz, get_consensus_version_header, + read_chunked_body_with_max, utcnow_ms, + }, +}; +use parking_lot::RwLock; +use reqwest::{StatusCode, header::CONTENT_TYPE}; +use tokio::time::sleep; +use tracing::{Instrument, debug, error, warn}; +use url::Url; + +use super::{ + super::{CompoundGetHeaderResponse, LightGetHeaderResponse}, + validation::{ + HeaderData, decode_json_payload, decode_ssz_payload, 
extra_validation, + get_light_info_from_json, validate_header_data, validate_signature, + }, +}; +use crate::constants::{ + GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, + TIMEOUT_ERROR_CODE_STR, +}; + +/// Info about an incoming get_header request. +/// Sent from get_header to each send_timed_get_header call. +#[derive(Clone)] +pub struct RequestInfo { + /// The blockchain parameters of the get_header request (what slot it's for, + /// which pubkey is requesting it, etc) + pub params: GetHeaderParams, + + /// Common baseline of headers to send with each request + pub headers: Arc, + + /// The chain the request is for + pub chain: Chain, + + /// Context for validating the header returned by the relay + pub validation: ValidationContext, + + /// The accepted encoding types from the original request + pub accepted_types: HashSet, +} + +/// Used internally to provide info and context about a get_header request and +/// its response +struct GetHeaderResponseInfo { + /// ID of the relay the response came from + relay_id: Arc, + + /// The raw body of the response + response_bytes: Vec, + + /// The content type the response is encoded with + content_type: EncodingType, + + /// Which fork the response bid is for (if provided as a header, rather than + /// part of the body) + fork: Option, + + /// The status code of the response, for logging + code: StatusCode, + + /// The round-trip latency of the request + request_latency: Duration, +} + +/// Context for validating the header +#[derive(Clone)] +pub struct ValidationContext { + /// Whether to skip signature verification + pub skip_sigverify: bool, + + /// Minimum acceptable bid, in wei + pub min_bid_wei: U256, + + /// The mode used for response validation + pub mode: HeaderValidationMode, + + /// The parent block, if fetched + pub parent_block: Arc>>, +} + +pub async fn send_timed_get_header( + request_info: Arc, + relay: RelayClient, + ms_into_slot: u64, + mut timeout_left_ms: u64, +) -> 
Result, PbsError> { + let params = &request_info.params; + let url = Arc::new(relay.get_header_url(params.slot, ¶ms.parent_hash, ¶ms.pubkey)?); + + if relay.config.enable_timing_games { + if let Some(target_ms) = relay.config.target_first_request_ms { + // sleep until target time in slot + + let delay = target_ms.saturating_sub(ms_into_slot); + if delay > 0 { + debug!( + relay_id = relay.id.as_ref(), + target_ms, ms_into_slot, "TG: waiting to send first header request" + ); + timeout_left_ms = timeout_left_ms.saturating_sub(delay); + sleep(Duration::from_millis(delay)).await; + } else { + debug!( + relay_id = relay.id.as_ref(), + target_ms, ms_into_slot, "TG: request already late enough in slot" + ); + } + } + + if let Some(send_freq_ms) = relay.config.frequency_get_header_ms { + let mut handles = Vec::new(); + + debug!( + relay_id = relay.id.as_ref(), + send_freq_ms, timeout_left_ms, "TG: sending multiple header requests" + ); + + loop { + handles.push(tokio::spawn( + send_one_get_header( + request_info.clone(), + relay.clone(), + url.clone(), + timeout_left_ms, + ) + .in_current_span(), + )); + + if timeout_left_ms > send_freq_ms { + // enough time for one more + timeout_left_ms = timeout_left_ms.saturating_sub(send_freq_ms); + sleep(Duration::from_millis(send_freq_ms)).await; + } else { + break; + } + } + + let results = futures::future::join_all(handles).await; + let mut n_headers = 0; + + if let Some((_, maybe_header)) = results + .into_iter() + .filter_map(|res| { + // ignore join error and timeouts, log other errors + res.ok().and_then(|inner_res| match inner_res { + Ok(maybe_header) => { + if maybe_header.1.is_some() { + n_headers += 1; + Some(maybe_header) + } else { + // filter out 204 responses that are returned if the request + // is after the relay cutoff + None + } + } + Err(err) if err.is_timeout() => None, + Err(err) => { + error!(relay_id = relay.id.as_ref(),%err, "TG: error sending header request"); + None + } + }) + }) + .max_by_key(|(start_time, 
_)| *start_time) + { + debug!(relay_id = relay.id.as_ref(), n_headers, "TG: received headers from relay"); + return Ok(maybe_header); + } else { + // all requests failed + warn!(relay_id = relay.id.as_ref(), "TG: no headers received"); + + return Err(PbsError::RelayResponse { + error_msg: "no headers received".to_string(), + code: TIMEOUT_ERROR_CODE, + }); + } + } + } + + // if no timing games or no repeated send, just send one request + send_one_get_header(request_info, relay, url, timeout_left_ms) + .await + .map(|(_, maybe_header)| maybe_header) +} + +/// Handles requesting a header from a relay, decoding, and validation. +/// Used by send_timed_get_header to handle each individual request. +async fn send_one_get_header( + request_info: Arc, + relay: RelayClient, + url: Arc, + timeout_left_ms: u64, +) -> Result<(u64, Option), PbsError> { + match request_info.validation.mode { + HeaderValidationMode::None => { + // Minimal processing: extract fork and value, forward response bytes directly. + // Expensive crypto/structural validation is skipped (sigverify, parent hash, + // timestamp), but the min_bid check is applied. 
+ let (start_request_time, get_header_response) = send_get_header_light( + &relay, + url, + timeout_left_ms, + (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the + * impl + * will + * modify it */ + ) + .await?; + match get_header_response { + None => Ok((start_request_time, None)), + Some(res) => { + let min_bid = request_info.validation.min_bid_wei; + if res.value < min_bid { + return Err(PbsError::Validation(ValidationError::BidTooLow { + min: min_bid, + got: res.value, + })); + } + + // Make sure the response is encoded in one of the accepted + // types since we're passing the raw response directly to the client + if !request_info.accepted_types.contains(&res.encoding_type) { + return Err(PbsError::RelayResponse { + error_msg: format!( + "relay returned unsupported encoding type for get_header in no-validation mode: {:?}", + res.encoding_type + ), + code: 406, // Not Acceptable + }); + } + Ok((start_request_time, Some(CompoundGetHeaderResponse::Light(res)))) + } + } + } + _ => { + // Full processing: decode full response and validate + let (start_request_time, get_header_response) = send_get_header_full( + &relay, + url, + timeout_left_ms, + (*request_info.headers).clone(), /* Create a copy of the HeaderMap because the + * impl + * will + * modify it */ + ) + .await?; + let get_header_response = match get_header_response { + None => { + // Break if there's no header + return Ok((start_request_time, None)); + } + Some(res) => res, + }; + + // Extract the basic header data needed for validation + let header_data = match &get_header_response.data.message.header() { + ExecutionPayloadHeaderRef::Bellatrix(_) | + ExecutionPayloadHeaderRef::Capella(_) | + ExecutionPayloadHeaderRef::Deneb(_) | + ExecutionPayloadHeaderRef::Gloas(_) => { + Err(PbsError::Validation(ValidationError::UnsupportedFork)) + } + ExecutionPayloadHeaderRef::Electra(res) => Ok(HeaderData { + block_hash: res.block_hash.0, + parent_hash: res.parent_hash.0, + tx_root: 
res.transactions_root, + value: *get_header_response.value(), + timestamp: res.timestamp, + }), + ExecutionPayloadHeaderRef::Fulu(res) => Ok(HeaderData { + block_hash: res.block_hash.0, + parent_hash: res.parent_hash.0, + tx_root: res.transactions_root, + value: *get_header_response.value(), + timestamp: res.timestamp, + }), + }?; + + // Validate the header + let chain = request_info.chain; + let params = &request_info.params; + let validation = &request_info.validation; + validate_header_data( + &header_data, + chain, + params.parent_hash, + validation.min_bid_wei, + params.slot, + )?; + + // Validate the relay signature + if !validation.skip_sigverify { + validate_signature( + chain, + relay.pubkey(), + get_header_response.data.message.pubkey(), + &get_header_response.data.message, + &get_header_response.data.signature, + )?; + } + + // Validate the parent block if enabled + if validation.mode == HeaderValidationMode::Extra { + let parent_block = validation.parent_block.read(); + if let Some(parent_block) = parent_block.as_ref() { + extra_validation(parent_block, &get_header_response)?; + } else { + warn!( + relay_id = relay.id.as_ref(), + "parent block not found, skipping extra validation" + ); + } + } + + Ok(( + start_request_time, + Some(CompoundGetHeaderResponse::Full(Box::new(get_header_response))), + )) + } + } +} + +/// Send and decode a full get_header response, will all of the fields. 
+async fn send_get_header_full( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // Send the request + let (start_request_time, info) = + send_get_header_impl(relay, url, timeout_left_ms, headers).await?; + let info = match info { + Some(info) => info, + None => { + return Ok((start_request_time, None)); + } + }; + + // Decode the response + let get_header_response = match info.content_type { + EncodingType::Json => decode_json_payload(&info.response_bytes)?, + EncodingType::Ssz => { + let fork = info.fork.ok_or(PbsError::RelayResponse { + error_msg: "relay did not provide consensus version header for ssz payload" + .to_string(), + code: info.code.as_u16(), + })?; + decode_ssz_payload(&info.response_bytes, fork)? + } + }; + + // Log and return + debug!( + relay_id = info.relay_id.as_ref(), + header_size_bytes = info.response_bytes.len(), + latency = ?info.request_latency, + version =? get_header_response.version, + value_eth = format_ether(*get_header_response.value()), + block_hash = %get_header_response.block_hash(), + content_type = ?info.content_type, + "received new header" + ); + Ok((start_request_time, Some(get_header_response))) +} + +/// Send a get_header request and decode only the fork and bid value from the +/// response, leaving the raw bytes intact for direct forwarding to the caller. +/// Used in `HeaderValidationMode::None` where expensive crypto/structural +/// checks are skipped. 
+async fn send_get_header_light( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // Send the request + let (start_request_time, info) = + send_get_header_impl(relay, url, timeout_left_ms, headers).await?; + let info = match info { + Some(info) => info, + None => { + return Ok((start_request_time, None)); + } + }; + + // Decode the value / fork from the response + let (fork, value) = match info.content_type { + EncodingType::Json => get_light_info_from_json(&info.response_bytes)?, + EncodingType::Ssz => { + let fork = info.fork.ok_or(PbsError::RelayResponse { + error_msg: "relay did not provide consensus version header for ssz payload" + .to_string(), + code: info.code.as_u16(), + })?; + (fork, get_bid_value_from_signed_builder_bid_ssz(&info.response_bytes, fork)?) + } + }; + + // Log and return + debug!( + relay_id = info.relay_id.as_ref(), + header_size_bytes = info.response_bytes.len(), + latency = ?info.request_latency, + version =? fork, + value_eth = format_ether(value), + content_type = ?info.content_type, + "received new header (light processing)" + ); + Ok(( + start_request_time, + Some(LightGetHeaderResponse { + version: fork, + value, + raw_bytes: info.response_bytes, + encoding_type: info.content_type, + }), + )) +} + +/// Sends a get_header request to a relay, returning the response, the time the +/// request was started, and the encoding type of the response (if any). +/// Used by send_one_get_header to perform the actual request submission. 
+async fn send_get_header_impl( + relay: &RelayClient, + url: Arc, + timeout_left_ms: u64, + mut headers: HeaderMap, +) -> Result<(u64, Option), PbsError> { + // the timestamp in the header is the consensus block time which is fixed, + // use the beginning of the request as proxy to make sure we use only the + // last one received + let start_request = Instant::now(); + let start_request_time = utcnow_ms(); + headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(start_request_time)); + + // The timeout header indicating how long a relay has to respond, so they can + // minimize timing games without losing the bid + headers.insert(HEADER_TIMEOUT_MS, HeaderValue::from(timeout_left_ms)); + + let res = match relay + .client + .get(url.as_ref().clone()) + .timeout(Duration::from_millis(timeout_left_ms)) + .headers(headers) + .send() + .await + { + Ok(res) => res, + Err(err) => { + crate::metrics::RELAY_STATUS_CODE + .with_label_values(&[TIMEOUT_ERROR_CODE_STR, GET_HEADER_ENDPOINT_TAG, &relay.id]) + .inc(); + return Err(err.into()); + } + }; + + // Log the response code and latency + let code = res.status(); + let request_latency = start_request.elapsed(); + super::super::record_relay_metrics(GET_HEADER_ENDPOINT_TAG, &relay.id, code, request_latency); + + // According to the spec, OK is the only allowed success code so this can break + // early + if code != StatusCode::OK { + if code == StatusCode::NO_CONTENT { + let response_bytes = + read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; + debug!( + relay_id = relay.id.as_ref(), + ?code, + latency = ?request_latency, + response = ?response_bytes, + "no header from relay" + ); + return Ok((start_request_time, None)); + } else { + return Err(PbsError::RelayResponse { + error_msg: format!("unexpected status code from relay: {code}"), + code: code.as_u16(), + }); + } + } + + // Get the content type + let content_type = match res.headers().get(CONTENT_TYPE) { + None => { + // Assume a missing content 
type means JSON; shouldn't happen in practice with + // any respectable HTTP server but just in case + EncodingType::Json + } + Some(header_value) => match header_value.to_str().map_err(|e| PbsError::RelayResponse { + error_msg: format!("cannot decode content-type header: {e}").to_string(), + code: (code.as_u16()), + })? { + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Ssz.to_string()) => { + EncodingType::Ssz + } + header_str if header_str.eq_ignore_ascii_case(&EncodingType::Json.to_string()) => { + EncodingType::Json + } + header_str => { + return Err(PbsError::RelayResponse { + error_msg: format!("unsupported content type: {header_str}"), + code: code.as_u16(), + }) + } + }, + }; + + // Decode the body + let fork = get_consensus_version_header(res.headers()); + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_GET_HEADER_RESPONSE).await?; + Ok(( + start_request_time, + Some(GetHeaderResponseInfo { + relay_id: relay.id.clone(), + response_bytes, + content_type, + fork, + code, + request_latency, + }), + )) +} diff --git a/crates/pbs/src/routes/get_header/validation.rs b/crates/pbs/src/routes/get_header/validation.rs new file mode 100644 index 00000000..4006d805 --- /dev/null +++ b/crates/pbs/src/routes/get_header/validation.rs @@ -0,0 +1,349 @@ +use std::sync::Arc; + +use alloy::{ + primitives::{B256, U256, aliases::B32}, + providers::Provider, + rpc::types::Block, +}; +use cb_common::{ + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + EMPTY_TX_ROOT_HASH, ForkName, ForkVersionDecode, GetHeaderInfo, GetHeaderResponse, + SignedBuilderBid, + error::{PbsError, ValidationError}, + }, + signature::verify_signed_message, + types::{BlsPublicKey, BlsPublicKeyBytes, BlsSignature, Chain}, + utils::timestamp_of_slot_start_sec, +}; +use parking_lot::RwLock; +use serde::Deserialize; +use tracing::{debug, error}; +use tree_hash::TreeHash; +use url::Url; + +use crate::utils::check_gas_limit; + +/// Fetch the parent block from the RPC URL for extra 
validation of the header. +/// Extra validation will be skipped if: +/// - relay returns header before parent block is fetched +/// - parent block is not found, eg because of a RPC delay +pub async fn fetch_parent_block( + rpc_url: Url, + parent_hash: B256, + parent_block: Arc>>, +) { + let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); + + debug!(%parent_hash, "fetching parent block"); + + match provider.get_block_by_hash(parent_hash).await { + Ok(maybe_block) => { + debug!(block_found = maybe_block.is_some(), "fetched parent block"); + let mut guard = parent_block.write(); + *guard = maybe_block; + } + Err(err) => { + error!(%err, "fetch failed"); + } + } +} + +pub struct HeaderData { + pub block_hash: B256, + pub parent_hash: B256, + pub tx_root: B256, + pub value: U256, + pub timestamp: u64, +} + +pub fn validate_header_data( + header_data: &HeaderData, + chain: Chain, + expected_parent_hash: B256, + minimum_bid_wei: U256, + slot: u64, +) -> Result<(), ValidationError> { + if header_data.block_hash == B256::ZERO { + return Err(ValidationError::EmptyBlockhash); + } + + if expected_parent_hash != header_data.parent_hash { + return Err(ValidationError::ParentHashMismatch { + expected: expected_parent_hash, + got: header_data.parent_hash, + }); + } + + if header_data.tx_root == EMPTY_TX_ROOT_HASH { + return Err(ValidationError::EmptyTxRoot); + } + + if header_data.value < minimum_bid_wei { + return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); + } + + let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); + if expected_timestamp != header_data.timestamp { + return Err(ValidationError::TimestampMismatch { + expected: expected_timestamp, + got: header_data.timestamp, + }); + } + + Ok(()) +} + +pub fn validate_signature( + chain: Chain, + expected_relay_pubkey: &BlsPublicKey, + received_relay_pubkey: &BlsPublicKeyBytes, + message: &T, + signature: &BlsSignature, +) -> Result<(), 
ValidationError> { + if expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { + return Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(expected_relay_pubkey), + got: *received_relay_pubkey, + }); + } + + if !verify_signed_message( + chain, + expected_relay_pubkey, + &message, + signature, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + ) { + return Err(ValidationError::Sigverify); + } + + Ok(()) +} + +pub fn extra_validation( + parent_block: &Block, + signed_header: &GetHeaderResponse, +) -> Result<(), ValidationError> { + if signed_header.block_number() != parent_block.header.number + 1 { + return Err(ValidationError::BlockNumberMismatch { + parent: parent_block.header.number, + header: signed_header.block_number(), + }); + } + + if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { + return Err(ValidationError::GasLimit { + parent: parent_block.header.gas_limit, + header: signed_header.gas_limit(), + }); + }; + + Ok(()) +} + +pub fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +pub fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { + #[derive(Deserialize)] + struct LightBuilderBid { + #[serde(with = "serde_utils::quoted_u256")] + pub value: U256, + } + + #[derive(Deserialize)] + struct LightSignedBuilderBid { + pub message: LightBuilderBid, + } + + #[derive(Deserialize)] + struct LightHeaderResponse { + version: ForkName, + data: LightSignedBuilderBid, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +pub fn decode_ssz_payload( + 
response_bytes: &[u8], + fork: ForkName, +) -> Result { + let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) +} + +#[cfg(test)] +mod tests { + use std::{fs, path::Path}; + + use alloy::primitives::{B256, U256}; + use cb_common::{ + pbs::*, + signature::sign_builder_message, + types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, + utils::{ + TestRandomSeed, get_bid_value_from_signed_builder_bid_ssz, timestamp_of_slot_start_sec, + }, + }; + use ssz::Encode; + + use super::{validate_header_data, *}; + + #[test] + fn test_validate_header() { + let slot = 5; + let parent_hash = B256::from_slice(&[1; 32]); + let chain = Chain::Holesky; + let min_bid = U256::from(10); + + let mut mock_header_data = HeaderData { + block_hash: B256::default(), + parent_hash: B256::default(), + tx_root: EMPTY_TX_ROOT_HASH, + value: U256::default(), + timestamp: 0, + }; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyBlockhash) + ); + + mock_header_data.block_hash.0[1] = 1; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::ParentHashMismatch { + expected: parent_hash, + got: B256::default() + }) + ); + + mock_header_data.parent_hash = parent_hash; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyTxRoot) + ); + + mock_header_data.tx_root = Default::default(); + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) + ); + + mock_header_data.value = U256::from(11); + + let expected = timestamp_of_slot_start_sec(slot, chain); + 
assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::TimestampMismatch { expected, got: 0 }) + ); + + mock_header_data.timestamp = expected; + + assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); + } + + #[test] + fn test_validate_signature() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + let wrong_pubkey = BlsPublicKeyBytes::test_random(); + let wrong_signature = BlsSignature::test_random(); + + let message = B256::random(); + + let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); + + assert_eq!( + validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), + Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(&pubkey), + got: wrong_pubkey + }) + ); + + assert!(matches!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &wrong_signature + ), + Err(ValidationError::Sigverify) + )); + + assert!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &signature + ) + .is_ok() + ); + } + + #[test] + fn test_ssz_value_extraction() { + for fork_name in ForkName::list_all() { + match fork_name { + // Handle forks that didn't have builder bids yet + ForkName::Altair | ForkName::Base => continue, + + // Handle supported forks + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Electra | + ForkName::Fulu => {} + + // Skip unsupported forks + ForkName::Gloas => continue, + } + + // Load get_header JSON from test data + let fork_name_str = fork_name.to_string().to_lowercase(); + let path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); + let path = Path::new(path_str.as_str()); + let json_bytes = fs::read(path).expect("file not found"); + let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); 
+ + // Extract the bid value from the SSZ + let encoded = decoded.data.as_ssz_bytes(); + let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) + .expect("failed to extract bid value from SSZ"); + + // Compare to the original value + println!("Testing fork: {}", fork_name); + println!("Original value: {}", decoded.value()); + println!("Extracted value: {}", bid_value); + assert_eq!(*decoded.value(), bid_value); + } + } +} diff --git a/crates/pbs/src/routes/mod.rs b/crates/pbs/src/routes/mod.rs index 0a9e856c..5bcb1b73 100644 --- a/crates/pbs/src/routes/mod.rs +++ b/crates/pbs/src/routes/mod.rs @@ -5,6 +5,8 @@ mod router; mod status; mod submit_block; +use std::time::Duration; + use alloy::primitives::U256; use cb_common::{ pbs::{GetHeaderResponse, SubmitBlindedBlockResponse}, @@ -18,6 +20,20 @@ pub use router::create_app_router; use status::handle_get_status; use submit_block::handle_submit_block_v1; +use crate::metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}; + +/// Records the HTTP status code and request latency metrics for a relay +/// endpoint interaction. 
+pub(crate) fn record_relay_metrics( + endpoint: &str, + relay_id: &str, + code: reqwest::StatusCode, + latency: Duration, +) { + RELAY_STATUS_CODE.with_label_values(&[code.as_str(), endpoint, relay_id]).inc(); + RELAY_LATENCY.with_label_values(&[endpoint, relay_id]).observe(latency.as_secs_f64()); +} + /// Enum that handles different GetHeader response types based on the level of /// validation required pub enum CompoundGetHeaderResponse { diff --git a/crates/pbs/src/routes/register_validator.rs b/crates/pbs/src/routes/register_validator.rs index d267ddf6..5e854a26 100644 --- a/crates/pbs/src/routes/register_validator.rs +++ b/crates/pbs/src/routes/register_validator.rs @@ -26,7 +26,7 @@ use url::Url; use crate::{ constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, + metrics::{BEACON_NODE_STATUS, RELAY_STATUS_CODE}, state::{PbsState, PbsStateGuard}, }; @@ -63,7 +63,7 @@ pub async fn handle_register_validator( /// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator /// Returns 200 if at least one relay returns 200, else 503 -pub(crate) async fn register_validator( +async fn register_validator( registrations: Vec, req_headers: HeaderMap, state: PbsState, @@ -217,14 +217,8 @@ async fn send_register_validator( } }; let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - let code = res.status(); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .inc(); + super::record_relay_metrics(REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id, code, request_latency); if !code.is_success() { let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; diff --git a/crates/pbs/src/routes/reload.rs b/crates/pbs/src/routes/reload.rs index 
6e4d5f07..d7c03172 100644 --- a/crates/pbs/src/routes/reload.rs +++ b/crates/pbs/src/routes/reload.rs @@ -45,7 +45,7 @@ pub async fn handle_reload( /// Reload the PBS state with the latest configuration in the config file /// Returns 200 if successful or 500 if failed -pub(crate) async fn reload(state: PbsState) -> eyre::Result { +async fn reload(state: PbsState) -> eyre::Result { let (pbs_config, config_path) = load_pbs_config(None).await?; let new_state = PbsState::new(pbs_config, config_path); diff --git a/crates/pbs/src/routes/status.rs b/crates/pbs/src/routes/status.rs index ef0a8c6f..4dc679b8 100644 --- a/crates/pbs/src/routes/status.rs +++ b/crates/pbs/src/routes/status.rs @@ -12,7 +12,7 @@ use tracing::{debug, error, info}; use crate::{ constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, + metrics::{BEACON_NODE_STATUS, RELAY_STATUS_CODE}, state::{PbsState, PbsStateGuard}, }; @@ -50,7 +50,7 @@ pub async fn handle_get_status( /// Implements https://ethereum.github.io/builder-specs/#/Builder/status /// Broadcasts a status check to all relays and returns 200 if at least one /// relay returns 200 -pub(crate) async fn get_status(req_headers: HeaderMap, state: PbsState) -> eyre::Result<()> { +async fn get_status(req_headers: HeaderMap, state: PbsState) -> eyre::Result<()> { // If no relay check, return early if !state.config.pbs_config.relay_check { Ok(()) @@ -95,12 +95,8 @@ async fn send_relay_check(relay: &RelayClient, headers: HeaderMap) -> Result<(), } }; let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[STATUS_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - let code = res.status(); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), STATUS_ENDPOINT_TAG, &relay.id]).inc(); + super::record_relay_metrics(STATUS_ENDPOINT_TAG, &relay.id, code, request_latency); if !code.is_success() { let 
response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; diff --git a/crates/pbs/src/routes/submit_block/mod.rs b/crates/pbs/src/routes/submit_block/mod.rs new file mode 100644 index 00000000..77adae77 --- /dev/null +++ b/crates/pbs/src/routes/submit_block/mod.rs @@ -0,0 +1,238 @@ +mod relay; +mod validation; + +use std::{collections::HashSet, sync::Arc}; + +use axum::{ + extract::State, + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use cb_common::{ + config::BlockValidationMode, + pbs::{ + BuilderApiVersion, GetPayloadInfo, HEADER_START_TIME_UNIX_MS, SignedBlindedBeaconBlock, + error::PbsError, + }, + utils::{ + CONSENSUS_VERSION_HEADER, EncodingType, deserialize_body, get_accept_types, get_user_agent, + get_user_agent_with_version, timestamp_of_slot_start_millis, utcnow_ms, + }, +}; +use futures::{FutureExt, future::select_ok}; +use relay::{ProposalInfo, submit_block_with_timeout}; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; +use ssz::Encode; +use tracing::{debug, error, info, trace}; + +use super::CompoundSubmitBlockResponse; +use crate::{ + constants::SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + error::PbsClientError, + metrics::BEACON_NODE_STATUS, + state::{PbsState, PbsStateGuard}, +}; + +pub async fn handle_submit_block_v1( + state: State, + req_headers: HeaderMap, + raw_request: cb_common::utils::RawRequest, +) -> Result { + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V1).await +} + +pub async fn handle_submit_block_v2( + state: State, + req_headers: HeaderMap, + raw_request: cb_common::utils::RawRequest, +) -> Result { + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V2).await +} + +async fn handle_submit_block_impl( + State(state): State, + req_headers: HeaderMap, + raw_request: cb_common::utils::RawRequest, + api_version: BuilderApiVersion, +) -> Result { + let signed_blinded_block = + Arc::new(deserialize_body(&req_headers, 
raw_request.body_bytes).await?); + tracing::Span::current().record("slot", signed_blinded_block.slot().as_u64() as i64); + tracing::Span::current() + .record("block_hash", tracing::field::debug(signed_blinded_block.block_hash())); + tracing::Span::current().record("block_number", signed_blinded_block.block_number()); + tracing::Span::current() + .record("parent_hash", tracing::field::debug(signed_blinded_block.parent_hash())); + + let state = state.read().clone(); + + let now = utcnow_ms(); + let slot = signed_blinded_block.slot(); + let block_hash = signed_blinded_block.block_hash(); + let slot_start_ms = timestamp_of_slot_start_millis(slot.into(), state.config.chain); + let ua = get_user_agent(&req_headers); + let accept_types = get_accept_types(&req_headers).map_err(|e| { + error!(%e, "error parsing accept header"); + PbsClientError::DecodeError(format!("error parsing accept header: {e}")) + })?; + let accepts_ssz = accept_types.contains(&EncodingType::Ssz); + let accepts_json = accept_types.contains(&EncodingType::Json); + + info!(ua, ms_into_slot = now.saturating_sub(slot_start_ms), "new request"); + + match submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await { + Ok(res) => match res { + crate::CompoundSubmitBlockResponse::EmptyBody => { + info!("received unblinded block (v2)"); + + // Note: this doesn't provide consensus_version_header because it doesn't pass + // the body through, and there's no content-type header since the body is empty. 
+ BEACON_NODE_STATUS + .with_label_values(&["202", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) + .inc(); + Ok((StatusCode::ACCEPTED, "").into_response()) + } + CompoundSubmitBlockResponse::Light(payload_and_blobs) => { + trace!(?payload_and_blobs); + info!("received unblinded block (v1, unvalidated)"); + + BEACON_NODE_STATUS + .with_label_values(&["200", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) + .inc(); + + // Create the headers + let consensus_version_header = + match HeaderValue::from_str(&payload_and_blobs.version.to_string()) { + Ok(consensus_version_header) => { + Ok::(consensus_version_header) + } + Err(e) => { + return Err(PbsClientError::RelayError(format!( + "error decoding consensus version from relay payload: {e}" + ))); + } + }?; + let content_type = payload_and_blobs.encoding_type.content_type(); + let content_type_header = HeaderValue::from_str(content_type).unwrap(); + + // Build response + let mut res = payload_and_blobs.raw_bytes.into_response(); + res.headers_mut().insert(CONSENSUS_VERSION_HEADER, consensus_version_header); + res.headers_mut().insert(CONTENT_TYPE, content_type_header); + info!("sending response as {} (light)", content_type); + Ok(res) + } + CompoundSubmitBlockResponse::Full(payload_and_blobs) => { + trace!(?payload_and_blobs); + info!("received unblinded block (v1)"); + + BEACON_NODE_STATUS + .with_label_values(&["200", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) + .inc(); + + // Try SSZ + if accepts_ssz { + let mut response = payload_and_blobs.data.as_ssz_bytes().into_response(); + + // This won't actually fail since the string is a const + let content_type_header = + HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); + response.headers_mut().insert(CONTENT_TYPE, content_type_header); + response.headers_mut().insert( + CONSENSUS_VERSION_HEADER, + HeaderValue::from_str(&payload_and_blobs.version.to_string()).unwrap(), + ); + info!("sending response as SSZ"); + return Ok(response); + } + + // Handle JSON + if accepts_json { + 
Ok((StatusCode::OK, axum::Json(payload_and_blobs)).into_response()) + } else { + // This shouldn't ever happen but the compiler needs it + Err(PbsClientError::DecodeError( + "no viable accept types in request".to_string(), + )) + } + } + }, + + Err(err) => { + error!(%err, %block_hash, "CRITICAL: no payload received from relays. Check previous logs or use the Relay Data API"); + + let err = PbsClientError::NoPayload; + BEACON_NODE_STATUS + .with_label_values(&[err.status_code().as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) + .inc(); + Err(err) + } + } +} + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and +/// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to +/// distinguish between the two. +pub(crate) async fn submit_block( + signed_blinded_block: Arc, + req_headers: HeaderMap, + state: PbsState, + api_version: BuilderApiVersion, + accepted_types: HashSet, +) -> eyre::Result { + debug!(?req_headers, "received headers"); + + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().block_validation_mode; + let accept_types_str = match mode { + BlockValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types_str).unwrap()); + + // Send requests to all relays 
concurrently + let proposal_info = Arc::new(ProposalInfo { + signed_blinded_block, + headers: Arc::new(send_headers), + api_version, + validation_mode: mode, + accepted_types, + }); + let mut handles = Vec::with_capacity(state.all_relays().len()); + for relay in state.all_relays().iter() { + handles.push( + tokio::spawn(submit_block_with_timeout( + proposal_info.clone(), + relay.clone(), + state.pbs_config().timeout_get_payload_ms, + )) + .map(|join_result| match join_result { + Ok(res) => res, + Err(err) => Err(PbsError::TokioJoinError(err)), + }), + ); + } + + let results = select_ok(handles).await; + match results { + Ok((res, _)) => Ok(res), + Err(err) => Err(err.into()), + } +} diff --git a/crates/pbs/src/routes/submit_block.rs b/crates/pbs/src/routes/submit_block/relay.rs similarity index 51% rename from crates/pbs/src/routes/submit_block.rs rename to crates/pbs/src/routes/submit_block/relay.rs index 48b26db9..d0a0ae23 100644 --- a/crates/pbs/src/routes/submit_block.rs +++ b/crates/pbs/src/routes/submit_block/relay.rs @@ -4,288 +4,76 @@ use std::{ time::{Duration, Instant}, }; -use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; -use axum::{ - extract::State, - http::{HeaderMap, HeaderValue}, - response::IntoResponse, -}; +use axum::http::HeaderMap; use cb_common::{ config::BlockValidationMode, pbs::{ - BlindedBeaconBlock, BlobsBundle, BuilderApiVersion, ForkName, ForkVersionDecode, - GetPayloadInfo, HEADER_START_TIME_UNIX_MS, KzgCommitments, PayloadAndBlobs, RelayClient, - SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, + BlindedBeaconBlock, BuilderApiVersion, ForkName, RelayClient, SignedBlindedBeaconBlock, + SubmitBlindedBlockResponse, error::{PbsError, ValidationError}, }, utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, RawRequest, deserialize_body, get_accept_types, - get_consensus_version_header, get_user_agent, get_user_agent_with_version, - read_chunked_body_with_max, timestamp_of_slot_start_millis, utcnow_ms, + 
CONSENSUS_VERSION_HEADER, EncodingType, get_consensus_version_header, + read_chunked_body_with_max, }, }; -use futures::{FutureExt, future::select_ok}; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; +use reqwest::{StatusCode, header::CONTENT_TYPE}; use ssz::Encode; -use tracing::{debug, error, info, trace, warn}; +use tracing::{debug, warn}; use url::Url; +use super::validation::{ + decode_json_payload, decode_ssz_payload, get_light_info_from_json, validate_unblinded_block, +}; use crate::{ CompoundSubmitBlockResponse, LightSubmitBlockResponse, TIMEOUT_ERROR_CODE_STR, constants::{MAX_SIZE_SUBMIT_BLOCK_RESPONSE, SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG}, - error::PbsClientError, - metrics::{BEACON_NODE_STATUS, RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{PbsState, PbsStateGuard}, + metrics::RELAY_STATUS_CODE, }; -pub async fn handle_submit_block_v1( - state: State, - req_headers: HeaderMap, - raw_request: RawRequest, -) -> Result { - handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V1).await -} - -pub async fn handle_submit_block_v2( - state: State, - req_headers: HeaderMap, - raw_request: RawRequest, -) -> Result { - handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V2).await -} - -async fn handle_submit_block_impl( - State(state): State, - req_headers: HeaderMap, - raw_request: RawRequest, - api_version: BuilderApiVersion, -) -> Result { - let signed_blinded_block = - Arc::new(deserialize_body(&req_headers, raw_request.body_bytes).await?); - tracing::Span::current().record("slot", signed_blinded_block.slot().as_u64() as i64); - tracing::Span::current() - .record("block_hash", tracing::field::debug(signed_blinded_block.block_hash())); - tracing::Span::current().record("block_number", signed_blinded_block.block_number()); - tracing::Span::current() - .record("parent_hash", tracing::field::debug(signed_blinded_block.parent_hash())); - - let state = 
state.read().clone(); - - let now = utcnow_ms(); - let slot = signed_blinded_block.slot(); - let block_hash = signed_blinded_block.block_hash(); - let slot_start_ms = timestamp_of_slot_start_millis(slot.into(), state.config.chain); - let ua = get_user_agent(&req_headers); - let accept_types = get_accept_types(&req_headers).map_err(|e| { - error!(%e, "error parsing accept header"); - PbsClientError::DecodeError(format!("error parsing accept header: {e}")) - })?; - let accepts_ssz = accept_types.contains(&EncodingType::Ssz); - let accepts_json = accept_types.contains(&EncodingType::Json); - - info!(ua, ms_into_slot = now.saturating_sub(slot_start_ms), "new request"); - - match submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await { - Ok(res) => match res { - crate::CompoundSubmitBlockResponse::EmptyBody => { - info!("received unblinded block (v2)"); - - // Note: this doesn't provide consensus_version_header because it doesn't pass - // the body through, and there's no content-type header since the body is empty. 
- BEACON_NODE_STATUS - .with_label_values(&["202", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) - .inc(); - Ok((StatusCode::ACCEPTED, "").into_response()) - } - CompoundSubmitBlockResponse::Light(payload_and_blobs) => { - trace!(?payload_and_blobs); - info!("received unblinded block (v1, unvalidated)"); - - BEACON_NODE_STATUS - .with_label_values(&["200", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) - .inc(); - - // Create the headers - let consensus_version_header = - match HeaderValue::from_str(&payload_and_blobs.version.to_string()) { - Ok(consensus_version_header) => { - Ok::(consensus_version_header) - } - Err(e) => { - return Err(PbsClientError::RelayError(format!( - "error decoding consensus version from relay payload: {e}" - ))); - } - }?; - let content_type = payload_and_blobs.encoding_type.content_type(); - let content_type_header = HeaderValue::from_str(content_type).unwrap(); - - // Build response - let mut res = payload_and_blobs.raw_bytes.into_response(); - res.headers_mut().insert(CONSENSUS_VERSION_HEADER, consensus_version_header); - res.headers_mut().insert(CONTENT_TYPE, content_type_header); - info!("sending response as {} (light)", content_type); - Ok(res) - } - CompoundSubmitBlockResponse::Full(payload_and_blobs) => { - trace!(?payload_and_blobs); - info!("received unblinded block (v1)"); - - BEACON_NODE_STATUS - .with_label_values(&["200", SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) - .inc(); - - // Try SSZ - if accepts_ssz { - let mut response = payload_and_blobs.data.as_ssz_bytes().into_response(); - - // This won't actually fail since the string is a const - let content_type_header = - HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); - response.headers_mut().insert(CONTENT_TYPE, content_type_header); - response.headers_mut().insert( - CONSENSUS_VERSION_HEADER, - HeaderValue::from_str(&payload_and_blobs.version.to_string()).unwrap(), - ); - info!("sending response as SSZ"); - return Ok(response); - } - - // Handle JSON - if accepts_json { - 
Ok((StatusCode::OK, axum::Json(payload_and_blobs)).into_response()) - } else { - // This shouldn't ever happen but the compiler needs it - Err(PbsClientError::DecodeError( - "no viable accept types in request".to_string(), - )) - } - } - }, - - Err(err) => { - error!(%err, %block_hash, "CRITICAL: no payload received from relays. Check previous logs or use the Relay Data API"); - - let err = PbsClientError::NoPayload; - BEACON_NODE_STATUS - .with_label_values(&[err.status_code().as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG]) - .inc(); - Err(err) - } - } -} - -// ── Relay logic ────────────────────────────────────────────────────────────── - /// Info about a proposal submission request. /// Sent from submit_block to the submit_block_with_timeout function. #[derive(Clone)] -struct ProposalInfo { +pub struct ProposalInfo { /// The signed blinded block to submit - signed_blinded_block: Arc, + pub signed_blinded_block: Arc, /// Common baseline of headers to send with each request - headers: Arc, + pub headers: Arc, /// The version of the submit_block route being used - api_version: BuilderApiVersion, + pub api_version: BuilderApiVersion, /// How to validate the block returned by the relay - validation_mode: BlockValidationMode, + pub validation_mode: BlockValidationMode, /// The accepted encoding types from the original request - accepted_types: HashSet, + pub accepted_types: HashSet, } -/// Used interally to provide info and context about a submit_block request and +/// Used internally to provide info and context about a submit_block request and /// its response -struct SubmitBlockResponseInfo { +pub struct SubmitBlockResponseInfo { /// The raw body of the response - response_bytes: Vec, + pub response_bytes: Vec, /// The content type the response is encoded with - content_type: EncodingType, + pub content_type: EncodingType, /// Which fork the response bid is for (if provided as a header, rather than /// part of the body) - fork: Option, + pub fork: Option, /// The 
status code of the response, for logging - code: StatusCode, + pub code: StatusCode, /// The round-trip latency of the request - request_latency: Duration, -} - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and -/// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to -/// distinguish between the two. -pub(crate) async fn submit_block( - signed_blinded_block: Arc, - req_headers: HeaderMap, - state: PbsState, - api_version: BuilderApiVersion, - accepted_types: HashSet, -) -> eyre::Result { - debug!(?req_headers, "received headers"); - - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().block_validation_mode; - let accept_types = match mode { - BlockValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types).unwrap()); - - // Send requests to all relays concurrently - let proposal_info = Arc::new(ProposalInfo { - signed_blinded_block, - headers: Arc::new(send_headers), - api_version, - validation_mode: mode, - accepted_types, - }); - let mut handles = Vec::with_capacity(state.all_relays().len()); - for relay in state.all_relays().iter() { - handles.push( - tokio::spawn(submit_block_with_timeout( - proposal_info.clone(), - relay.clone(), - state.pbs_config().timeout_get_payload_ms, - )) - .map(|join_result| match join_result { - Ok(res) => res, - 
Err(err) => Err(PbsError::TokioJoinError(err)), - }), - ); - } - - let results = select_ok(handles).await; - match results { - Ok((res, _)) => Ok(res), - Err(err) => Err(err.into()), - } + pub request_latency: Duration, } /// Submit blinded block to relay, retry connection errors until the /// given timeout has passed -async fn submit_block_with_timeout( +pub async fn submit_block_with_timeout( proposal_info: Arc, relay: RelayClient, timeout_ms: u64, @@ -560,7 +348,7 @@ async fn send_submit_block_light( /// Sends the actual HTTP request to the relay's submit_block endpoint, /// returning the response (if applicable), the round-trip time, and the /// encoding type used for the body (if any). Used by send_submit_block. -async fn send_submit_block_impl( +pub async fn send_submit_block_impl( relay: &RelayClient, url: Arc, timeout_ms: u64, @@ -630,12 +418,12 @@ async fn send_submit_block_impl( // Log the response code and latency let code = res.status(); let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .inc(); + super::super::record_relay_metrics( + SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + &relay.id, + code, + request_latency, + ); // If this was API v2 and succeeded then we can just return here if api_version != BuilderApiVersion::V1 { @@ -726,152 +514,3 @@ async fn send_submit_block_impl( let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; Ok(SubmitBlockResponseInfo { response_bytes, content_type, fork, code, request_latency }) } - -/// Decode a JSON-encoded submit_block response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: 
String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the fork name from a submit_block JSON response (used for light -/// processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result { - #[derive(Deserialize)] - struct LightSubmitBlockResponse { - version: ForkName, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed.version), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded submit_block response -fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) -} - -fn validate_unblinded_block( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, - fork_name: ForkName, -) -> Result<(), PbsError> { - match fork_name { - ForkName::Base | - ForkName::Altair | - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), - ForkName::Electra => validate_unblinded_block_electra( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - ForkName::Fulu => validate_unblinded_block_fulu( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - } -} - -fn validate_unblinded_block_electra( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: 
expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() != blobs_bundle.proofs.len() - { - return Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} - -fn validate_unblinded_block_fulu( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() * CELLS_PER_EXT_BLOB != blobs_bundle.proofs.len() - { - return Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", 
blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} diff --git a/crates/pbs/src/routes/submit_block/validation.rs b/crates/pbs/src/routes/submit_block/validation.rs new file mode 100644 index 00000000..97659640 --- /dev/null +++ b/crates/pbs/src/routes/submit_block/validation.rs @@ -0,0 +1,148 @@ +use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; +use cb_common::pbs::{ + BlobsBundle, ForkName, ForkVersionDecode, KzgCommitments, PayloadAndBlobs, + SubmitBlindedBlockResponse, + error::{PbsError, ValidationError}, +}; +use serde::Deserialize; + +/// Decode a JSON-encoded submit_block response +pub fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Get the fork name from a submit_block JSON response (used for light +/// processing) +pub fn get_light_info_from_json(response_bytes: &[u8]) -> Result { + #[derive(Deserialize)] + struct LightVersionOnly { + version: ForkName, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed.version), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Decode an SSZ-encoded submit_block response +pub fn decode_ssz_payload( + response_bytes: &[u8], + fork: ForkName, +) -> Result { + let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) +} + +pub fn validate_unblinded_block( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, + fork_name: ForkName, +) -> Result<(), PbsError> { 
+ match fork_name { + ForkName::Base | + ForkName::Altair | + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), + ForkName::Electra => validate_unblinded_block_electra( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + ForkName::Fulu => validate_unblinded_block_fulu( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + } +} + +pub fn validate_unblinded_block_electra( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + validate_unblinded_block_inner( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + expected_commitments.len(), + ) +} + +pub fn validate_unblinded_block_fulu( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + validate_unblinded_block_inner( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + expected_commitments.len() * CELLS_PER_EXT_BLOB, + ) +} + +pub fn validate_unblinded_block_inner( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, + expected_proof_count: usize, +) -> Result<(), PbsError> { + if expected_block_hash != got_block_hash { + return Err(PbsError::Validation(ValidationError::BlockHashMismatch { + expected: expected_block_hash, + got: got_block_hash, + })); + } + + if expected_commitments.len() != blobs_bundle.blobs.len() || + expected_commitments.len() != blobs_bundle.commitments.len() || + expected_proof_count != blobs_bundle.proofs.len() + { + return Err(PbsError::Validation(ValidationError::KzgCommitments { + expected_blobs: expected_commitments.len(), + got_blobs: blobs_bundle.blobs.len(), + got_commitments: 
blobs_bundle.commitments.len(), + got_proofs: blobs_bundle.proofs.len(), + })); + } + + for (i, comm) in expected_commitments.iter().enumerate() { + // this is safe since we already know they are the same length + if *comm != blobs_bundle.commitments[i] { + return Err(PbsError::Validation(ValidationError::KzgMismatch { + expected: format!("{comm}"), + got: format!("{}", blobs_bundle.commitments[i]), + index: i, + })); + } + } + + Ok(()) +} From 27011a3f2e6fac0244c664af48c31cdff4729a80 Mon Sep 17 00:00:00 2001 From: Jason Vranek Date: Tue, 7 Apr 2026 08:24:39 -0700 Subject: [PATCH 25/25] update microbench with different encodings --- benches/microbench/src/get_header.rs | 199 +++++++++++++++++---------- 1 file changed, 125 insertions(+), 74 deletions(-) diff --git a/benches/microbench/src/get_header.rs b/benches/microbench/src/get_header.rs index 4059242e..111f13d6 100644 --- a/benches/microbench/src/get_header.rs +++ b/benches/microbench/src/get_header.rs @@ -2,10 +2,44 @@ //! //! # What this measures //! -//! The full `get_header` pipeline end-to-end: HTTP fan-out to N in-process mock -//! relays, response parsing, header validation, signature verification, and bid -//! selection. This is wall-clock timing — useful for local development feedback -//! and catching latency regressions across relay counts. +//! The per-request `get_header` pipeline: HTTP request to a single in-process +//! mock relay, response parsing, header validation, signature verification, and +//! bid selection. This is wall-clock timing — useful for local development +//! feedback and catching latency regressions across validation configurations. +//! +//! A single relay is used because relay fan-out uses `join_all` (not +//! `tokio::spawn`), so all futures are polled on the same task. HTTP requests +//! are truly concurrent but CPU-bound validation work (deserialization, BLS sig +//! verification) is interleaved on one thread. Validation cost therefore scales +//! 
roughly linearly with relay count — one relay is sufficient to measure the +//! per-relay cost, and N relays can be estimated as ~N× that baseline. +//! +//! # Benchmark dimensions +//! +//! **Validation mode** (`HeaderValidationMode`): +//! - `None` — light path: skips full deserialization and sig verification, +//! extracts only fork + bid value, forwards raw bytes. Fastest option, +//! requires complete trust in relays. +//! - `Standard` — full deserialization, header validation (block hash, parent +//! hash, timestamp, fork), BLS signature verification. Default mode. +//! - `Extra` — Standard + parent block validation via RPC. NOTE: without a live +//! RPC endpoint the parent block fetch returns None and `extra_validation` is +//! skipped, so Extra degrades to Standard in this bench. It is included to +//! catch any overhead from the mode flag itself and Accept header +//! differences. A meaningful Extra benchmark would require a mock RPC server. +//! +//! **Encoding type** (`EncodingType`): +//! - JSON only — validator requests `application/json` +//! - SSZ only — validator requests `application/octet-stream` +//! - Both — validator accepts either (CB picks the best available) +//! +//! Note: in Standard and Extra modes, `get_header` always requests both +//! encodings from relays regardless of what the validator asked for, because it +//! needs to unpack the body. The encoding dimension therefore only affects the +//! None (light) path where the response is forwarded raw and must match what +//! the validator accepts. +//! +//! Total: 3 modes × 3 encodings = 9 benchmark cases. //! //! Criterion runs each benchmark hundreds of times, applies statistical //! analysis, and reports mean ± standard deviation. Results are saved to @@ -17,8 +51,11 @@ //! # Run all benchmarks //! cargo bench --package cb-bench-micro //! -//! # Run a specific variant by filter -//! cargo bench --package cb-bench-micro -- 3_relays +//! # Run only the light (None) mode benchmarks +//! 
cargo bench --package cb-bench-micro -- none +//! +//! # Compare modes for SSZ encoding +//! cargo bench --package cb-bench-micro -- ssz //! //! # Save a named baseline to compare against later //! cargo bench --package cb-bench-micro -- --save-baseline main @@ -31,16 +68,20 @@ //! //! - PBS HTTP server overhead (we call `get_header()` directly, bypassing axum //! routing) -//! - Mock relay startup time (servers are started once in setup, before timing +//! - Mock relay startup time (server is started once in setup, before timing //! begins) //! - `HeaderMap` allocation (created once in setup, cloned cheaply per //! iteration) +//! - Extra mode's RPC fetch (no live RPC endpoint in bench environment) use std::{collections::HashSet, path::PathBuf, sync::Arc}; use alloy::primitives::B256; use axum::http::HeaderMap; -use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain, utils::EncodingType}; +use cb_common::{ + config::HeaderValidationMode, pbs::GetHeaderParams, signer::random_secret, types::Chain, + utils::EncodingType, +}; use cb_pbs::{PbsState, get_header}; use cb_tests::{ mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, @@ -49,65 +90,79 @@ use cb_tests::{ use criterion::{Criterion, black_box, criterion_group, criterion_main}; const CHAIN: Chain = Chain::Hoodi; -const MAX_RELAYS: usize = 5; -const RELAY_COUNTS: [usize; 3] = [1, 3, MAX_RELAYS]; -/// Benchmarks `get_header` across three relay-count variants. +const MODES: [(HeaderValidationMode, &str); 3] = [ + (HeaderValidationMode::None, "none"), + (HeaderValidationMode::Standard, "standard"), + // Extra degrades to Standard without a live RPC endpoint — included to + // measure any overhead from the mode flag and Accept header differences. + // See module doc comment for details. 
+ (HeaderValidationMode::Extra, "extra"), +]; + +const ENCODINGS: [(&str, &[EncodingType]); 3] = [ + ("json", &[EncodingType::Json]), + ("ssz", &[EncodingType::Ssz]), + ("both", &[EncodingType::Json, EncodingType::Ssz]), +]; + +/// Build a `PbsState` for a specific validation mode with a single relay. +/// +/// Port 0 is used because we call `get_header()` directly — no PBS server is +/// started, so the port is never bound. The actual relay endpoint is carried +/// inside the `RelayClient` object. +fn make_pbs_state(mode: HeaderValidationMode, relay: cb_common::pbs::RelayClient) -> PbsState { + let mut pbs_config = get_pbs_config(0); + pbs_config.header_validation_mode = mode; + let config = to_pbs_config(CHAIN, pbs_config, vec![relay]); + PbsState::new(config, PathBuf::new()) +} + +/// Benchmarks `get_header` across all validation modes and encoding types. /// /// # Setup (runs once, not measured) /// -/// All MAX_RELAYS mock relays are started up-front and shared across variants. -/// Each variant gets its own `PbsState` pointing to a different relay subset. -/// The mock relays are in-process axum servers on localhost. +/// A single mock relay is started up-front and shared across all variants. +/// Each variant gets its own `PbsState` configured with the appropriate +/// `HeaderValidationMode`. The mock relay is an in-process axum server on +/// localhost. /// /// # Per-iteration (measured) /// /// Each call to `b.iter(|| ...)` runs `get_header()` once: -/// - Fans out HTTP requests to N mock relays concurrently -/// - Parses and validates each relay response (header data + BLS signature) -/// - Selects the highest-value bid +/// - Sends an HTTP request to the mock relay +/// - Parses and validates the relay response (or skips in None mode) +/// - Returns the bid /// /// `black_box(...)` prevents the compiler from optimizing away inputs or the -/// return value. 
Without it, the optimizer could see that the result is unused -/// and eliminate the call entirely, producing a meaningless zero measurement. +/// return value. +/// +/// # Criterion grouping +/// +/// Groups are structured as `get_header/{encoding}` with the validation mode +/// as the bench function name. Each Criterion chart directly compares None vs +/// Standard vs Extra for the same encoding — the comparison that matters most +/// for understanding the latency cost of validation. fn bench_get_header(c: &mut Criterion) { let rt = tokio::runtime::Runtime::new().expect("tokio runtime"); - // Start all mock relays once and build one PbsState per relay-count variant. - // All relays share the same MockRelayState (and therefore the same signing - // key). Each relay gets its own OS-assigned port via get_free_listener() so - // there is no TOCTOU race and no hardcoded port reservations. - let (states, params) = rt.block_on(async { + // Start a single mock relay. It gets its own OS-assigned port via + // get_free_listener() so there is no TOCTOU race. + let (relay_client, params) = rt.block_on(async { let signer = random_secret(); let pubkey = signer.public_key(); let mock_state = Arc::new(MockRelayState::new(CHAIN, signer)); - let mut relay_clients = Vec::with_capacity(MAX_RELAYS); - for _ in 0..MAX_RELAYS { - let listener = get_free_listener().await; - let port = listener.local_addr().unwrap().port(); - tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), listener)); - relay_clients.push(generate_mock_relay(port, pubkey.clone()).expect("relay client")); - } + let listener = get_free_listener().await; + let port = listener.local_addr().unwrap().port(); + tokio::spawn(start_mock_relay_service_with_listener(mock_state, listener)); + let relay_client = generate_mock_relay(port, pubkey.clone()).expect("relay client"); - // Give all servers time to start accepting before benchmarking begins. 
+ // Give the server time to start accepting before benchmarking begins. tokio::time::sleep(std::time::Duration::from_millis(200)).await; let params = GetHeaderParams { slot: 0, parent_hash: B256::ZERO, pubkey }; - - // Port 0 here is the port the PBS service itself would bind to for incoming - // validator requests. We call get_header() as a function directly, so no - // PBS server is started and this port is never used. The actual relay - // endpoints are carried inside the RelayClient objects. - let states: Vec = RELAY_COUNTS - .iter() - .map(|&n| { - let config = to_pbs_config(CHAIN, get_pbs_config(0), relay_clients[..n].to_vec()); - PbsState::new(config, PathBuf::new()) - }) - .collect(); - - (states, params) + (relay_client, params) }); // Empty HeaderMap matches what the PBS route handler receives for requests @@ -115,35 +170,31 @@ fn bench_get_header(c: &mut Criterion) { // allocation per iteration. let headers = HeaderMap::new(); - // A BenchmarkGroup groups related functions so Criterion produces a single - // comparison table and chart. All variants share the name "get_header/". - let mut group = c.benchmark_group("get_header"); - - for (i, relay_count) in RELAY_COUNTS.iter().enumerate() { - let state = states[i].clone(); - let params = params.clone(); - let headers = headers.clone(); - - // bench_function registers one timing function. The closure receives a - // `Bencher` — calling `b.iter(|| ...)` is the measured hot loop. - // Everything outside `b.iter` is setup and not timed. - group.bench_function(format!("{relay_count}_relays"), |b| { - b.iter(|| { - // block_on drives the async future to completion on the shared - // runtime. get_header takes owned args, so we clone cheap types - // (Arc-backed state, stack-sized params) on each iteration. 
- rt.block_on(get_header( - black_box(params.clone()), - black_box(headers.clone()), - black_box(state.clone()), - black_box(HashSet::from([EncodingType::Json, EncodingType::Ssz])), - )) - .expect("get_header failed") - }) - }); - } + for &(encoding_name, encoding_types) in &ENCODINGS { + let encodings: HashSet = encoding_types.iter().copied().collect(); + let mut group = c.benchmark_group(format!("get_header/{encoding_name}")); + + for &(mode, mode_name) in &MODES { + let state = make_pbs_state(mode, relay_client.clone()); + let params = params.clone(); + let headers = headers.clone(); + let encodings = encodings.clone(); + + group.bench_function(mode_name, |b| { + b.iter(|| { + rt.block_on(get_header( + black_box(params.clone()), + black_box(headers.clone()), + black_box(state.clone()), + black_box(encodings.clone()), + )) + .expect("get_header failed") + }) + }); + } - group.finish(); + group.finish(); + } } // criterion_group! registers bench_get_header as a benchmark group named