Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
e7f3020
v0.9.4-rc1 changelog notes (#437)
JasonVranek Mar 18, 2026
04ae8b7
address Dirk issues in docker_init.rs:
JasonVranek Mar 25, 2026
6ec3782
cleaner error message if TLS CryptoProvider fails
JasonVranek Mar 25, 2026
58b4b22
users get error message on missing [signer] section instead of toml d…
JasonVranek Mar 25, 2026
001b312
support partial jwt reloads
JasonVranek Mar 25, 2026
76905de
require the nonce in signer-api.yml - previously was marked optional …
JasonVranek Mar 25, 2026
8268572
fix suffix when displaying X-Forwarded-For
JasonVranek Mar 25, 2026
7c8cce6
only take the `jwt_auth_failures` writelock if strictly necessary, de…
JasonVranek Mar 25, 2026
4a9aff7
remove mark_jwt_failure() calls from failures unrelated to jwts
JasonVranek Mar 25, 2026
d834242
add round-trip sign->verify unit tests
JasonVranek Mar 26, 2026
2a1d315
add get_header auction winner log (#443)
ninaiiad Mar 26, 2026
a9a5c11
Merge remote-tracking branch 'upstream/main' into sigp-audit-fixes
JasonVranek Mar 26, 2026
dd87334
remove unutilized BuilderApi trait to simplify abstraction (keep pbs/…
JasonVranek Mar 24, 2026
eb90215
Removed BuilderApiState trait to complete simplifying abstraction and…
JasonVranek Mar 25, 2026
7e4648f
address Dirk issues in docker_init.rs:
JasonVranek Mar 25, 2026
84a0899
cleaner error message if TLS CryptoProvider fails
JasonVranek Mar 25, 2026
b3b3251
users get error message on missing [signer] section instead of toml d…
JasonVranek Mar 25, 2026
7cb8742
support partial jwt reloads
JasonVranek Mar 25, 2026
f0820e6
require the nonce in signer-api.yml - previously was marked optional …
JasonVranek Mar 25, 2026
e23c3ee
fix suffix when displaying X-Forwarded-For
JasonVranek Mar 25, 2026
170977b
only take the `jwt_auth_failures` writelock if strictly necessary, de…
JasonVranek Mar 25, 2026
b48e3fb
remove mark_jwt_failure() calls from failures unrelated to jwts
JasonVranek Mar 25, 2026
64f962f
add round-trip sign->verify unit tests
JasonVranek Mar 26, 2026
d71aff0
remove optional signer client from PbsModuleConfig as signer should o…
JasonVranek Mar 26, 2026
2d842f5
refactor get_header and submit_block into modules for readability
JasonVranek Mar 26, 2026
d1976c5
Merge remote-tracking branch 'upstream/sigp-audit-fixes' into remove-…
JasonVranek Mar 26, 2026
27011a3
update microbench with different encodings
JasonVranek Apr 7, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
### v0.9.4-rc1
- Unifies the `pbs`, `signer`, and `cli` binaries into one: `commit-boost`. This changes the CLI; notably, the `init` command is now invoked as `commit-boost init --config <config_name>`.
- Includes new quality of life testing improvements in the Justfile: unit test coverage tooling, local Kurtosis testnet, and microbenchmark diffing.
- Hardens the release process so that no single compromised maintainer can unilaterally cut a release. Additionally, all binaries are now signed during CI and can easily be verified before use.
18 changes: 0 additions & 18 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[workspace]
members = ["benches/*", "bin", "crates/*", "examples/da_commit", "examples/status_api", "tests"]
members = ["benches/*", "bin", "crates/*", "examples/da_commit", "tests"]
resolver = "2"

[workspace.package]
Expand Down
15 changes: 10 additions & 5 deletions api/signer-api.yml
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ paths:
application/json:
schema:
type: object
required: [pubkey, object_root]
required: [pubkey, object_root, nonce]
properties:
pubkey:
description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from.
Expand Down Expand Up @@ -234,7 +234,7 @@ paths:
application/json:
schema:
type: object
required: [proxy, object_root]
required: [proxy, object_root, nonce]
properties:
proxy:
description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from.
Expand Down Expand Up @@ -382,7 +382,7 @@ paths:
application/json:
schema:
type: object
required: [proxy, object_root]
required: [proxy, object_root, nonce]
properties:
proxy:
description: The 20-byte Ethereum address, with optional `0x` prefix, of the proxy key that you want to request a signature from.
Expand Down Expand Up @@ -695,7 +695,12 @@ components:
$ref: "#/components/schemas/EcdsaSignature"
Nonce:
type: integer
description: If your module tracks nonces per signature (e.g., to prevent replay attacks), this is the unique nonce to use for the signature. It should be an unsigned 64-bit integer in big-endian format. It must be between 0 and 2^64-2, inclusive. If your module doesn't use nonces, we suggest setting this to 2^64-1 instead of 0 because 0 is a legal nonce and will cause complications with your module if you ever want to use a nonce in the future.
description: |
Replay-protection nonce, always mixed into the signing root via `PropCommitSigningInfo`. It
must be an unsigned 64-bit integer between 0 and 2^64-2 (18446744073709551614), inclusive.

Modules that track nonces for replay protection should use a monotonically increasing value
per key. Modules that do not use replay protection should always send `0`.
minimum: 0
maximum: 18446744073709551614 // 2^64-2
maximum: 18446744073709551614
example: 1
208 changes: 129 additions & 79 deletions benches/microbench/src/get_header.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,44 @@
//!
//! # What this measures
//!
//! The full `get_header` pipeline end-to-end: HTTP fan-out to N in-process mock
//! relays, response parsing, header validation, signature verification, and bid
//! selection. This is wall-clock timing — useful for local development feedback
//! and catching latency regressions across relay counts.
//! The per-request `get_header` pipeline: HTTP request to a single in-process
//! mock relay, response parsing, header validation, signature verification, and
//! bid selection. This is wall-clock timing — useful for local development
//! feedback and catching latency regressions across validation configurations.
//!
//! A single relay is used because relay fan-out uses `join_all` (not
//! `tokio::spawn`), so all futures are polled on the same task. HTTP requests
//! are truly concurrent but CPU-bound validation work (deserialization, BLS sig
//! verification) is interleaved on one thread. Validation cost therefore scales
//! roughly linearly with relay count — one relay is sufficient to measure the
//! per-relay cost, and N relays can be estimated as ~N× that baseline.
//!
//! # Benchmark dimensions
//!
//! **Validation mode** (`HeaderValidationMode`):
//! - `None` — light path: skips full deserialization and sig verification,
//! extracts only fork + bid value, forwards raw bytes. Fastest option,
//! requires complete trust in relays.
//! - `Standard` — full deserialization, header validation (block hash, parent
//! hash, timestamp, fork), BLS signature verification. Default mode.
//! - `Extra` — Standard + parent block validation via RPC. NOTE: without a live
//! RPC endpoint the parent block fetch returns None and `extra_validation` is
//! skipped, so Extra degrades to Standard in this bench. It is included to
//! catch any overhead from the mode flag itself and Accept header
//! differences. A meaningful Extra benchmark would require a mock RPC server.
//!
//! **Encoding type** (`EncodingType`):
//! - JSON only — validator requests `application/json`
//! - SSZ only — validator requests `application/octet-stream`
//! - Both — validator accepts either (CB picks the best available)
//!
//! Note: in Standard and Extra modes, `get_header` always requests both
//! encodings from relays regardless of what the validator asked for, because it
//! needs to unpack the body. The encoding dimension therefore only affects the
//! None (light) path where the response is forwarded raw and must match what
//! the validator accepts.
//!
//! Total: 3 modes × 3 encodings = 9 benchmark cases.
//!
//! Criterion runs each benchmark hundreds of times, applies statistical
//! analysis, and reports mean ± standard deviation. Results are saved to
Expand All @@ -17,8 +51,11 @@
//! # Run all benchmarks
//! cargo bench --package cb-bench-micro
//!
//! # Run a specific variant by filter
//! cargo bench --package cb-bench-micro -- 3_relays
//! # Run only the light (None) mode benchmarks
//! cargo bench --package cb-bench-micro -- none
//!
//! # Compare modes for SSZ encoding
//! cargo bench --package cb-bench-micro -- ssz
//!
//! # Save a named baseline to compare against later
//! cargo bench --package cb-bench-micro -- --save-baseline main
Expand All @@ -31,120 +68,133 @@
//!
//! - PBS HTTP server overhead (we call `get_header()` directly, bypassing axum
//! routing)
//! - Mock relay startup time (servers are started once in setup, before timing
//! - Mock relay startup time (server is started once in setup, before timing
//! begins)
//! - `HeaderMap` allocation (created once in setup, cloned cheaply per
//! iteration)
//! - Extra mode's RPC fetch (no live RPC endpoint in bench environment)

use std::{path::PathBuf, sync::Arc, time::Duration};
use std::{collections::HashSet, path::PathBuf, sync::Arc};

use alloy::primitives::B256;
use axum::http::HeaderMap;
use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain};
use cb_common::{
config::HeaderValidationMode, pbs::GetHeaderParams, signer::random_secret, types::Chain,
utils::EncodingType,
};
use cb_pbs::{PbsState, get_header};
use cb_tests::{
mock_relay::{MockRelayState, start_mock_relay_service},
utils::{generate_mock_relay, get_pbs_static_config, to_pbs_config},
mock_relay::{MockRelayState, start_mock_relay_service_with_listener},
utils::{generate_mock_relay, get_free_listener, get_pbs_config, to_pbs_config},
};
use criterion::{Criterion, black_box, criterion_group, criterion_main};

// Ports 19201–19205 are reserved for the microbenchmark mock relays.
const BASE_PORT: u16 = 19200;
const CHAIN: Chain = Chain::Hoodi;
const MAX_RELAYS: usize = 5;
const RELAY_COUNTS: [usize; 3] = [1, 3, MAX_RELAYS];

/// Benchmarks `get_header` across three relay-count variants.
const MODES: [(HeaderValidationMode, &str); 3] = [
(HeaderValidationMode::None, "none"),
(HeaderValidationMode::Standard, "standard"),
// Extra degrades to Standard without a live RPC endpoint — included to
// measure any overhead from the mode flag and Accept header differences.
// See module doc comment for details.
(HeaderValidationMode::Extra, "extra"),
];

const ENCODINGS: [(&str, &[EncodingType]); 3] = [
("json", &[EncodingType::Json]),
("ssz", &[EncodingType::Ssz]),
("both", &[EncodingType::Json, EncodingType::Ssz]),
];

/// Build a `PbsState` for a specific validation mode with a single relay.
///
/// Port 0 is used because we call `get_header()` directly — no PBS server is
/// started, so the port is never bound. The actual relay endpoint is carried
/// inside the `RelayClient` object.
fn make_pbs_state(mode: HeaderValidationMode, relay: cb_common::pbs::RelayClient) -> PbsState {
// Start from the shared test config; only the validation mode differs between
// benchmark variants, so each variant gets an otherwise identical state.
let mut pbs_config = get_pbs_config(0);
pbs_config.header_validation_mode = mode;
let config = to_pbs_config(CHAIN, pbs_config, vec![relay]);
// NOTE(review): empty PathBuf assumed acceptable because no config reload is
// exercised in the benchmark — confirm against PbsState::new's contract.
PbsState::new(config, PathBuf::new())
}

/// Benchmarks `get_header` across all validation modes and encoding types.
///
/// # Setup (runs once, not measured)
///
/// All MAX_RELAYS mock relays are started up-front and shared across variants.
/// Each variant gets its own `PbsState` pointing to a different relay subset.
/// The mock relays are in-process axum servers on localhost.
/// A single mock relay is started up-front and shared across all variants.
/// Each variant gets its own `PbsState` configured with the appropriate
/// `HeaderValidationMode`. The mock relay is an in-process axum server on
/// localhost.
///
/// # Per-iteration (measured)
///
/// Each call to `b.iter(|| ...)` runs `get_header()` once:
/// - Fans out HTTP requests to N mock relays concurrently
/// - Parses and validates each relay response (header data + BLS signature)
/// - Selects the highest-value bid
/// - Sends an HTTP request to the mock relay
/// - Parses and validates the relay response (or skips in None mode)
/// - Returns the bid
///
/// `black_box(...)` prevents the compiler from optimizing away inputs or the
/// return value. Without it, the optimizer could see that the result is unused
/// and eliminate the call entirely, producing a meaningless zero measurement.
/// return value.
///
/// # Criterion grouping
///
/// Groups are structured as `get_header/{encoding}` with the validation mode
/// as the bench function name. Each Criterion chart directly compares None vs
/// Standard vs Extra for the same encoding — the comparison that matters most
/// for understanding the latency cost of validation.
fn bench_get_header(c: &mut Criterion) {
let rt = tokio::runtime::Runtime::new().expect("tokio runtime");

// Start all mock relays once and build one PbsState per relay-count variant.
// All relays share the same MockRelayState (and therefore the same signing
// key).
let (states, params) = rt.block_on(async {
// Start a single mock relay. It gets its own OS-assigned port via
// get_free_listener() so there is no TOCTOU race.
let (relay_client, params) = rt.block_on(async {
let signer = random_secret();
let pubkey = signer.public_key();
let mock_state = Arc::new(MockRelayState::new(CHAIN, signer));

let relay_clients: Vec<_> = (0..MAX_RELAYS)
.map(|i| {
let port = BASE_PORT + 1 + i as u16;
tokio::spawn(start_mock_relay_service(mock_state.clone(), port));
generate_mock_relay(port, pubkey.clone()).expect("relay client")
})
.collect();
let listener = get_free_listener().await;
let port = listener.local_addr().unwrap().port();
tokio::spawn(start_mock_relay_service_with_listener(mock_state, listener));
let relay_client = generate_mock_relay(port, pubkey.clone()).expect("relay client");

// Give all servers time to bind before benchmarking starts.
tokio::time::sleep(Duration::from_millis(200)).await;
// Give the server time to start accepting before benchmarking begins.
tokio::time::sleep(std::time::Duration::from_millis(200)).await;

let params = GetHeaderParams { slot: 0, parent_hash: B256::ZERO, pubkey };

// Port 0 here is the port the PBS service itself would bind to for incoming
// validator requests. We call get_header() as a function directly, so no
// PBS server is started and this port is never used. The actual relay
// endpoints are carried inside the RelayClient objects (ports 19201–19205).
let states: Vec<PbsState> = RELAY_COUNTS
.iter()
.map(|&n| {
let config =
to_pbs_config(CHAIN, get_pbs_static_config(0), relay_clients[..n].to_vec());
PbsState::new(config, PathBuf::new())
})
.collect();

(states, params)
(relay_client, params)
});

// Empty HeaderMap matches what the PBS route handler receives for requests
// without custom headers. Created once here to avoid measuring its
// allocation per iteration.
let headers = HeaderMap::new();

// A BenchmarkGroup groups related functions so Criterion produces a single
// comparison table and chart. All variants share the name "get_header/".
let mut group = c.benchmark_group("get_header");

for (i, relay_count) in RELAY_COUNTS.iter().enumerate() {
let state = states[i].clone();
let params = params.clone();
let headers = headers.clone();

// bench_function registers one timing function. The closure receives a
// `Bencher` — calling `b.iter(|| ...)` is the measured hot loop.
// Everything outside `b.iter` is setup and not timed.
group.bench_function(format!("{relay_count}_relays"), |b| {
b.iter(|| {
// block_on drives the async future to completion on the shared
// runtime. get_header takes owned args, so we clone cheap types
// (Arc-backed state, stack-sized params) on each iteration.
rt.block_on(get_header(
black_box(params.clone()),
black_box(headers.clone()),
black_box(state.clone()),
))
.expect("get_header failed")
})
});
for &(encoding_name, encoding_types) in &ENCODINGS {
let encodings: HashSet<EncodingType> = encoding_types.iter().copied().collect();
let mut group = c.benchmark_group(format!("get_header/{encoding_name}"));

for &(mode, mode_name) in &MODES {
let state = make_pbs_state(mode, relay_client.clone());
let params = params.clone();
let headers = headers.clone();
let encodings = encodings.clone();

group.bench_function(mode_name, |b| {
b.iter(|| {
rt.block_on(get_header(
black_box(params.clone()),
black_box(headers.clone()),
black_box(state.clone()),
black_box(encodings.clone()),
))
.expect("get_header failed")
})
});
}

group.finish();
}

group.finish();
}

// criterion_group! registers bench_get_header as a benchmark group named
Expand Down
4 changes: 2 additions & 2 deletions bin/commit-boost.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use cb_common::{
},
utils::{initialize_tracing_log, print_logo, wait_for_signal},
};
use cb_pbs::{DefaultBuilderApi, PbsService, PbsState};
use cb_pbs::{PbsService, PbsState};
use cb_signer::service::SigningService;
use clap::{Parser, Subcommand};
use eyre::Result;
Expand Down Expand Up @@ -68,7 +68,7 @@ async fn run_pbs_service() -> Result<()> {

PbsService::init_metrics(pbs_config.chain)?;
let state = PbsState::new(pbs_config, config_path);
let server = PbsService::run::<_, DefaultBuilderApi>(state);
let server = PbsService::run(state);

tokio::select! {
maybe_err = server => {
Expand Down
5 changes: 1 addition & 4 deletions bin/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,7 @@ pub mod prelude {
utils::{initialize_tracing_log, utcnow_ms, utcnow_ns, utcnow_sec, utcnow_us},
};
pub use cb_metrics::provider::MetricsProvider;
pub use cb_pbs::{
BuilderApi, BuilderApiState, DefaultBuilderApi, PbsService, PbsState, PbsStateGuard,
get_header, get_status, register_validator, submit_block,
};
pub use cb_pbs::{PbsService, PbsState, PbsStateGuard};
// The TreeHash derive macro requires tree_hash as import
pub mod tree_hash {
pub use tree_hash::*;
Expand Down
Loading
Loading