diff --git a/Cargo.toml b/Cargo.toml index ee0dc00..8b37757 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "sphinx-packet" -version = "0.5.0" +version = "0.6.0" authors = ["Ania Piotrowska ", "Dave Hrycyszyn ", "Jędrzej Stuczyński "] -edition = "2018" +edition = "2021" license = "Apache-2.0" description = "A Sphinx packet implementation in Rust" repository = "https://github.com/nymtech/sphinx" @@ -43,3 +43,12 @@ rand_chacha = "0.3.1" name = "benchmarks" harness = false +[workspace.lints.clippy] +unwrap_used = "deny" +expect_used = "deny" +todo = "deny" +dbg_macro = "deny" +exit = "deny" +panic = "deny" +unimplemented = "deny" +unreachable = "deny" diff --git a/README.md b/README.md index fb8ef39..6c7bf47 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,11 @@ removed processing and creation of packets with undefined operations - removed `RoutingKeys` in favour of `ExpandedSharedSecret` and added `ReplyTag` - type adjustments +#### v0.6.0 + +- new way of deriving `PayloadKey` that uses seed obtained from the `ExpandedSharedSecret` to reduce sizes of `SURB`s +- API changes + ### Benchmarks To run benchmarks, use: diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs index 5504038..c1b72cb 100644 --- a/benches/benchmarks.rs +++ b/benches/benchmarks.rs @@ -18,12 +18,17 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use sphinx_packet::constants::{ DESTINATION_ADDRESS_LENGTH, IDENTIFIER_LENGTH, NODE_ADDRESS_LENGTH, }; - use sphinx_packet::header::delays; use sphinx_packet::route::{Destination, DestinationAddressBytes, Node, NodeAddressBytes}; -use sphinx_packet::test_utils::fixtures::keygen; use sphinx_packet::SphinxPacket; use std::time::Duration; +use x25519_dalek::{PublicKey, StaticSecret}; + +fn keygen() -> (StaticSecret, PublicKey) { + let private_key = StaticSecret::random(); + let public_key = PublicKey::from(&private_key); + (private_key, public_key) +} fn make_packet_copy(packet: &SphinxPacket) -> 
SphinxPacket { SphinxPacket::from_bytes(&packet.to_bytes()).unwrap() diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000..9b4ca61 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,3 @@ +allow-unwrap-in-tests = true +allow-expect-in-tests = true +allow-panic-in-tests = true \ No newline at end of file diff --git a/src/constants.rs b/src/constants.rs index b08effe..7bbf390 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -29,7 +29,6 @@ pub const EXPANDED_SHARED_SECRET_LENGTH: usize = crypto::STREAM_CIPHER_KEY_SIZE + BLINDING_FACTOR_SIZE + REPLAY_TAG_SIZE; -pub const HKDF_INPUT_SEED: &[u8; 97] = b"Dwste mou enan moxlo arketa makru kai ena upomoxlio gia na ton topothetisw kai tha kinisw thn gh."; pub const STREAM_CIPHER_OUTPUT_LENGTH: usize = (NODE_META_INFO_SIZE + HEADER_INTEGRITY_MAC_SIZE) * (MAX_PATH_LENGTH + 1); pub const DESTINATION_ADDRESS_LENGTH: usize = 2 * SECURITY_PARAMETER; @@ -37,6 +36,7 @@ pub const NODE_ADDRESS_LENGTH: usize = 2 * SECURITY_PARAMETER; pub const IDENTIFIER_LENGTH: usize = SECURITY_PARAMETER; pub const INTEGRITY_MAC_KEY_SIZE: usize = SECURITY_PARAMETER; pub const HEADER_INTEGRITY_MAC_SIZE: usize = SECURITY_PARAMETER; +pub const PAYLOAD_KEY_SEED_SIZE: usize = SECURITY_PARAMETER; pub const PAYLOAD_KEY_SIZE: usize = 192; // must be 192 because of the Lioness implementation we're using pub const DELAY_LENGTH: usize = 8; // how many bytes we will use to encode the delay pub const NODE_META_INFO_SIZE: usize = @@ -48,6 +48,21 @@ pub const PAYLOAD_SIZE: usize = 1024; pub const VERSION_LENGTH: usize = 3; // since version is represented as 3 u8 values: major, minor and patch // we need the single byte to detect padding length +#[deprecated(note = "use EXPANDED_SHARED_SECRET_HKDF_INFO instead")] +pub const HKDF_INPUT_SEED: &[u8] = EXPANDED_SHARED_SECRET_HKDF_INFO; + +// content due to legacy reasons +pub const EXPANDED_SHARED_SECRET_HKDF_INFO: &[u8] = + b"Dwste mou enan moxlo arketa makru kai ena upomoxlio gia na ton topothetisw 
kai tha kinisw thn gh."; + +// unfortunately for legacy compatibility reasons, we have to be using an empty salt +// (nodes need to be able to unconditionally recover version information from the header in order to +// decide on further processing). this value is behind the initial hkdf +pub const EXPANDED_SHARED_SECRET_HKDF_SALT: &[u8] = b""; + +pub const PAYLOAD_KEY_HKDF_INFO: &[u8] = b"sphinx-payload-key-V01-CS01-HKDF:SHA256-INFO"; +pub const PAYLOAD_KEY_HKDF_SALT: &[u8] = b"sphinx-payload-key-V01-CS01-HKDF:SHA256-SALT"; + pub type HeaderIntegrityMacSize = U16; // TODO: to replace with Blake3 diff --git a/src/header/keys.rs b/src/header/keys.rs index afdf033..9a21581 100644 --- a/src/header/keys.rs +++ b/src/header/keys.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::constants::{INTEGRITY_MAC_KEY_SIZE, PAYLOAD_KEY_SIZE}; +use crate::constants::INTEGRITY_MAC_KEY_SIZE; use crate::crypto::STREAM_CIPHER_KEY_SIZE; use crate::header::shared_secret::{expand_shared_secret, ExpandedSharedSecret}; use crate::route::Node; @@ -21,7 +21,6 @@ use x25519_dalek::{PublicKey, StaticSecret}; pub type StreamCipherKey = [u8; STREAM_CIPHER_KEY_SIZE]; pub type HeaderIntegrityMacKey = [u8; INTEGRITY_MAC_KEY_SIZE]; -pub type PayloadKey = [u8; PAYLOAD_KEY_SIZE]; pub struct KeyMaterial { pub initial_shared_secret: PublicKey, @@ -32,24 +31,24 @@ impl KeyMaterial { // derive shared keys, group elements, blinding factors pub fn derive(route: &[Node], initial_secret: &StaticSecret) -> Self { let initial_shared_secret = PublicKey::from(initial_secret); - let mut expanded_shared_secrets = Vec::with_capacity(route.len()); - let mut blinding_factors = vec![initial_secret.clone()]; + let mut expanded_shared_secrets = Vec::new(); + let mut blinding_factors = Vec::new(); + for (i, node) in route.iter().enumerate() { - let shared_key = blinding_factors - .iter() - .fold(node.pub_key, |acc, blinding_factor| { - // 
a nasty hack to convert `SharedSecret` into `PublicKey`, - // so that we could call `diffie_hellman` repeatedly - PublicKey::from(blinding_factor.diffie_hellman(&acc).to_bytes()) - }); - let expanded_shared_secret = expand_shared_secret(shared_key.as_bytes()); + let mut acc = node.pub_key; - // it's not the last iteration - if i != route.len() + 1 { - blinding_factors.push(expanded_shared_secret.blinding_factor()); + // avoid having to clone the initial secret by just chaining iterators + for blinding_factor in std::iter::once(initial_secret).chain(&blinding_factors) { + let shared_secret = blinding_factor.diffie_hellman(&acc); + acc = PublicKey::from(shared_secret.to_bytes()); } + let expanded_shared_secret = expand_shared_secret(acc.as_bytes()); + + if i != route.len() - 1 { + blinding_factors.push(expanded_shared_secret.blinding_factor()); + } expanded_shared_secrets.push(expanded_shared_secret); } diff --git a/src/header/mod.rs b/src/header/mod.rs index ac89289..adfd920 100644 --- a/src/header/mod.rs +++ b/src/header/mod.rs @@ -15,13 +15,14 @@ use crate::constants::HEADER_INTEGRITY_MAC_SIZE; use crate::header::delays::Delay; use crate::header::filler::Filler; -use crate::header::keys::{KeyMaterial, PayloadKey}; +use crate::header::keys::KeyMaterial; use crate::header::routing::{EncapsulatedRoutingInformation, ENCRYPTED_ROUTING_INFO_SIZE}; use crate::header::shared_secret::{ExpandSecret, ExpandedSharedSecret}; use crate::packet::ProcessedPacketData; +use crate::payload::key::{derive_payload_key, PayloadKey, PayloadKeySeed}; use crate::payload::Payload; use crate::route::{Destination, DestinationAddressBytes, Node, NodeAddressBytes, SURBIdentifier}; -use crate::version::{Version, CURRENT_VERSION, UPDATED_LEGACY_VERSION}; +use crate::version::Version; use crate::{Error, ErrorKind, ProcessedPacket, Result, SphinxPacket}; use x25519_dalek::{PublicKey, StaticSecret}; @@ -99,44 +100,50 @@ impl ProcessedHeader { } impl SphinxHeader { - // needs client's secret key, how 
should we inject this? - // needs to deal with SURBs too at some point - pub fn new( + #[cfg(test)] + pub(crate) fn new_current( initial_secret: &StaticSecret, route: &[Node], delays: &[Delay], destination: &Destination, - ) -> (Self, Vec) { + ) -> BuiltHeader { let key_material = keys::KeyMaterial::derive(route, initial_secret); - Self::build_header(key_material, route, delays, destination, CURRENT_VERSION) + Self::build_header( + key_material, + route, + delays, + destination, + crate::version::CURRENT_VERSION, + ) } + #[cfg(test)] #[deprecated] #[allow(deprecated)] - pub fn new_legacy( + pub(crate) fn new_legacy( initial_secret: &StaticSecret, route: &[Node], delays: &[Delay], destination: &Destination, - ) -> (Self, Vec) { + ) -> BuiltHeader { let key_material = keys::KeyMaterial::derive_legacy(route, initial_secret); Self::build_header( key_material, route, delays, destination, - UPDATED_LEGACY_VERSION, + crate::version::UPDATED_LEGACY_VERSION, ) } #[allow(deprecated)] - pub fn new_versioned( + pub(crate) fn new_versioned( initial_secret: &StaticSecret, route: &[Node], delays: &[Delay], destination: &Destination, version: Version, - ) -> (Self, Vec) { + ) -> BuiltHeader { let key_material = if version.is_legacy() { keys::KeyMaterial::derive_legacy(route, initial_secret) } else { @@ -151,29 +158,19 @@ impl SphinxHeader { delays: &[Delay], destination: &Destination, version: Version, - ) -> (Self, Vec) { + ) -> BuiltHeader { let filler_string = Filler::new(&key_material.expanded_shared_secrets[..route.len() - 1]); - let routing_info = Box::new(routing::EncapsulatedRoutingInformation::new( + let routing_info = EncapsulatedRoutingInformation::new( route, destination, delays, &key_material.expanded_shared_secrets, filler_string, version, - )); + ); // encapsulate header.routing information, compute MACs - ( - SphinxHeader { - shared_secret: key_material.initial_shared_secret, - routing_info, - }, - key_material - .expanded_shared_secrets - .iter() - .map(|expanded| 
*expanded.payload_key()) - .collect(), - ) + BuiltHeader::new(version, key_material, routing_info) } // note: this method is currently removed because there's too many branches to support @@ -355,11 +352,7 @@ impl SphinxHeader { let shared_secret = PublicKey::from(shared_secret_bytes); // the rest are for the encapsulated routing info - let encapsulated_routing_info_bytes = bytes[32..HEADER_SIZE].to_vec(); - - let routing_info = Box::new(EncapsulatedRoutingInformation::from_bytes( - &encapsulated_routing_info_bytes, - )?); + let routing_info = Box::new(EncapsulatedRoutingInformation::from_bytes(&bytes[32..])?); Ok(SphinxHeader { shared_secret, @@ -389,6 +382,60 @@ impl SphinxHeader { } } +pub(crate) struct BuiltHeader { + header: SphinxHeader, + version: Version, + expanded_secrets: Vec, +} + +impl BuiltHeader { + fn new( + version: Version, + key_material: KeyMaterial, + routing_information: EncapsulatedRoutingInformation, + ) -> Self { + BuiltHeader { + header: SphinxHeader { + shared_secret: key_material.initial_shared_secret, + routing_info: Box::new(routing_information), + }, + version, + expanded_secrets: key_material.expanded_shared_secrets, + } + } + + // depending on the version either use the initial hkdf output as payload keys + // or extract the seed and run it through another hkdf + pub(crate) fn derive_payload_keys(&self) -> Vec { + if self.version.expects_legacy_full_payload_keys() { + self.legacy_full_payload_keys() + } else { + self.expanded_secrets + .iter() + .map(|s| derive_payload_key(s.payload_key_seed())) + .collect() + } + } + + pub(crate) fn legacy_full_payload_keys(&self) -> Vec { + self.expanded_secrets + .iter() + .map(|s| *s.legacy_payload_key()) + .collect() + } + + pub(crate) fn payload_key_seeds(&self) -> Vec { + self.expanded_secrets + .iter() + .map(|s| *s.payload_key_seed()) + .collect() + } + + pub(crate) fn into_header(self) -> SphinxHeader { + self.header + } +} + #[cfg(test)] mod create_and_process_sphinx_packet_header { use 
super::*; @@ -422,8 +469,9 @@ mod create_and_process_sphinx_packet_header { let average_delay = 1; let delays = delays::generate_from_average_duration(route.len(), Duration::from_secs(average_delay)); - let (sphinx_header, _) = - SphinxHeader::new(&initial_secret, &route, &delays, &route_destination); + let sphinx_header = + SphinxHeader::new_current(&initial_secret, &route, &delays, &route_destination) + .into_header(); //let (new_header, next_hop_address, _) = sphinx_header.process(node1_sk).unwrap(); let new_header = match sphinx_header.process(&node1_sk).unwrap().data { @@ -521,8 +569,9 @@ mod create_and_process_sphinx_packet_header { let average_delay = 1; let delays = delays::generate_from_average_duration(route.len(), Duration::from_secs(average_delay)); - let (sphinx_header, _) = - SphinxHeader::new_legacy(&initial_secret, &route, &delays, &route_destination); + let sphinx_header = + SphinxHeader::new_legacy(&initial_secret, &route, &delays, &route_destination) + .into_header(); //let (new_header, next_hop_address, _) = sphinx_header.process(node1_sk).unwrap(); let new_header = match sphinx_header @@ -632,7 +681,7 @@ mod unwrap_routing_information { } => { assert_eq!( routing_info[2..2 + NODE_ADDRESS_LENGTH], - next_hop_address.as_bytes() + next_hop_address.to_bytes() ); assert_eq!( routing_info @@ -683,7 +732,8 @@ mod unwrapping_using_previously_expanded_shared_secret { let average_delay = 1; let delays = delays::generate_from_average_duration(route.len(), Duration::from_secs(average_delay)); - let (sphinx_header, _) = SphinxHeader::new(&initial_secret, &route, &delays, &destination); + let sphinx_header = + SphinxHeader::new_current(&initial_secret, &route, &delays, &destination).into_header(); let initial_secret = sphinx_header.shared_secret; let normally_unwrapped = match sphinx_header.clone().process(&node1_sk).unwrap().data { @@ -727,7 +777,8 @@ mod unwrapping_using_previously_expanded_shared_secret { let average_delay = 1; let delays = 
delays::generate_from_average_duration(route.len(), Duration::from_secs(average_delay)); - let (sphinx_header, _) = SphinxHeader::new(&initial_secret, &route, &delays, &destination); + let sphinx_header = + SphinxHeader::new_current(&initial_secret, &route, &delays, &destination).into_header(); let initial_secret = sphinx_header.shared_secret; let normally_unwrapped = sphinx_header.clone().process(&node1_sk).unwrap(); diff --git a/src/header/routing/mod.rs b/src/header/routing/mod.rs index 3d7aa1f..27e61b6 100644 --- a/src/header/routing/mod.rs +++ b/src/header/routing/mod.rs @@ -106,7 +106,7 @@ impl EncapsulatedRoutingInformation { route .iter() .skip(1) // we don't want the first element as person creating the packet knows the address of the first hop - .map(|node| node.address.as_bytes()) // we only care about the address field + .map(|node| node.address.to_bytes()) // we only care about the address field .zip( // we need both route (i.e. address field) and corresponding keys of the PREVIOUS hop expanded_shared_secrets diff --git a/src/header/routing/nodes.rs b/src/header/routing/nodes.rs index 09ad9c2..f1d3277 100644 --- a/src/header/routing/nodes.rs +++ b/src/header/routing/nodes.rs @@ -27,6 +27,7 @@ use crate::header::routing::{ }; use crate::header::shared_secret::ExpandedSharedSecret; use crate::header::{ProcessedHeader, ProcessedHeaderData, SphinxHeader}; +use crate::payload::key::derive_payload_key; use crate::route::{DestinationAddressBytes, NodeAddressBytes, SURBIdentifier}; use crate::utils; use crate::version::Version; @@ -72,7 +73,7 @@ impl RoutingInformation { fn concatenate_components(self) -> Vec { std::iter::once(self.flag) .chain(self.version.to_bytes().iter().copied()) - .chain(self.node_address.as_bytes_ref().iter().copied()) + .chain(self.node_address.as_bytes().iter().copied()) .chain(self.delay.to_bytes().iter().copied()) .chain(self.header_integrity_mac.into_inner()) .chain(self.next_routing_information.iter().copied()) @@ -215,6 +216,13 
@@ impl ParsedRawRoutingInformation { shared_secret: PublicKey, expanded_shared_secret: &ExpandedSharedSecret, ) -> ProcessedHeader { + let version = self.version; + let payload_key = if version.expects_legacy_full_payload_keys() { + *expanded_shared_secret.legacy_payload_key() + } else { + derive_payload_key(expanded_shared_secret.payload_key_seed()) + }; + match self.data { ParsedRawRoutingInformationData::ForwardHop { next_hop_address, @@ -225,8 +233,8 @@ impl ParsedRawRoutingInformation { let new_shared_secret = expanded_shared_secret.blind_shared_secret(shared_secret); ProcessedHeader { - payload_key: *expanded_shared_secret.payload_key(), - version: self.version, + payload_key, + version, data: ProcessedHeaderData::ForwardHop { updated_header: SphinxHeader { shared_secret: new_shared_secret, @@ -241,8 +249,8 @@ impl ParsedRawRoutingInformation { destination, identifier, } => ProcessedHeader { - payload_key: *expanded_shared_secret.payload_key(), - version: self.version, + payload_key, + version, data: ProcessedHeaderData::FinalHop { destination, identifier, @@ -258,6 +266,10 @@ impl ParsedRawRoutingInformation { shared_secret: PublicKey, expanded_shared_secret: &ExpandedSharedSecret, ) -> ProcessedHeader { + // legacy processing only ever used old key derivation + let version = self.version; + let payload_key = *expanded_shared_secret.legacy_payload_key(); + match self.data { ParsedRawRoutingInformationData::ForwardHop { next_hop_address, @@ -269,8 +281,8 @@ impl ParsedRawRoutingInformation { expanded_shared_secret.legacy_blind_share_secret(shared_secret); ProcessedHeader { - payload_key: *expanded_shared_secret.payload_key(), - version: self.version, + payload_key, + version, data: ProcessedHeaderData::ForwardHop { updated_header: SphinxHeader { shared_secret: new_shared_secret, @@ -285,8 +297,8 @@ impl ParsedRawRoutingInformation { destination, identifier, } => ProcessedHeader { - payload_key: *expanded_shared_secret.payload_key(), - version: self.version, 
+ payload_key, + version, data: ProcessedHeaderData::FinalHop { destination, identifier, @@ -406,7 +418,7 @@ mod preparing_header_layer { let concatenated_materials: Vec = [ vec![FORWARD_HOP], version.to_bytes().to_vec(), - node_address.as_bytes().to_vec(), + node_address.to_bytes().to_vec(), delay.to_bytes().to_vec(), inner_layer_routing.integrity_mac.as_bytes().to_vec(), inner_layer_routing @@ -475,7 +487,7 @@ mod encrypting_routing_information { let encryption_data = [ vec![flag], version.to_bytes().to_vec(), - address.as_bytes().to_vec(), + address.to_bytes().to_vec(), delay.to_bytes().to_vec(), mac.as_bytes().to_vec(), next_routing.to_vec(), @@ -539,7 +551,7 @@ mod parse_decrypted_routing_information { let data = [ vec![flag], version.to_bytes().to_vec(), - address_fixture.as_bytes().to_vec(), + address_fixture.to_bytes().to_vec(), delay.to_bytes().to_vec(), integrity_mac.as_bytes().to_vec(), next_routing_information.to_vec(), diff --git a/src/header/shared_secret.rs b/src/header/shared_secret.rs index 0ac7eec..d85442d 100644 --- a/src/header/shared_secret.rs +++ b/src/header/shared_secret.rs @@ -13,11 +13,14 @@ // limitations under the License. 
use crate::constants::{ - BLINDING_FACTOR_SIZE, EXPANDED_SHARED_SECRET_LENGTH, HKDF_INPUT_SEED, INTEGRITY_MAC_KEY_SIZE, - PAYLOAD_KEY_SIZE, REPLAY_TAG_SIZE, + BLINDING_FACTOR_SIZE, EXPANDED_SHARED_SECRET_HKDF_INFO, EXPANDED_SHARED_SECRET_HKDF_SALT, + EXPANDED_SHARED_SECRET_LENGTH, INTEGRITY_MAC_KEY_SIZE, PAYLOAD_KEY_SEED_SIZE, PAYLOAD_KEY_SIZE, + REPLAY_TAG_SIZE, }; use crate::crypto::STREAM_CIPHER_KEY_SIZE; +use crate::header::keys::{HeaderIntegrityMacKey, StreamCipherKey}; use crate::header::SphinxHeader; +use crate::payload::key::PayloadKey; use arrayref::array_ref; use hkdf::Hkdf; use sha2::Sha256; @@ -54,18 +57,18 @@ impl ExpandedSharedSecret { // replay tag has not been used before so it **has to** be created last /// Output of the hρ random oracle - pub(crate) fn stream_cipher_key(&self) -> &[u8; STREAM_CIPHER_KEY_SIZE] { + pub(crate) fn stream_cipher_key(&self) -> &StreamCipherKey { array_ref!(&self.0, 0, STREAM_CIPHER_KEY_SIZE) } /// Output of the hμ random oracle - pub(crate) fn header_integrity_hmac_key(&self) -> &[u8; INTEGRITY_MAC_KEY_SIZE] { + pub(crate) fn header_integrity_hmac_key(&self) -> &HeaderIntegrityMacKey { array_ref!(&self.0, STREAM_CIPHER_KEY_SIZE, INTEGRITY_MAC_KEY_SIZE) } - /// Output of the hπ random oracle + /// Legacy output of the hπ random oracle // NOTE: currently we expand it to full PRP key - pub(crate) fn payload_key(&self) -> &[u8; PAYLOAD_KEY_SIZE] { + pub(crate) fn legacy_payload_key(&self) -> &PayloadKey { array_ref!( &self.0, STREAM_CIPHER_KEY_SIZE + INTEGRITY_MAC_KEY_SIZE, @@ -73,6 +76,15 @@ impl ExpandedSharedSecret { ) } + /// Output of the hπ random oracle + pub(crate) fn payload_key_seed(&self) -> &[u8; PAYLOAD_KEY_SEED_SIZE] { + array_ref!( + &self.0, + STREAM_CIPHER_KEY_SIZE + INTEGRITY_MAC_KEY_SIZE, + PAYLOAD_KEY_SEED_SIZE + ) + } + /// Output of the hb random oracle pub(crate) fn blinding_factor_bytes(&self) -> &[u8; BLINDING_FACTOR_SIZE] { array_ref!( @@ -110,12 +122,13 @@ impl ExpandedSharedSecret { } pub(crate) 
fn expand_shared_secret(shared_secret: &[u8; 32]) -> ExpandedSharedSecret { - let hkdf = Hkdf::::new(None, shared_secret); + let hkdf = Hkdf::::new(Some(EXPANDED_SHARED_SECRET_HKDF_SALT), shared_secret); let mut output = [0u8; EXPANDED_SHARED_SECRET_LENGTH]; // SAFETY: the length of the provided okm is within the allowed range #[allow(clippy::unwrap_used)] - hkdf.expand(HKDF_INPUT_SEED, &mut output).unwrap(); + hkdf.expand(EXPANDED_SHARED_SECRET_HKDF_INFO, &mut output) + .unwrap(); ExpandedSharedSecret(output) } @@ -165,7 +178,7 @@ mod expanding_shared_secret { let expanded = expand_shared_secret(ss.as_bytes()); assert_eq!(expanded.stream_cipher_key(), &expected_sck); assert_eq!(expanded.header_integrity_hmac_key(), &expected_hihk); - assert_eq!(expanded.payload_key(), &expected_pk); + assert_eq!(expanded.legacy_payload_key(), &expected_pk); assert_eq!(expanded.blinding_factor_bytes(), &expected_bf); } } diff --git a/src/packet/builder.rs b/src/packet/builder.rs index 716a9c8..77f0873 100644 --- a/src/packet/builder.rs +++ b/src/packet/builder.rs @@ -45,23 +45,17 @@ impl<'a> SphinxPacketBuilder<'a> { destination: &Destination, delays: &[Delay], ) -> Result { - let (header, payload_keys) = match self.initial_secret.as_ref() { - Some(initial_secret) => SphinxHeader::new_versioned( - initial_secret, - route, - delays, - destination, - self.version, - ), - None => SphinxHeader::new_versioned( - &StaticSecret::random(), - route, - delays, - destination, - self.version, - ), + let initial_secret = match self.initial_secret.as_ref() { + Some(initial_secret) => initial_secret, + None => &StaticSecret::random(), }; + let built_header = + SphinxHeader::new_versioned(initial_secret, route, delays, destination, self.version); + + let payload_keys = built_header.derive_payload_keys(); + let header = built_header.into_header(); + // no need to check if plaintext has correct length as this check is already performed in payload encapsulation let payload = 
Payload::encapsulate_message(message.as_ref(), &payload_keys, self.payload_size)?; diff --git a/src/payload/key.rs b/src/payload/key.rs new file mode 100644 index 0000000..fc9d5ad --- /dev/null +++ b/src/payload/key.rs @@ -0,0 +1,74 @@ +// Copyright 2025 Nym Technologies SA +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::constants::{ + PAYLOAD_KEY_HKDF_INFO, PAYLOAD_KEY_HKDF_SALT, PAYLOAD_KEY_SEED_SIZE, PAYLOAD_KEY_SIZE, +}; +use hkdf::Hkdf; +use sha2::Sha256; +use std::borrow::Borrow; + +pub type PayloadKey = [u8; PAYLOAD_KEY_SIZE]; +pub type PayloadKeySeed = [u8; PAYLOAD_KEY_SEED_SIZE]; + +pub fn derive_payload_key(seed: &[u8; PAYLOAD_KEY_SEED_SIZE]) -> PayloadKey { + let hkdf = Hkdf::::new(Some(PAYLOAD_KEY_HKDF_SALT), seed); + + let mut output = [0u8; PAYLOAD_KEY_SIZE]; + + // SAFETY: the length of the provided okm is within the allowed range + #[allow(clippy::unwrap_used)] + hkdf.expand(PAYLOAD_KEY_HKDF_INFO, &mut output).unwrap(); + + output +} + +// helper trait to allow us to use either PayloadKey (as reference) directly or the seed (to create owned key) +pub trait SphinxPayloadKey<'a> { + type Key: Borrow; + + fn payload_key(&'a self) -> Self::Key; +} + +impl<'a> SphinxPayloadKey<'a> for &'a PayloadKey { + type Key = &'a PayloadKey; + + fn payload_key(&self) -> Self::Key { + self + } +} + +impl<'a> SphinxPayloadKey<'a> for PayloadKey { + type Key = &'a PayloadKey; + + fn payload_key(&'a self) -> Self::Key { + self + } +} + +impl 
SphinxPayloadKey<'_> for &PayloadKeySeed { + type Key = PayloadKey; + + fn payload_key(&self) -> Self::Key { + derive_payload_key(self) + } +} + +impl SphinxPayloadKey<'_> for PayloadKeySeed { + type Key = PayloadKey; + + fn payload_key(&self) -> Self::Key { + derive_payload_key(self) + } +} diff --git a/src/payload/mod.rs b/src/payload/mod.rs index 9987933..783e81f 100644 --- a/src/payload/mod.rs +++ b/src/payload/mod.rs @@ -13,12 +13,14 @@ // limitations under the License. use crate::constants::SECURITY_PARAMETER; -use crate::header::keys::PayloadKey; +use crate::payload::key::{PayloadKey, SphinxPayloadKey}; use crate::{Error, ErrorKind, Result}; -use arrayref::array_ref; use blake2::VarBlake2b; use chacha::ChaCha; // we might want to swap this one with a different implementation use lioness::Lioness; +use std::borrow::Borrow; + +pub mod key; // payload consists of security parameter long zero-padding, plaintext and '1' byte to indicate start of padding // (it can optionally be followed by zero-padding @@ -36,17 +38,20 @@ impl Payload { /// Tries to encapsulate provided plaintext message inside a sphinx payload adding /// as many layers of encryption as there are keys provided. /// Note that the encryption layers are going to be added in *reverse* order! 
- pub fn encapsulate_message( + pub fn encapsulate_message( plaintext_message: &[u8], - payload_keys: &[PayloadKey], + payload_keys: &[K], payload_size: usize, - ) -> Result { + ) -> Result + where + K: for<'a> SphinxPayloadKey<'a>, + { Self::validate_parameters(payload_size, plaintext_message.len())?; let mut payload = Self::set_final_payload(plaintext_message, payload_size); // remember that we need to reverse the order of encryption for payload_key in payload_keys.iter().rev() { - payload = payload.add_encryption_layer(payload_key)?; + payload = payload.add_encryption_layer(payload_key.payload_key())?; } Ok(payload) @@ -100,12 +105,8 @@ impl Payload { } /// Tries to add an additional layer of encryption onto self. - fn add_encryption_layer(mut self, payload_enc_key: &PayloadKey) -> Result { - let lioness_cipher = Lioness::::new_raw(array_ref!( - payload_enc_key, - 0, - lioness::RAW_KEY_SIZE - )); + fn add_encryption_layer>(mut self, payload_key: P) -> Result { + let lioness_cipher = Lioness::::new_raw(payload_key.borrow()); if let Err(err) = lioness_cipher.encrypt(&mut self.0) { return Err(Error::new( @@ -117,12 +118,9 @@ impl Payload { } /// Tries to remove single layer of encryption from self. 
- pub fn unwrap(mut self, payload_key: &PayloadKey) -> Result { - let lioness_cipher = Lioness::::new_raw(array_ref!( - payload_key, - 0, - lioness::RAW_KEY_SIZE - )); + pub fn unwrap>(mut self, payload_key: P) -> Result { + let lioness_cipher = Lioness::::new_raw(payload_key.borrow()); + if let Err(err) = lioness_cipher.decrypt(&mut self.0) { return Err(Error::new( ErrorKind::InvalidPayload, @@ -132,10 +130,29 @@ impl Payload { Ok(self) } + // attempt to find the index of the element indicating starting of the padding AFTER the initial + // SECURITY_PARAMETER 0s got ignored + // NOTE: this method must only be called after ensuring the internal vector is longer than `PAYLOAD_OVERHEAD_SIZE` + fn find_start_of_padding(&self) -> Result { + let padded_plaintext = &self.0[SECURITY_PARAMETER..]; + padded_plaintext + .iter() + .rposition(|b| *b == 1) + .ok_or(Error::new( + ErrorKind::InvalidPayload, + "malformed payload - invalid trailing padding", + )) + } + /// After calling [`unwrap`] required number of times with correct `payload_keys`, tries to parse /// the resultant payload content into original encapsulated plaintext message. pub fn recover_plaintext(self) -> Result> { - debug_assert!(self.len() > PAYLOAD_OVERHEAD_SIZE); + if self.len() < PAYLOAD_OVERHEAD_SIZE { + return Err(Error::new( + ErrorKind::InvalidPayload, + "malformed payload - no leading zero padding present", + )); + } // assuming our payload is fully decrypted it has the following structure: // 00000.... 
(SECURITY_PARAMETER length) @@ -154,25 +171,16 @@ impl Payload { )); } - // only trailing padding present - let padded_plaintext = self + let padding_start = self.find_start_of_padding()?; + // take only bytes until the start of the padding (but not including it) + // and furthermore, remember to skip the initial 0s + + Ok(self .into_inner() .into_iter() .skip(SECURITY_PARAMETER) - .collect::>(); - - // we are looking for first occurrence of 1 in the tail and we get its index - if let Some(i) = padded_plaintext.iter().rposition(|b| *b == 1) { - // and now we only take bytes until that point (but not including it) - let plaintext = padded_plaintext.into_iter().take(i).collect(); - return Ok(plaintext); - } - - // our plaintext is invalid - Err(Error::new( - ErrorKind::InvalidPayload, - "malformed payload - invalid trailing padding", - )) + .take(padding_start) + .collect()) } fn into_inner(self) -> Vec { @@ -293,20 +301,6 @@ mod test_encapsulating_payload { use super::*; use crate::constants::PAYLOAD_KEY_SIZE; - #[test] - fn can_be_encapsulated_without_encryption() { - let message = vec![1u8, 16]; - let payload_size = 512; - let unencrypted_message = - Payload::encapsulate_message(&message, &[], payload_size).unwrap(); - - // should be equivalent to just setting final payload - assert_eq!( - unencrypted_message, - Payload::set_final_payload(&message, payload_size) - ) - } - #[test] fn works_with_single_encryption_layer() { let message = vec![1u8, 16]; diff --git a/src/route.rs b/src/route.rs index 2eaadc4..d1cd7fd 100644 --- a/src/route.rs +++ b/src/route.rs @@ -136,12 +136,12 @@ impl NodeAddressBytes { } /// View this `NodeAddressBytes` as an array of bytes. - pub fn as_bytes_ref(&self) -> &[u8; NODE_ADDRESS_LENGTH] { + pub fn as_bytes(&self) -> &[u8; NODE_ADDRESS_LENGTH] { &self.0 } /// Convert this `NodeAddressBytes` to an array of bytes. 
- pub fn as_bytes(&self) -> [u8; NODE_ADDRESS_LENGTH] { + pub fn to_bytes(&self) -> [u8; NODE_ADDRESS_LENGTH] { self.0 } } diff --git a/src/surb/mod.rs b/src/surb/mod.rs index 06ff6fb..3f24566 100644 --- a/src/surb/mod.rs +++ b/src/surb/mod.rs @@ -1,6 +1,6 @@ -use crate::constants::{NODE_ADDRESS_LENGTH, PAYLOAD_KEY_SIZE}; +use crate::constants::{NODE_ADDRESS_LENGTH, PAYLOAD_KEY_SEED_SIZE, PAYLOAD_KEY_SIZE}; use crate::header::delays::Delay; -use crate::header::keys::PayloadKey; +use crate::payload::key::{PayloadKey, PayloadKeySeed}; use crate::payload::Payload; use crate::route::{Destination, Node, NodeAddressBytes}; use crate::version::Version; @@ -10,6 +10,57 @@ use header::{SphinxHeader, HEADER_SIZE}; use std::fmt; use x25519_dalek::StaticSecret; +// legacy compatibility wrapper +#[derive(Debug)] +enum PayloadKeysMaterial { + DerivedKeys(Vec), + KeySeeds(Vec), +} + +impl PayloadKeysMaterial { + fn from_bytes(bytes: &[u8]) -> Result { + // given that our maximum path length is 5, payload key is 192 and key seed is 16, + // the maximum possible size of 'updated' surb seeds is 5*16 = 80, which is smaller than + // a single key, and thus we can use this information in order to determine which variant we should attempt to parse + if bytes.len() < PAYLOAD_KEY_SIZE { + // seeds + if bytes.len() % PAYLOAD_KEY_SEED_SIZE != 0 { + return Err(Error::new( + ErrorKind::InvalidSURB, + "bytes of invalid length provided", + )); + } + let seeds_count = bytes.len() / PAYLOAD_KEY_SEED_SIZE; + let mut payload_key_seeds = Vec::with_capacity(seeds_count); + for i in 0..seeds_count { + let mut payload_key = [0u8; PAYLOAD_KEY_SEED_SIZE]; + payload_key.copy_from_slice( + &bytes[i * PAYLOAD_KEY_SEED_SIZE..(i + 1) * PAYLOAD_KEY_SEED_SIZE], + ); + payload_key_seeds.push(payload_key); + } + Ok(PayloadKeysMaterial::KeySeeds(payload_key_seeds)) + } else { + // full keys + if bytes.len() % PAYLOAD_KEY_SIZE != 0 { + return Err(Error::new( + ErrorKind::InvalidSURB, + "bytes of invalid length 
provided", + )); + } + let key_count = bytes.len() / PAYLOAD_KEY_SIZE; + let mut payload_keys = Vec::with_capacity(key_count); + for i in 0..key_count { + let mut payload_key = [0u8; PAYLOAD_KEY_SIZE]; + payload_key + .copy_from_slice(&bytes[i * PAYLOAD_KEY_SIZE..(i + 1) * PAYLOAD_KEY_SIZE]); + payload_keys.push(payload_key); + } + Ok(PayloadKeysMaterial::DerivedKeys(payload_keys)) + } + } +} + /// A Single Use Reply Block (SURB) must have a pre-aggregated Sphinx header, /// the address of the first hop in the route of the SURB, and the key material /// used to layer encrypt the payload. @@ -17,7 +68,7 @@ use x25519_dalek::StaticSecret; pub struct SURB { SURB_header: header::SphinxHeader, first_hop_address: NodeAddressBytes, - payload_keys: Vec, + payload_keys_material: PayloadKeysMaterial, } impl fmt::Debug for SURB { @@ -25,7 +76,7 @@ impl fmt::Debug for SURB { f.debug_struct("SURB") .field("SURB_header", &self.SURB_header) .field("first_hop_address", &self.first_hop_address) - .field("payload_keys", &self.payload_keys) + .field("payload_keys_material", &self.payload_keys_material) .finish() } } @@ -82,7 +133,7 @@ impl SURB { } #[allow(deprecated)] - let (header, payload_keys) = header::SphinxHeader::new_versioned( + let built_header = header::SphinxHeader::new_versioned( &surb_initial_secret, &surb_route, &surb_delays, @@ -90,11 +141,23 @@ impl SURB { surb_material.version, ); - Ok(SURB { - SURB_header: header, - first_hop_address: first_hop.address, - payload_keys, - }) + if surb_material.version.expects_legacy_full_payload_keys() { + Ok(SURB { + first_hop_address: first_hop.address, + payload_keys_material: PayloadKeysMaterial::DerivedKeys( + built_header.legacy_full_payload_keys(), + ), + SURB_header: built_header.into_header(), + }) + } else { + Ok(SURB { + first_hop_address: first_hop.address, + payload_keys_material: PayloadKeysMaterial::KeySeeds( + built_header.payload_key_seeds(), + ), + SURB_header: built_header.into_header(), + }) + } } /// Function 
takes the precomputed surb header, layer encrypts the plaintext payload content @@ -109,24 +172,38 @@ impl SURB { // Note that Payload::encapsulate_message performs checks to verify whether the plaintext // is going to fit in the packet. - let payload = - Payload::encapsulate_message(plaintext_message, &self.payload_keys, payload_size)?; + let payload = match self.payload_keys_material { + PayloadKeysMaterial::DerivedKeys(keys) => { + Payload::encapsulate_message(plaintext_message, keys.as_slice(), payload_size)? + } + PayloadKeysMaterial::KeySeeds(seeds) => { + Payload::encapsulate_message(plaintext_message, &seeds, payload_size)? + } + }; Ok((SphinxPacket { header, payload }, self.first_hop_address)) } pub fn to_bytes(&self) -> Vec { - self.SURB_header + let initial_bytes = self + .SURB_header .to_bytes() .into_iter() - .chain(self.first_hop_address.as_bytes().iter().cloned()) - .chain(self.payload_keys.iter().flat_map(|x| x.iter()).cloned()) - .collect() + .chain(self.first_hop_address.to_bytes()); + + match &self.payload_keys_material { + PayloadKeysMaterial::DerivedKeys(keys) => initial_bytes + .chain(keys.iter().flat_map(|k| k.iter().copied())) + .collect(), + PayloadKeysMaterial::KeySeeds(seeds) => initial_bytes + .chain(seeds.iter().flat_map(|s| s.iter().copied())) + .collect(), + } } pub fn from_bytes(bytes: &[u8]) -> Result { - // SURB needs to contain AT LEAST a single payload key - if bytes.len() < HEADER_SIZE + NODE_ADDRESS_LENGTH + PAYLOAD_KEY_SIZE { + // SURB needs to contain AT LEAST a single payload key (or seed) + if bytes.len() < HEADER_SIZE + NODE_ADDRESS_LENGTH + PAYLOAD_KEY_SEED_SIZE { return Err(Error::new( ErrorKind::InvalidSURB, "not enough bytes provided to try to recover a SURB", @@ -135,35 +212,33 @@ impl SURB { let header_bytes = &bytes[..HEADER_SIZE]; let first_hop_bytes = &bytes[HEADER_SIZE..HEADER_SIZE + NODE_ADDRESS_LENGTH]; - let payload_keys_bytes = &bytes[HEADER_SIZE + NODE_ADDRESS_LENGTH..]; - // make sure that bytes of valid 
length were sent - if payload_keys_bytes.len() % PAYLOAD_KEY_SIZE != 0 { - return Err(Error::new( - ErrorKind::InvalidSURB, - "bytes of invalid length provided", - )); - } + let payload_keys_material_bytes = &bytes[HEADER_SIZE + NODE_ADDRESS_LENGTH..]; let SURB_header = SphinxHeader::from_bytes(header_bytes)?; let first_hop_address = NodeAddressBytes::try_from_byte_slice(first_hop_bytes)?; - - let key_count = payload_keys_bytes.len() / PAYLOAD_KEY_SIZE; - let mut payload_keys = Vec::with_capacity(key_count); - - for i in 0..key_count { - let mut payload_key = [0u8; PAYLOAD_KEY_SIZE]; - payload_key.copy_from_slice( - &payload_keys_bytes[i * PAYLOAD_KEY_SIZE..(i + 1) * PAYLOAD_KEY_SIZE], - ); - payload_keys.push(payload_key); - } + let payload_keys_material = PayloadKeysMaterial::from_bytes(payload_keys_material_bytes)?; Ok(SURB { SURB_header, first_hop_address, - payload_keys, + payload_keys_material, }) } + + pub fn first_hop(&self) -> NodeAddressBytes { + self.first_hop_address + } + + pub fn materials_count(&self) -> usize { + match &self.payload_keys_material { + PayloadKeysMaterial::DerivedKeys(keys) => keys.len(), + PayloadKeysMaterial::KeySeeds(seeds) => seeds.len(), + } + } + + pub fn uses_key_seeds(&self) -> bool { + matches!(self.payload_keys_material, PayloadKeysMaterial::KeySeeds(_)) + } } #[cfg(test)] @@ -171,14 +246,14 @@ mod prepare_and_use_process_surb { use super::*; use crate::constants::NODE_ADDRESS_LENGTH; use crate::header::{delays, HEADER_SIZE}; + use crate::version::{PAYLOAD_KEYS_SEEDS_VERSION, X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION}; use crate::{ packet::builder::DEFAULT_PAYLOAD_SIZE, test_utils::fixtures::{destination_fixture, keygen}, }; use std::time::Duration; - #[allow(non_snake_case)] - fn SURB_fixture() -> SURB { + fn surb_material_fixture() -> SURBMaterial { let (_, node1_pk) = keygen(); let node1 = Node { address: NodeAddressBytes::from_bytes([5u8; NODE_ADDRESS_LENGTH]), @@ -197,15 +272,27 @@ mod prepare_and_use_process_surb { let 
surb_route = vec![node1, node2, node3]; let surb_destination = destination_fixture(); - let surb_initial_secret = StaticSecret::random(); let surb_delays = delays::generate_from_average_duration(surb_route.len(), Duration::from_secs(3)); - SURB::new( - surb_initial_secret, - SURBMaterial::new(surb_route, surb_delays, surb_destination), - ) - .unwrap() + SURBMaterial::new(surb_route, surb_delays, surb_destination) + } + + #[allow(non_snake_case)] + fn legacy_SURB_fixture() -> SURB { + let surb_initial_secret = StaticSecret::random(); + let surb_material = + surb_material_fixture().with_version(X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION); + + SURB::new(surb_initial_secret, surb_material).unwrap() + } + + #[allow(non_snake_case)] + fn seeded_SURB_fixture() -> SURB { + let surb_initial_secret = StaticSecret::random(); + let surb_material = surb_material_fixture().with_version(PAYLOAD_KEYS_SEEDS_VERSION); + + SURB::new(surb_initial_secret, surb_material).unwrap() } #[test] @@ -228,21 +315,40 @@ mod prepare_and_use_process_surb { #[test] fn surb_header_has_correct_length() { - let pre_surb = SURB_fixture(); + let pre_surb = legacy_SURB_fixture(); assert_eq!(pre_surb.SURB_header.to_bytes().len(), HEADER_SIZE); } #[test] fn to_bytes_returns_correct_value() { - let pre_surb = SURB_fixture(); + let pre_surb = legacy_SURB_fixture(); + let PayloadKeysMaterial::DerivedKeys(keys) = &pre_surb.payload_keys_material else { + unreachable!() + }; + + let pre_surb_bytes = pre_surb.to_bytes(); + let expected = [ + pre_surb.SURB_header.to_bytes(), + [5u8; NODE_ADDRESS_LENGTH].to_vec(), + keys[0].to_vec(), + keys[1].to_vec(), + keys[2].to_vec(), + ] + .concat(); + assert_eq!(pre_surb_bytes, expected); + + let pre_surb = seeded_SURB_fixture(); + let PayloadKeysMaterial::KeySeeds(seeds) = &pre_surb.payload_keys_material else { + unreachable!() + }; let pre_surb_bytes = pre_surb.to_bytes(); let expected = [ pre_surb.SURB_header.to_bytes(), [5u8; NODE_ADDRESS_LENGTH].to_vec(), - 
pre_surb.payload_keys[0].to_vec(), - pre_surb.payload_keys[1].to_vec(), - pre_surb.payload_keys[2].to_vec(), + seeds[0].to_vec(), + seeds[1].to_vec(), + seeds[2].to_vec(), ] .concat(); assert_eq!(pre_surb_bytes, expected); @@ -250,7 +356,7 @@ mod prepare_and_use_process_surb { #[test] fn returns_error_is_payload_too_large() { - let pre_surb = SURB_fixture(); + let pre_surb = legacy_SURB_fixture(); let plaintext_message = vec![42u8; 5000]; let expected = ErrorKind::InvalidPayload; @@ -262,8 +368,8 @@ mod prepare_and_use_process_surb { #[test] #[allow(non_snake_case)] - fn can_be_converted_to_and_from_bytes() { - let dummy_SURB = SURB_fixture(); + fn can_be_converted_to_and_from_bytes_with_legacy_keys() { + let dummy_SURB = legacy_SURB_fixture(); let bytes = dummy_SURB.to_bytes(); let recovered_SURB = SURB::from_bytes(&bytes).unwrap(); @@ -271,11 +377,53 @@ mod prepare_and_use_process_surb { dummy_SURB.first_hop_address, recovered_SURB.first_hop_address ); - for i in 0..dummy_SURB.payload_keys.len() { - assert_eq!( - dummy_SURB.payload_keys[i].to_vec(), - recovered_SURB.payload_keys[i].to_vec() - ) + + let PayloadKeysMaterial::DerivedKeys(original_keys) = &dummy_SURB.payload_keys_material + else { + unreachable!() + }; + + let PayloadKeysMaterial::DerivedKeys(recovered_keys) = + &recovered_SURB.payload_keys_material + else { + unreachable!() + }; + + for i in 0..original_keys.len() { + assert_eq!(original_keys[i], recovered_keys[i]) + } + + // TODO: saner way of comparing headers... 
+        assert_eq!(
+            dummy_SURB.SURB_header.to_bytes(),
+            recovered_SURB.SURB_header.to_bytes()
+        );
+    }
+
+    #[test]
+    #[allow(non_snake_case)]
+    fn can_be_converted_to_and_from_bytes_with_key_seeds() {
+        let dummy_SURB = seeded_SURB_fixture();
+        let bytes = dummy_SURB.to_bytes();
+        let recovered_SURB = SURB::from_bytes(&bytes).unwrap();
+
+        assert_eq!(
+            dummy_SURB.first_hop_address,
+            recovered_SURB.first_hop_address
+        );
+
+        let PayloadKeysMaterial::KeySeeds(original_seeds) = &dummy_SURB.payload_keys_material
+        else {
+            unreachable!()
+        };
+
+        let PayloadKeysMaterial::KeySeeds(recovered_seeds) = &recovered_SURB.payload_keys_material
+        else {
+            unreachable!()
+        };
+
+        for i in 0..original_seeds.len() {
+            assert_eq!(original_seeds[i], recovered_seeds[i])
         }
 
         // TODO: saner way of comparing headers...
diff --git a/src/version.rs b/src/version.rs
index 784defb..bbfa5f3 100644
--- a/src/version.rs
+++ b/src/version.rs
@@ -20,12 +20,16 @@ use crate::constants::VERSION_LENGTH;
 pub const INITIAL_LEGACY_VERSION: Version = Version(1);
 pub const UPDATED_LEGACY_VERSION: Version = Version(257);
 
-pub const CURRENT_VERSION: Version = Version(258);
+pub const X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION: Version = Version(258);
+pub const PAYLOAD_KEYS_SEEDS_VERSION: Version = Version(259);
+
+pub const CURRENT_VERSION: Version = PAYLOAD_KEYS_SEEDS_VERSION;
 
 pub const KNOWN_VERSIONS: &[Version] = &[
     INITIAL_LEGACY_VERSION,
     UPDATED_LEGACY_VERSION,
-    CURRENT_VERSION,
+    X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION,
+    PAYLOAD_KEYS_SEEDS_VERSION,
 ];
 
 #[derive(Debug, Copy, Clone, PartialEq)]
@@ -44,6 +48,11 @@ impl Version {
         self == &INITIAL_LEGACY_VERSION || self == &UPDATED_LEGACY_VERSION
     }
 
+    // as opposed to using payload key seed to derive the keys
+    pub fn expects_legacy_full_payload_keys(&self) -> bool {
+        self.is_legacy() || self == &X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION
+    }
+
     // extra byte comes from the legacy interpretation
     pub fn from_bytes(bytes: [u8; VERSION_LENGTH]) -> Version {
debug_assert_eq!(bytes[0], 0);