From f4b101f2c507f5c1db254a20d457aabd46529319 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 09:45:56 +0200 Subject: [PATCH 01/91] Erasure code + split encryption --- Cargo.lock | 144 +++++++++++++++++- fastcrypto-tbls/Cargo.toml | 2 +- fastcrypto-tbls/src/ecies_v1.rs | 77 ++++++++++ fastcrypto-tbls/src/tests/ecies_v1_tests.rs | 7 + .../src/threshold_schnorr/batch_avss.rs | 2 + .../src/threshold_schnorr/reed_solomon.rs | 133 ++++++++++++++++ 6 files changed, 356 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 490568cfeb..22c9d628bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -63,6 +63,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -566,6 +577,12 @@ dependencies = [ "hex-conservative", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.11.0" @@ -1425,8 +1442,8 @@ dependencies = [ "hex", "itertools 0.10.5", "rand 0.8.5", + "reed-solomon-erasure", "serde", - "serde-big-array", "sha3", "tap", "tracing", @@ -1753,13 +1770,22 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] + [[package]] name = "hashbrown" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + 
"ahash 0.8.12", ] [[package]] @@ -2086,6 +2112,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if 1.0.4", +] + [[package]] name = "ipnet" version = "2.12.0" @@ -2214,12 +2249,30 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + [[package]] name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lru" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "lru" version = "0.16.3" @@ -2391,7 +2444,7 @@ checksum = "b285c575532a33ef6fdd3a57640d0b1c70e6ca48644d6df7bbd4b7a0cfbbb12d" dependencies = [ "bitvec", "either", - "lru", + "lru 0.16.3", "num-bigint 0.4.6", "num-integer", "num-modular", @@ -2475,6 +2528,31 @@ dependencies = [ "group 0.13.0", ] +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if 1.0.4", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + [[package]] name = "pasta_curves" version = "0.5.1" @@ -2693,7 +2771,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags", + "bitflags 2.11.0", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -2919,6 +2997,28 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "reed-solomon-erasure" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7263373d500d4d4f505d43a2a662d475a894aa94503a1ee28e9188b5f3960d4f" +dependencies = [ + "libm", + "lru 0.7.8", + "parking_lot", + "smallvec", + "spin", +] + [[package]] name = "regex" version = "1.12.3" @@ -3080,7 +3180,7 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys", @@ -3179,6 +3279,12 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "sec1" version = "0.3.0" @@ -3776,7 +3882,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.11.0", "bytes", "futures-util", "http", @@ -4084,7 +4190,7 @@ version = "0.244.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags", + "bitflags 2.11.0", "hashbrown 0.15.5", "indexmap", "semver", @@ -4119,6 +4225,22 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + [[package]] name = "winapi-util" version = "0.1.11" @@ -4128,6 +4250,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-link" version = "0.2.1" @@ -4348,7 +4476,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags", + "bitflags 2.11.0", "indexmap", "log", "serde", diff --git a/fastcrypto-tbls/Cargo.toml b/fastcrypto-tbls/Cargo.toml index 6ce99b7a31..9a9f2bc20e 100644 --- a/fastcrypto-tbls/Cargo.toml +++ b/fastcrypto-tbls/Cargo.toml @@ -23,7 +23,7 @@ zeroize.workspace = true itertools = "0.10.5" hex = "0.4.3" tap = { version = "1.0.1", features = [] } -serde-big-array = "0.5.1" +reed-solomon-erasure = "6.0.0" [dev-dependencies] criterion = "0.5.1" diff --git a/fastcrypto-tbls/src/ecies_v1.rs b/fastcrypto-tbls/src/ecies_v1.rs index 40b2232951..6a24d26324 100644 --- a/fastcrypto-tbls/src/ecies_v1.rs +++ 
b/fastcrypto-tbls/src/ecies_v1.rs @@ -8,6 +8,7 @@ use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{FiatShamirChallenge, GroupElement, HashToGroupElement, Scalar}; use fastcrypto::traits::{AllowedRng, ToFromBytes}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use std::marker::PhantomData; use typenum::consts::{U16, U32}; use typenum::Unsigned; use zeroize::{Zeroize, ZeroizeOnDrop}; @@ -49,6 +50,19 @@ pub struct MultiRecipientEncryption { proof: DdhTupleNizk, } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SharedComponents { + c: G, + c_hat: G, + proof: DdhTupleNizk, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct EncryptedPart { + enc: Vec, + g: PhantomData, +} + impl MultiRecipientEncryption where ::ScalarType: FiatShamirChallenge + Zeroize, @@ -194,6 +208,24 @@ where &self.proof } + pub fn into_parts(self) -> (SharedComponents, Vec>) { + let MultiRecipientEncryption { + c, + c_hat, + encs, + proof, + } = self; + ( + SharedComponents { c, c_hat, proof }, + encs.into_iter() + .map(|enc| EncryptedPart { + enc, + g: PhantomData, + }) + .collect(), + ) + } + fn encs_random_oracle(encryption_random_oracle: &RandomOracle) -> RandomOracle { encryption_random_oracle.extend("encs") } @@ -229,6 +261,51 @@ fn sym_cipher(k: &[u8; 64]) -> Aes256Ctr { ) } +impl EncryptedPart +where + ::ScalarType: FiatShamirChallenge + Zeroize, + G: HashToGroupElement, +{ + pub fn decrypt( + &self, + common: &SharedComponents, + sk: &PrivateKey, + encryption_random_oracle: &RandomOracle, + receiver_index: usize, + ) -> Vec { + let enc_ro = MultiRecipientEncryption::::encs_random_oracle(encryption_random_oracle); + let ephemeral_key = common.c * sk.0; + let k = enc_ro.evaluate(&(receiver_index, ephemeral_key)); + let cipher = sym_cipher(&k); + cipher + .decrypt(&fixed_zero_nonce(), &self.enc) + .expect("Decrypt should never fail for CTR mode") + } +} + +impl SharedComponents +where + 
::ScalarType: FiatShamirChallenge + Zeroize, + G: HashToGroupElement, +{ + pub fn ephemeral_key(&self) -> &G { + &self.c + } + + pub fn verify(&self, encryption_random_oracle: &RandomOracle) -> FastCryptoResult<()> { + let g_hat = G::hash_to_group_element( + &MultiRecipientEncryption::::g_hat_random_oracle(encryption_random_oracle) + .evaluate(&self.c), + ); + self.proof.verify( + &g_hat, + &self.c, + &self.c_hat, + &MultiRecipientEncryption::::zk_random_oracle(encryption_random_oracle), + ) + } +} + impl PrivateKey where G: GroupElement + Serialize, diff --git a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs index c54fa75ff5..e2db0925a0 100644 --- a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs +++ b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs @@ -59,6 +59,13 @@ mod point_tests { assert_eq!(msg.as_bytes(), &decrypted); } + let (common, parts) = mr_enc.clone().into_parts(); + assert!(common.verify(&ro).is_ok()); + for (i, (part, (sk, _, msg))) in parts.iter().zip(keys_and_msg.iter()).enumerate() { + // Using parts should work as well + assert_eq!(msg.as_bytes(), part.decrypt(&common, sk, &ro, i)); + } + // test empty messages let mr_enc2 = MultiRecipientEncryption::encrypt( &keys_and_msg diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 94e2f6be04..ca912e91a2 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -285,6 +285,8 @@ impl Dealer { rng, ); + //let (shared, parts) = ciphertext.into_parts(); + // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( &self.random_oracle(), diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 4af8ce4e21..42be1a79cd 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ 
b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -7,6 +7,7 @@ use crate::types::{to_scalar, ShareIndex}; use fastcrypto::error::FastCryptoError::{InputLengthWrong, InvalidInput, TooManyErrors}; use fastcrypto::error::FastCryptoResult; use itertools::Itertools; +use reed_solomon_erasure::galois_8::ReedSolomon; /// Decoder for Reed-Solomon codes. /// This can correct up to (d-1)/2 errors, where d is the distance of the code. @@ -123,6 +124,86 @@ impl RSDecoder { } } +/// A wrapper struct for the Reed-Solomon erasure coding library. +pub struct RSErasure(ReedSolomon); + +impl RSErasure { + /// Create a new Reed-Solomon erasure encoder/decoder. + /// + /// # Parameters + /// - `k`: Number of **data** shards (sometimes called the message length). + /// - `n`: Total number of shards, i.e. `k + (n-k)` where `n-k` are parity shards. + /// + /// # Errors + /// Returns [`FastCryptoError::InvalidInput`] if `k == 0`, `n <= k` or `n > 256`. + pub fn new(k: usize, n: usize) -> FastCryptoResult { + // `reed_solomon_erasure::galois_8` only supports up to 256 total shards. + if k == 0 || n <= k || n > 256 { + return Err(InvalidInput); + } + ReedSolomon::new(k, n - k) + .map_err(|_| InvalidInput) + .map(Self) + } + + /// Encode data shards into a full set of `n` shards (data + parity). + /// + /// The input must contain exactly `k` data shards. This function will append `n-k` + /// parity shards and return the full vector. + /// + /// All shards must have the same length (as required by `reed_solomon_erasure`). + /// + /// Returns [`FastCryptoError::InvalidInput`] if encoding fails (for example if shard + /// sizes are inconsistent). + pub fn encode(&self, data: Vec>) -> FastCryptoResult>> { + if data.len() != self.0.data_shard_count() { + return Err(InputLengthWrong(self.0.data_shard_count())); + } + + // `reed_solomon_erasure` requires all shards to have the same size, including parity. 
+ let shard_len = data.first().map(|s| s.len()).unwrap_or(0); + if !data.iter().all(|s| s.len() == shard_len) { + return Err(InvalidInput); + } + + let mut shards = data; + shards.resize(self.0.total_shard_count(), vec![0u8; shard_len]); + self.0.encode(&mut shards).map_err(|_| InvalidInput)?; + Ok(shards) + } + + /// Reconstruct missing shards from a mix of present and absent shards. + /// + /// The input is a vector of length `n` where each entry is either `Some(shard)` if that + /// shard is available, or `None` if it is missing. If enough shards are present (at least + /// `k`), the missing shards will be reconstructed. + /// + /// The returned value contains all shards (data + parity) in index order. + /// + /// # Errors + /// - Returns [`FastCryptoError::InvalidInput`] if reconstruction fails (inconsistent shard sizes, wrong number of shards). + /// - Returns [`FastCryptoError::TooManyErrors`] if reconstruction succeeds, but the reconstructed set does not verify. + pub fn reconstruct(&self, shards: Vec>>) -> FastCryptoResult>> { + let mut shards = shards; + self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; + + // `reconstruct` should have filled in every missing shard. If any are still absent, + // treat it as an invalid reconstruction. + let shards = shards + .into_iter() + .map(|s| s.ok_or(InvalidInput)) + .collect::>>()?; + + // Ensure the reconstructed shards are consistent. + let verified = self.0.verify(&shards).map_err(|_| InvalidInput)?; + if !verified { + return Err(TooManyErrors(self.0.parity_shard_count())); + } + + Ok(shards) + } +} + #[cfg(test)] mod tests { use super::*; @@ -164,4 +245,56 @@ mod tests { .unwrap(); assert_eq!(decoded_message, message); } + + #[test] + fn test_rs_erasure_encode_reconstruct_roundtrip() { + let k = 3; + let n = 5; + let rs = RSErasure::new(k, n).unwrap(); + + // All shards must have the same length. 
+ let data = vec![b"hello".to_vec(), b"world".to_vec(), b"!!!!!".to_vec()]; + let encoded = rs.encode(data.clone()).unwrap(); + assert_eq!(encoded.len(), n); + + // Drop up to `n-k` shards and reconstruct. + let mut shards: Vec>> = encoded.into_iter().map(Some).collect(); + shards[1] = None; + shards[4] = None; + + let reconstructed = rs.reconstruct(shards).unwrap(); + assert_eq!(reconstructed.len(), n); + + // First `k` shards are the data shards. + assert_eq!(&reconstructed[..k], &data[..]); + } + + #[test] + fn test_rs_erasure_reconstruct_too_few_shards() { + let k = 3; + let n = 5; + let rs = RSErasure::new(k, n).unwrap(); + + let data = vec![b"aaaaa".to_vec(), b"bbbbb".to_vec(), b"ccccc".to_vec()]; + let encoded = rs.encode(data).unwrap(); + let mut shards: Vec>> = encoded.into_iter().map(Some).collect(); + + // Leave only 2 shards present (< k). + shards[0] = None; + shards[1] = None; + shards[2] = None; + + assert!(matches!(rs.reconstruct(shards), Err(InvalidInput))); + } + + #[test] + fn test_rs_erasure_new_invalid_params() { + // Invalid because n <= k. + assert!(matches!(RSErasure::new(3, 3), Err(InvalidInput))); + assert!(matches!(RSErasure::new(3, 2), Err(InvalidInput))); + // Invalid because k == 0. + assert!(matches!(RSErasure::new(0, 1), Err(InvalidInput))); + // Invalid because n > 256. 
+ assert!(matches!(RSErasure::new(1, 257), Err(InvalidInput))); + } } From 2cf99d6ff559aaa9d11bf81a9322c6435872202b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 10:51:37 +0200 Subject: [PATCH 02/91] Better interface for erasure code --- .../src/threshold_schnorr/batch_avss.rs | 16 +- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 1 + .../src/threshold_schnorr/reed_solomon.rs | 183 +++++++++--------- 3 files changed, 109 insertions(+), 91 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index ca912e91a2..5b333111d2 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -15,6 +15,7 @@ use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; use crate::threshold_schnorr::complaint::{Complaint, ComplaintResponse}; +use crate::threshold_schnorr::reed_solomon::ErasureCoder; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; @@ -32,6 +33,7 @@ use std::iter::repeat_with; /// There is exactly one dealer who creates the shares and broadcasts the encrypted shares. #[allow(dead_code)] pub struct Dealer { + f: u16, t: u16, nodes: Nodes, sid: Vec, @@ -217,6 +219,7 @@ impl Dealer { pub fn new( nodes: Nodes, dealer_id: PartyId, + f: u16, t: u16, sid: Vec, batch_size_per_weight: u16, @@ -227,6 +230,7 @@ impl Dealer { // Each dealer deals a number of nonces proportional to their weight. let batch_size = nodes.weight_of(dealer_id)? 
as usize * batch_size_per_weight as usize; Ok(Self { + f, t, nodes, sid, @@ -285,7 +289,11 @@ impl Dealer { rng, ); - //let (shared, parts) = ciphertext.into_parts(); + // let (shared, parts) = ciphertext.into_parts(); + // let code = ErasureCoder::new(self.nodes.total_weight() as usize, (self.nodes.total_weight() - 2 * self.f) as usize)?; + // let roots = parts.iter().map(|part| { + // let shards = code.encode(&part.ciphertext)?; + // }) // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -576,6 +584,7 @@ mod tests { fn test_happy_path() { // No complaints, all honest. All have weight 1 let t = 3; + let f = 2; let n = 7; let batch_size_per_weight = 3; @@ -600,6 +609,7 @@ mod tests { let dealer: Dealer = Dealer::new( nodes.clone(), dealer_id, + f, t, sid.clone(), batch_size_per_weight, @@ -665,6 +675,7 @@ mod tests { fn test_happy_path_non_equal_weights() { // No complaints, all honest let t = 4; + let f = 3; let weights: Vec = vec![1, 2, 3, 4]; let batch_size_per_weight = 3; @@ -691,6 +702,7 @@ mod tests { let dealer: Dealer = Dealer::new( nodes.clone(), dealer_id, + f, t, sid.clone(), batch_size_per_weight, @@ -744,6 +756,7 @@ mod tests { #[test] fn test_share_recovery() { let t = 3; + let f = 2; let n = 7; let batch_size_per_weight: u16 = 3; @@ -769,6 +782,7 @@ mod tests { let dealer: Dealer = Dealer::new( nodes.clone(), dealer_id, + f, t, sid.clone(), batch_size_per_weight, diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 9ac4f13bda..1503ed77ce 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -212,6 +212,7 @@ mod tests { let dealer: batch_avss::Dealer = batch_avss::Dealer::new( nodes.clone(), dealer_id, + f, t, sid.clone(), batch_size_per_weight, diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 
42be1a79cd..f240b2b6ab 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -4,7 +4,9 @@ use crate::polynomial::{Eval, MonicLinear, Poly}; use crate::threshold_schnorr::S; use crate::types::{to_scalar, ShareIndex}; -use fastcrypto::error::FastCryptoError::{InputLengthWrong, InvalidInput, TooManyErrors}; +use fastcrypto::error::FastCryptoError::{ + InputLengthWrong, InputTooShort, InvalidInput, TooManyErrors, +}; use fastcrypto::error::FastCryptoResult; use itertools::Itertools; use reed_solomon_erasure::galois_8::ReedSolomon; @@ -125,19 +127,18 @@ impl RSDecoder { } /// A wrapper struct for the Reed-Solomon erasure coding library. -pub struct RSErasure(ReedSolomon); +pub struct ErasureCoder(ReedSolomon); -impl RSErasure { - /// Create a new Reed-Solomon erasure encoder/decoder. +impl ErasureCoder { + /// Create a new erasure encoder/decoder. /// /// # Parameters - /// - `k`: Number of **data** shards (sometimes called the message length). - /// - `n`: Total number of shards, i.e. `k + (n-k)` where `n-k` are parity shards. + /// - `n`: Total number of shards. + /// - `k`: Number of data shards. /// /// # Errors /// Returns [`FastCryptoError::InvalidInput`] if `k == 0`, `n <= k` or `n > 256`. - pub fn new(k: usize, n: usize) -> FastCryptoResult { - // `reed_solomon_erasure::galois_8` only supports up to 256 total shards. + pub fn new(n: usize, k: usize) -> FastCryptoResult { if k == 0 || n <= k || n > 256 { return Err(InvalidInput); } @@ -146,61 +147,46 @@ impl RSErasure { .map(Self) } - /// Encode data shards into a full set of `n` shards (data + parity). - /// - /// The input must contain exactly `k` data shards. This function will append `n-k` - /// parity shards and return the full vector. - /// - /// All shards must have the same length (as required by `reed_solomon_erasure`). 
- /// - /// Returns [`FastCryptoError::InvalidInput`] if encoding fails (for example if shard - /// sizes are inconsistent). - pub fn encode(&self, data: Vec>) -> FastCryptoResult>> { - if data.len() != self.0.data_shard_count() { - return Err(InputLengthWrong(self.0.data_shard_count())); + pub fn encode(&self, data: &[u8]) -> FastCryptoResult>> { + // Define a shard size such that the data can be contained in `k` shards. + let shard_size = data.len().div_ceil(self.0.data_shard_count()); + let mut data = data.to_vec(); + data.resize(shard_size * self.0.total_shard_count(), 0); + let mut shards = data + .chunks_exact(shard_size) + .map(|c| c.to_vec()) + .collect_vec(); + self.0.encode(&mut shards).map_err(|_| InvalidInput)?; + Ok(shards) + } + + pub fn decode(&self, shards: Vec>>) -> FastCryptoResult> { + if shards.len() != self.0.total_shard_count() { + return Err(InputTooShort(self.0.total_shard_count())); } - // `reed_solomon_erasure` requires all shards to have the same size, including parity. - let shard_len = data.first().map(|s| s.len()).unwrap_or(0); - if !data.iter().all(|s| s.len() == shard_len) { + if shards.iter().filter(|s| s.is_none()).count() > self.0.parity_shard_count() { return Err(InvalidInput); } - let mut shards = data; - shards.resize(self.0.total_shard_count(), vec![0u8; shard_len]); - self.0.encode(&mut shards).map_err(|_| InvalidInput)?; - Ok(shards) - } - - /// Reconstruct missing shards from a mix of present and absent shards. - /// - /// The input is a vector of length `n` where each entry is either `Some(shard)` if that - /// shard is available, or `None` if it is missing. If enough shards are present (at least - /// `k`), the missing shards will be reconstructed. - /// - /// The returned value contains all shards (data + parity) in index order. - /// - /// # Errors - /// - Returns [`FastCryptoError::InvalidInput`] if reconstruction fails (inconsistent shard sizes, wrong number of shards). 
- /// - Returns [`FastCryptoError::TooManyErrors`] if reconstruction succeeds, but the reconstructed set does not verify. - pub fn reconstruct(&self, shards: Vec>>) -> FastCryptoResult>> { let mut shards = shards; self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; - - // `reconstruct` should have filled in every missing shard. If any are still absent, - // treat it as an invalid reconstruction. let shards = shards .into_iter() .map(|s| s.ok_or(InvalidInput)) .collect::>>()?; - // Ensure the reconstructed shards are consistent. - let verified = self.0.verify(&shards).map_err(|_| InvalidInput)?; - if !verified { - return Err(TooManyErrors(self.0.parity_shard_count())); + // Ensure the reconstructed shards are consistent + if !self.0.verify(&shards).map_err(|_| InvalidInput)? { + return Err(TooManyErrors(0)); // This is just an erasure code, so we can't correct errors. } - Ok(shards) + let data = shards + .into_iter() + .take(self.0.data_shard_count()) + .flatten() + .collect_vec(); + Ok(data) } } @@ -247,54 +233,71 @@ mod tests { } #[test] - fn test_rs_erasure_encode_reconstruct_roundtrip() { - let k = 3; - let n = 5; - let rs = RSErasure::new(k, n).unwrap(); - - // All shards must have the same length. - let data = vec![b"hello".to_vec(), b"world".to_vec(), b"!!!!!".to_vec()]; - let encoded = rs.encode(data.clone()).unwrap(); - assert_eq!(encoded.len(), n); - - // Drop up to `n-k` shards and reconstruct. - let mut shards: Vec>> = encoded.into_iter().map(Some).collect(); - shards[1] = None; - shards[4] = None; - - let reconstructed = rs.reconstruct(shards).unwrap(); - assert_eq!(reconstructed.len(), n); - - // First `k` shards are the data shards. 
- assert_eq!(&reconstructed[..k], &data[..]); + fn test_erasure_coder_new_rejects_invalid_parameters() { + assert!(matches!(ErasureCoder::new(10, 0), Err(InvalidInput))); + assert!(matches!(ErasureCoder::new(10, 10), Err(InvalidInput))); + assert!(matches!(ErasureCoder::new(9, 10), Err(InvalidInput))); + assert!(matches!(ErasureCoder::new(257, 1), Err(InvalidInput))); } #[test] - fn test_rs_erasure_reconstruct_too_few_shards() { - let k = 3; - let n = 5; - let rs = RSErasure::new(k, n).unwrap(); - - let data = vec![b"aaaaa".to_vec(), b"bbbbb".to_vec(), b"ccccc".to_vec()]; - let encoded = rs.encode(data).unwrap(); - let mut shards: Vec>> = encoded.into_iter().map(Some).collect(); + fn test_erasure_coder_roundtrip() { + let n = 10; + let k = 6; + let coder = ErasureCoder::new(n, k).unwrap(); + + for len in [1usize, 2, 3, 7, 8, 31, 32, 33, 100, 255] { + let data: Vec = (0..len) + .map(|i| (i as u8).wrapping_mul(31).wrapping_add(7)) + .collect(); + let shards = coder.encode(&data).unwrap(); + assert_eq!(shards.len(), n); + + // Remove up to `parity` shards (erasures) and reconstruct. + let mut opt_shards: Vec>> = shards.into_iter().map(Some).collect(); + for shard in opt_shards.iter_mut().take(n - k) { + *shard = None; + } + + let recovered = coder.decode(opt_shards).unwrap(); + let shard_size = len.div_ceil(k); + let expected_len = shard_size * k; + assert_eq!(recovered.len(), expected_len); + assert_eq!(&recovered[..len], &data); + assert!(recovered[len..].iter().all(|&b| b == 0)); + } + } - // Leave only 2 shards present (< k). - shards[0] = None; - shards[1] = None; - shards[2] = None; + #[test] + fn test_erasure_coder_decode_rejects_too_many_missing_shards() { + let n = 9; + let k = 5; + let coder = ErasureCoder::new(n, k).unwrap(); + let data: Vec = (0..123).map(|i| i as u8).collect(); + let shards = coder.encode(&data).unwrap(); + + // Parity is `n - k` -- remove more shards than that. 
+ let mut opt_shards: Vec>> = shards.into_iter().map(Some).collect(); + for shard in opt_shards.iter_mut().take(n - k + 1) { + *shard = None; + } - assert!(matches!(rs.reconstruct(shards), Err(InvalidInput))); + assert!(matches!(coder.decode(opt_shards), Err(InvalidInput))); } #[test] - fn test_rs_erasure_new_invalid_params() { - // Invalid because n <= k. - assert!(matches!(RSErasure::new(3, 3), Err(InvalidInput))); - assert!(matches!(RSErasure::new(3, 2), Err(InvalidInput))); - // Invalid because k == 0. - assert!(matches!(RSErasure::new(0, 1), Err(InvalidInput))); - // Invalid because n > 256. - assert!(matches!(RSErasure::new(1, 257), Err(InvalidInput))); + fn test_erasure_coder_detects_corrupted_shard() { + let n = 8; + let k = 5; + let coder = ErasureCoder::new(n, k).unwrap(); + let data: Vec = (0..200).map(|i| (i as u8) ^ 0xAA).collect(); + let mut shards = coder.encode(&data).unwrap(); + + // Corrupt one shard (without declaring it missing). Reconstruction will succeed, + // but verification should fail. 
+ shards[0][0] ^= 1; + let opt_shards = shards.into_iter().map(Some).collect_vec(); + + assert!(matches!(coder.decode(opt_shards), Err(TooManyErrors(_)))); } } From 64058fcd69e3763e0d2789c1d7236c487d806dc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 12:05:27 +0200 Subject: [PATCH 03/91] Compute challenge from roots --- fastcrypto-tbls/src/ecies_v1.rs | 26 ++----- fastcrypto-tbls/src/tests/ecies_v1_tests.rs | 2 +- .../src/threshold_schnorr/batch_avss.rs | 67 +++++++++++++++---- 3 files changed, 61 insertions(+), 34 deletions(-) diff --git a/fastcrypto-tbls/src/ecies_v1.rs b/fastcrypto-tbls/src/ecies_v1.rs index 6a24d26324..e1fd1a7f9c 100644 --- a/fastcrypto-tbls/src/ecies_v1.rs +++ b/fastcrypto-tbls/src/ecies_v1.rs @@ -208,22 +208,14 @@ where &self.proof } - pub fn into_parts(self) -> (SharedComponents, Vec>) { + pub fn into_parts(self) -> (SharedComponents, Vec>) { let MultiRecipientEncryption { c, c_hat, encs, proof, } = self; - ( - SharedComponents { c, c_hat, proof }, - encs.into_iter() - .map(|enc| EncryptedPart { - enc, - g: PhantomData, - }) - .collect(), - ) + (SharedComponents { c, c_hat, proof }, encs) } fn encs_random_oracle(encryption_random_oracle: &RandomOracle) -> RandomOracle { @@ -261,33 +253,27 @@ fn sym_cipher(k: &[u8; 64]) -> Aes256Ctr { ) } -impl EncryptedPart +impl SharedComponents where ::ScalarType: FiatShamirChallenge + Zeroize, G: HashToGroupElement, { pub fn decrypt( &self, - common: &SharedComponents, + enc: &[u8], sk: &PrivateKey, encryption_random_oracle: &RandomOracle, receiver_index: usize, ) -> Vec { let enc_ro = MultiRecipientEncryption::::encs_random_oracle(encryption_random_oracle); - let ephemeral_key = common.c * sk.0; + let ephemeral_key = self.c * sk.0; let k = enc_ro.evaluate(&(receiver_index, ephemeral_key)); let cipher = sym_cipher(&k); cipher - .decrypt(&fixed_zero_nonce(), &self.enc) + .decrypt(&fixed_zero_nonce(), enc) .expect("Decrypt should never fail for CTR mode") } -} 
-impl SharedComponents -where - ::ScalarType: FiatShamirChallenge + Zeroize, - G: HashToGroupElement, -{ pub fn ephemeral_key(&self) -> &G { &self.c } diff --git a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs index e2db0925a0..ae31eb592c 100644 --- a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs +++ b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs @@ -63,7 +63,7 @@ mod point_tests { assert!(common.verify(&ro).is_ok()); for (i, (part, (sk, _, msg))) in parts.iter().zip(keys_and_msg.iter()).enumerate() { // Using parts should work as well - assert_eq!(msg.as_bytes(), part.decrypt(&common, sk, &ro, i)); + assert_eq!(msg.as_bytes(), common.decrypt(&part, sk, &ro, i)); } // test empty messages diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 5b333111d2..21404a2b9d 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -9,8 +9,8 @@ //! * The public keys along with the weights of each receiver are known to all parties and defined in the [Nodes] structure. //! * Define a new [Dealer] with the secrets who begins by calling [Dealer::create_message]. 
-use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey}; -use crate::nodes::{Nodes, PartyId}; +use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; +use crate::nodes::{Node, Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; @@ -22,8 +22,10 @@ use crate::types::{get_uniform_value, ShareIndex}; use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage}; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; -use fastcrypto::hash::{HashFunction, Sha3_512}; +use fastcrypto::hash::{Blake2b256, HashFunction, Sha3_512}; +use fastcrypto::merkle; use fastcrypto::traits::AllowedRng; +use fastcrypto::twisted_elgamal::Ciphertext; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -60,6 +62,7 @@ pub struct Message { blinding_commit: G, ciphertext: MultiRecipientEncryption, response_polynomial: Poly, + roots: Vec, } /// The result of processing a message by a receiver: either valid shares or a complaint. 
@@ -289,18 +292,29 @@ impl Dealer { rng, ); - // let (shared, parts) = ciphertext.into_parts(); - // let code = ErasureCoder::new(self.nodes.total_weight() as usize, (self.nodes.total_weight() - 2 * self.f) as usize)?; - // let roots = parts.iter().map(|part| { - // let shards = code.encode(&part.ciphertext)?; - // }) + let (shared, ciphertexts) = ciphertext.clone().into_parts(); + let code = ErasureCoder::new( + self.nodes.total_weight() as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, + )?; + let roots = ciphertexts + .iter() + .map(|part| { + let shards = code.encode(part)?; + let tree = fastcrypto::merkle::MerkleTree::::build_from_unserialized( + shards.iter(), + )?; + Ok(tree.root()) + }) + .collect::>>()?; // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( &self.random_oracle(), &full_public_keys, &blinding_commit, - &ciphertext, + &shared, + &roots, ); // Get the first t evaluations for the response polynomial and use these to compute the coefficients @@ -321,6 +335,7 @@ impl Dealer { blinding_commit, ciphertext, response_polynomial, + roots, }) } @@ -382,6 +397,7 @@ impl Receiver { blinding_commit, ciphertext, response_polynomial, + roots, } = message; if full_public_keys.len() != self.batch_size @@ -541,21 +557,25 @@ fn compute_challenge( random_oracle: &RandomOracle, c: &[G], c_prime: &G, - e: &MultiRecipientEncryption, + shared: &SharedComponents, + roots: &[merkle::Node], ) -> Vec { let random_oracle = random_oracle.extend(&Challenge.to_string()); - let inner_hash = Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, e)).unwrap()).digest; + let inner_hash = + Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, roots)).unwrap()).digest; (0..c.len()) .map(|l| random_oracle.evaluate_to_group_element(&(l, inner_hash.to_vec()))) .collect() } fn compute_challenge_from_message(random_oracle: &RandomOracle, message: &Message) -> Vec { + let (shared, _) = 
message.ciphertext.clone().into_parts(); compute_challenge( random_oracle, &message.full_public_keys, &message.blinding_commit, - &message.ciphertext, + &shared, + &message.roots, ) } @@ -570,11 +590,13 @@ mod tests { use crate::nodes::{Node, Nodes}; use crate::polynomial::{Eval, Poly}; use crate::threshold_schnorr::bcs::BCSSerialized; + use crate::threshold_schnorr::reed_solomon::ErasureCoder; use crate::threshold_schnorr::Extensions::Encryption; use crate::threshold_schnorr::{EG, G}; use crate::types::ShareIndex; use fastcrypto::error::FastCryptoResult; use fastcrypto::groups::GroupElement; + use fastcrypto::hash::Blake2b256; use fastcrypto::traits::AllowedRng; use itertools::Itertools; use std::collections::HashMap; @@ -906,12 +928,30 @@ mod tests { rng, ); + let (shared, ciphertexts) = ciphertext.clone().into_parts(); + let code = ErasureCoder::new( + self.nodes.total_weight() as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, + )?; + let roots = ciphertexts + .iter() + .map(|part| { + let shards = code.encode(part)?; + let tree = + fastcrypto::merkle::MerkleTree::::build_from_unserialized( + shards.iter(), + )?; + Ok(tree.root()) + }) + .collect::>>()?; + // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( &self.random_oracle(), &full_public_keys, &blinding_commit, - &ciphertext, + &shared, + &roots, ); let mut response_polynomial = blinding_poly; for (p_l, gamma_l) in polynomials.into_iter().zip_eq(&challenge) { @@ -923,6 +963,7 @@ mod tests { blinding_commit, ciphertext, response_polynomial, + roots, }) } } From c7f2af98470a6f6e74e9430cd7c2dcd1601a0961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 13:01:30 +0200 Subject: [PATCH 04/91] Group shards to parties --- fastcrypto-tbls/src/nodes.rs | 14 +++++++++++ .../src/threshold_schnorr/batch_avss.rs | 23 +++++++++---------- .../src/threshold_schnorr/reed_solomon.rs | 19 +++++++++------ 3 files changed, 37 
insertions(+), 19 deletions(-) diff --git a/fastcrypto-tbls/src/nodes.rs b/fastcrypto-tbls/src/nodes.rs index 1053cd669b..c854a810f9 100644 --- a/fastcrypto-tbls/src/nodes.rs +++ b/fastcrypto-tbls/src/nodes.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022, Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use itertools::Itertools; use crate::ecies_v1; use crate::types::ShareIndex; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; @@ -157,6 +158,19 @@ impl Nodes { hash.finalize() } + /// Given an iterator over a set of items, one per share index, this function groups them into + /// a vector of vectors, one per node, according to the share ids of the nodes. + /// Returns error if the number of items does not match the total weight. + pub fn collect_to_nodes(&self, items: impl Iterator + ExactSizeIterator) -> FastCryptoResult>> { + if items.len() != self.total_weight as usize { + return Err(FastCryptoError::InvalidInput); + } + let mut items = items; + Ok(self.node_ids_iter().map(|id| { + items.by_ref().take(self.weight_of(id).unwrap() as usize).collect_vec() + }).collect_vec()) + } + /// Create a new set of nodes. Nodes must have consecutive ids starting from 0. /// Reduces weights up to an allowed delta in the original total weight. 
/// Finds the largest d such that: diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 21404a2b9d..8ba3320eac 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -15,7 +15,7 @@ use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; use crate::threshold_schnorr::complaint::{Complaint, ComplaintResponse}; -use crate::threshold_schnorr::reed_solomon::ErasureCoder; +use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; @@ -30,6 +30,7 @@ use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::iter::repeat_with; +use fastcrypto::merkle::MerkleTree; /// This represents a Dealer in the AVSS. /// There is exactly one dealer who creates the shares and broadcasts the encrypted shares. 
@@ -295,18 +296,16 @@ impl Dealer { let (shared, ciphertexts) = ciphertext.clone().into_parts(); let code = ErasureCoder::new( self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards )?; - let roots = ciphertexts - .iter() - .map(|part| { - let shards = code.encode(part)?; - let tree = fastcrypto::merkle::MerkleTree::::build_from_unserialized( - shards.iter(), - )?; - Ok(tree.root()) - }) - .collect::>>()?; + + let shards = ciphertexts.iter().map(|c| { + let shards = code.encode(c)?; // One shard per weight + self.nodes.collect_to_nodes(shards.into_iter()) // Grouped to nodes by weight + }).collect::>>()?; + + let trees = shards.iter().map(MerkleTree::::build_from_unserialized).collect::>>()?; + let roots = trees.iter().map(MerkleTree::root).collect_vec(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index f240b2b6ab..af54c4230e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -10,6 +10,7 @@ use fastcrypto::error::FastCryptoError::{ use fastcrypto::error::FastCryptoResult; use itertools::Itertools; use reed_solomon_erasure::galois_8::ReedSolomon; +use serde::{Deserialize, Serialize}; /// Decoder for Reed-Solomon codes. /// This can correct up to (d-1)/2 errors, where d is the distance of the code. @@ -129,6 +130,10 @@ impl RSDecoder { /// A wrapper struct for the Reed-Solomon erasure coding library. pub struct ErasureCoder(ReedSolomon); +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Shard(Vec); + impl ErasureCoder { /// Create a new erasure encoder/decoder. 
/// @@ -147,7 +152,7 @@ impl ErasureCoder { .map(Self) } - pub fn encode(&self, data: &[u8]) -> FastCryptoResult>> { + pub fn encode(&self, data: &[u8]) -> FastCryptoResult> { // Define a shard size such that the data can be contained in `k` shards. let shard_size = data.len().div_ceil(self.0.data_shard_count()); let mut data = data.to_vec(); @@ -157,10 +162,10 @@ impl ErasureCoder { .map(|c| c.to_vec()) .collect_vec(); self.0.encode(&mut shards).map_err(|_| InvalidInput)?; - Ok(shards) + Ok(shards.into_iter().map(Shard).collect_vec()) } - pub fn decode(&self, shards: Vec>>) -> FastCryptoResult> { + pub fn decode(&self, shards: Vec>) -> FastCryptoResult> { if shards.len() != self.0.total_shard_count() { return Err(InputTooShort(self.0.total_shard_count())); } @@ -169,7 +174,7 @@ impl ErasureCoder { return Err(InvalidInput); } - let mut shards = shards; + let mut shards = shards.into_iter().map(|s| s.map(|s| s.0)).collect_vec(); self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; let shards = shards .into_iter() @@ -254,7 +259,7 @@ mod tests { assert_eq!(shards.len(), n); // Remove up to `parity` shards (erasures) and reconstruct. - let mut opt_shards: Vec>> = shards.into_iter().map(Some).collect(); + let mut opt_shards: Vec> = shards.into_iter().map(Some).collect(); for shard in opt_shards.iter_mut().take(n - k) { *shard = None; } @@ -277,7 +282,7 @@ mod tests { let shards = coder.encode(&data).unwrap(); // Parity is `n - k` -- remove more shards than that. - let mut opt_shards: Vec>> = shards.into_iter().map(Some).collect(); + let mut opt_shards: Vec> = shards.into_iter().map(Some).collect(); for shard in opt_shards.iter_mut().take(n - k + 1) { *shard = None; } @@ -295,7 +300,7 @@ mod tests { // Corrupt one shard (without declaring it missing). Reconstruction will succeed, // but verification should fail. 
- shards[0][0] ^= 1; + shards[0].0[0] ^= 1; let opt_shards = shards.into_iter().map(Some).collect_vec(); assert!(matches!(coder.decode(opt_shards), Err(TooManyErrors(_)))); From 17f85838faa0925a457d16e9e2d24d5e2a3323e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 13:18:03 +0200 Subject: [PATCH 05/91] Individual messages --- fastcrypto-tbls/src/ecies_v1.rs | 58 +- fastcrypto-tbls/src/threshold_schnorr/avss.rs | 5 +- .../src/threshold_schnorr/batch_avss.rs | 586 +++++++++--------- .../src/threshold_schnorr/complaint.rs | 10 +- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +- 5 files changed, 363 insertions(+), 300 deletions(-) diff --git a/fastcrypto-tbls/src/ecies_v1.rs b/fastcrypto-tbls/src/ecies_v1.rs index e1fd1a7f9c..2344f0ba18 100644 --- a/fastcrypto-tbls/src/ecies_v1.rs +++ b/fastcrypto-tbls/src/ecies_v1.rs @@ -46,7 +46,7 @@ pub const AES_KEY_LENGTH: usize = 32; pub struct MultiRecipientEncryption { c: G, c_hat: G, - encs: Vec>, + pub(crate) encs: Vec>, proof: DdhTupleNizk, } @@ -218,6 +218,14 @@ where (SharedComponents { c, c_hat, proof }, encs) } + pub fn shared(&self) -> SharedComponents { + SharedComponents { + c: self.c, + c_hat: self.c_hat, + proof: self.proof.clone(), + } + } + fn encs_random_oracle(encryption_random_oracle: &RandomOracle) -> RandomOracle { encryption_random_oracle.extend("encs") } @@ -290,6 +298,54 @@ where &MultiRecipientEncryption::::zk_random_oracle(encryption_random_oracle), ) } + + pub fn create_recovery_package( + &self, + sk: &PrivateKey, + recovery_random_oracle: &RandomOracle, + rng: &mut R, + ) -> RecoveryPackage { + let pk = G::generator() * sk.0; + let ephemeral_key = self.c * sk.0; + + let proof = DdhTupleNizk::::create( + &sk.0, + &self.c, + &pk, + &ephemeral_key, + recovery_random_oracle, + rng, + ); + + RecoveryPackage { + ephemeral_key, + proof, + } + } + + + pub fn decrypt_with_recovery_package( + &self, + enc: &[u8], + pkg: &RecoveryPackage, + recovery_random_oracle: 
&RandomOracle, + encryption_random_oracle: &RandomOracle, + receiver_pk: &PublicKey, + receiver_index: usize, + ) -> FastCryptoResult> { + pkg.proof.verify( + &self.c, + &receiver_pk.0, + &pkg.ephemeral_key, + recovery_random_oracle, + )?; + let encs_ro = MultiRecipientEncryption::::encs_random_oracle(encryption_random_oracle); + let k = encs_ro.evaluate(&(receiver_index, pkg.ephemeral_key)); + let cipher = sym_cipher(&k); + Ok(cipher + .decrypt(&fixed_zero_nonce(), enc) + .expect("Decrypt should never fail for CTR mode")) + } } impl PrivateKey diff --git a/fastcrypto-tbls/src/threshold_schnorr/avss.rs b/fastcrypto-tbls/src/threshold_schnorr/avss.rs index 57fb81ea2b..28f6f676ed 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/avss.rs @@ -286,7 +286,7 @@ impl Receiver { })), Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( self.id, - &message.ciphertext, + &message.ciphertext.shared(), &self.enc_secret_key, &self.random_oracle(), &mut rand::thread_rng(), @@ -303,7 +303,8 @@ impl Receiver { ) -> FastCryptoResult> { complaint.check( &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, - &message.ciphertext, + &message.ciphertext.encs[complaint.accuser_id as usize], + &message.ciphertext.shared(), &self.random_oracle(), |shares: &SharesForNode| { verify_shares(shares, &self.nodes, complaint.accuser_id, message) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 8ba3320eac..522a660e2f 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -61,7 +61,8 @@ pub struct Receiver { pub struct Message { full_public_keys: Vec, blinding_commit: G, - ciphertext: MultiRecipientEncryption, + shared: SharedComponents, + ciphertext: Vec, response_polynomial: Poly, roots: Vec, } @@ -243,7 +244,7 @@ impl Dealer { } /// 1. 
The Dealer generates shares for the secrets and broadcasts the encrypted shares. - pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult { + pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) .collect_vec(); @@ -329,13 +330,14 @@ impl Dealer { .to_vec(), )?; - Ok(Message { - full_public_keys, + Ok(ciphertexts.into_iter().map(|ciphertext| Message { + full_public_keys: full_public_keys.clone(), + shared: shared.clone(), + response_polynomial: response_polynomial.clone(), + roots: roots.clone(), blinding_commit, ciphertext, - response_polynomial, - roots, - }) + }).collect_vec()) } fn random_oracle(&self) -> RandomOracle { @@ -397,6 +399,7 @@ impl Receiver { ciphertext, response_polynomial, roots, + shared, } = message; if full_public_keys.len() != self.batch_size @@ -416,12 +419,12 @@ impl Receiver { } let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - ciphertext - .verify(&random_oracle_encryption) + shared.verify(&random_oracle_encryption) .map_err(|_| InvalidMessage)?; // Decrypt my shares - let plaintext = ciphertext.decrypt( + let plaintext = shared.decrypt( + ciphertext, &self.enc_secret_key, &random_oracle_encryption, self.id as usize, @@ -445,7 +448,7 @@ impl Receiver { })), Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( self.id, - ciphertext, + shared, &self.enc_secret_key, &self.random_oracle(), &mut rand::thread_rng(), @@ -464,6 +467,7 @@ impl Receiver { complaint.check( &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, &message.ciphertext, + &message.shared, &self.random_oracle(), |shares: &SharesForNode| { verify_shares( @@ -568,12 +572,11 @@ fn compute_challenge( } fn compute_challenge_from_message(random_oracle: &RandomOracle, message: &Message) -> Vec { - let (shared, _) = message.ciphertext.clone().into_parts(); compute_challenge( random_oracle, &message.full_public_keys, 
&message.blinding_commit, - &shared, + &message.shared, &message.roots, ) } @@ -654,11 +657,12 @@ mod tests { }) .collect_vec(); - let message = dealer.create_message(&mut rng).unwrap(); + let messages = dealer.create_message(&mut rng).unwrap(); let all_shares = receivers .iter() - .map(|receiver| { + .zip(messages) + .map(|(receiver, message)| { ( receiver.id, assert_valid(receiver.process_message(&message).unwrap()), @@ -690,282 +694,282 @@ mod tests { assert_eq!(secrets, secrets); } - - #[test] - #[allow(clippy::single_match)] - fn test_happy_path_non_equal_weights() { - // No complaints, all honest - let t = 4; - let f = 3; - let weights: Vec = vec![1, 2, 3, 4]; - let batch_size_per_weight = 3; - - let mut rng = rand::thread_rng(); - let sks = weights - .iter() - .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - .collect::>(); - let nodes = Nodes::new( - weights - .into_iter() - .enumerate() - .map(|(i, weight)| Node { - id: i as u16, - pk: PublicKey::from_private_key(&sks[i]), - weight, - }) - .collect_vec(), - ) - .unwrap(); - - let dealer_id = 2; - let sid = b"tbls test".to_vec(); - let dealer: Dealer = Dealer::new( - nodes.clone(), - dealer_id, - f, - t, - sid.clone(), - batch_size_per_weight, - ) - .unwrap(); - - let receivers = sks - .into_iter() - .enumerate() - .map(|(i, secret_key)| { - Receiver::new( - nodes.clone(), - i as u16, - dealer_id, - t, - sid.clone(), - secret_key, - batch_size_per_weight, - ) - .unwrap() - }) - .collect_vec(); - - let message = dealer.create_message(&mut rng).unwrap(); - - let all_shares = receivers - .iter() - .flat_map(|receiver| { - assert_valid(receiver.process_message(&message).unwrap()) - .my_shares - .shares - }) - .collect::>(); - - let secrets = (0..dealer.batch_size) - .map(|l| { - Poly::recover_c0( - t, - all_shares.iter().take(t as usize).map(|s| Eval { - index: s.index, - value: s.batch[l], - }), - ) - .unwrap() - }) - .collect::>(); - - assert_eq!(secrets, secrets); - } - - #[test] - fn test_share_recovery() { 
- let t = 3; - let f = 2; - let n = 7; - let batch_size_per_weight: u16 = 3; - - let mut rng = rand::thread_rng(); - let sks = (0..n) - .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - .collect::>(); - let nodes = Nodes::new( - sks.iter() - .enumerate() - .map(|(id, sk)| Node { - id: id as u16, - pk: PublicKey::from_private_key(sk), - weight: 1, - }) - .collect::>(), - ) - .unwrap(); - - let sid = b"tbls test".to_vec(); - - let dealer_id = 1; - let dealer: Dealer = Dealer::new( - nodes.clone(), - dealer_id, - f, - t, - sid.clone(), - batch_size_per_weight, - ) - .unwrap(); - - let receivers = sks - .into_iter() - .enumerate() - .map(|(id, secret_key)| { - Receiver::new( - nodes.clone(), - id as u16, - dealer_id, - t, - sid.clone(), - secret_key, - batch_size_per_weight, - ) - .unwrap() - }) - .collect::>(); - - let message = dealer.create_message_cheating(&mut rng).unwrap(); - - let mut all_shares = receivers - .iter() - .map(|receiver| (receiver.id, receiver.process_message(&message).unwrap())) - .collect::>(); - - let complaint = assert_complaint(all_shares.remove(&receivers[0].id).unwrap()); - let mut all_shares = all_shares - .into_iter() - .map(|(id, pm)| (id, assert_valid(pm))) - .collect::>(); - - let responses = receivers - .iter() - .skip(1) - .map(|r| { - r.handle_complaint(&message, &complaint, all_shares.get(&r.id).unwrap()) - .unwrap() - }) - .collect::>(); - let shares = receivers[0].recover(&message, responses).unwrap(); - all_shares.insert(receivers[0].id, shares); - - // Recover with the first f+1 shares, including the reconstructed - let secrets = (0..dealer.batch_size) - .map(|l| { - let shares = all_shares - .iter() - .map(|(id, s)| (*id, s.my_shares.shares[0].batch[l])) - .collect::>(); - Poly::recover_c0( - t, - shares.iter().take(t as usize).map(|(id, v)| Eval { - index: ShareIndex::try_from(id + 1).unwrap(), - value: *v, - }), - ) - .unwrap() - }) - .collect::>(); - - assert_eq!(secrets, secrets); - } - - impl Dealer { - /// 1. 
The Dealer samples L nonces, generates shares and broadcasts the encrypted shares. This also returns the nonces to be secret shared along with their corresponding public keys. - pub fn create_message_cheating( - &self, - rng: &mut impl AllowedRng, - ) -> FastCryptoResult { - let polynomials = repeat_with(|| Poly::rand(self.t - 1, rng)) - .take(self.batch_size) - .collect_vec(); - - // Compute the (full) public keys for all secrets - let full_public_keys = polynomials - .iter() - .map(|p| G::generator() * p.c0()) - .collect_vec(); - - // "blinding" polynomial as defined in https://eprint.iacr.org/2023/536.pdf. - let blinding_poly = Poly::rand(self.t - 1, rng); - let blinding_commit = G::generator() * blinding_poly.c0(); - - // Encrypt all shares to the receivers - let mut pk_and_msgs = self - .nodes - .iter() - .map(|node| (node.pk.clone(), self.nodes.share_ids_of(node.id).unwrap())) - .map(|(public_key, share_ids)| { - ( - public_key, - SharesForNode { - shares: share_ids - .into_iter() - .map(|index| ShareBatch { - index, - batch: polynomials - .iter() - .map(|p_l| p_l.eval(index).value) - .collect_vec(), - blinding_share: blinding_poly.eval(index).value, - }) - .collect_vec(), - }, - ) - }) - .map(|(pk, shares_for_node)| (pk, shares_for_node.to_bytes())) - .collect_vec(); - - // Modify the first share of the first receiver to simulate a cheating dealer - pk_and_msgs[0].1[7] ^= 1; - - let ciphertext = MultiRecipientEncryption::encrypt( - &pk_and_msgs, - &self.random_oracle().extend(&Encryption.to_string()), - rng, - ); - - let (shared, ciphertexts) = ciphertext.clone().into_parts(); - let code = ErasureCoder::new( - self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, - )?; - let roots = ciphertexts - .iter() - .map(|part| { - let shards = code.encode(part)?; - let tree = - fastcrypto::merkle::MerkleTree::::build_from_unserialized( - shards.iter(), - )?; - Ok(tree.root()) - }) - .collect::>>()?; - - // "response" polynomials 
from https://eprint.iacr.org/2023/536.pdf - let challenge = compute_challenge( - &self.random_oracle(), - &full_public_keys, - &blinding_commit, - &shared, - &roots, - ); - let mut response_polynomial = blinding_poly; - for (p_l, gamma_l) in polynomials.into_iter().zip_eq(&challenge) { - response_polynomial += &(p_l * gamma_l); - } - - Ok(Message { - full_public_keys, - blinding_commit, - ciphertext, - response_polynomial, - roots, - }) - } - } + // + // #[test] + // #[allow(clippy::single_match)] + // fn test_happy_path_non_equal_weights() { + // // No complaints, all honest + // let t = 4; + // let f = 3; + // let weights: Vec = vec![1, 2, 3, 4]; + // let batch_size_per_weight = 3; + // + // let mut rng = rand::thread_rng(); + // let sks = weights + // .iter() + // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + // .collect::>(); + // let nodes = Nodes::new( + // weights + // .into_iter() + // .enumerate() + // .map(|(i, weight)| Node { + // id: i as u16, + // pk: PublicKey::from_private_key(&sks[i]), + // weight, + // }) + // .collect_vec(), + // ) + // .unwrap(); + // + // let dealer_id = 2; + // let sid = b"tbls test".to_vec(); + // let dealer: Dealer = Dealer::new( + // nodes.clone(), + // dealer_id, + // f, + // t, + // sid.clone(), + // batch_size_per_weight, + // ) + // .unwrap(); + // + // let receivers = sks + // .into_iter() + // .enumerate() + // .map(|(i, secret_key)| { + // Receiver::new( + // nodes.clone(), + // i as u16, + // dealer_id, + // t, + // sid.clone(), + // secret_key, + // batch_size_per_weight, + // ) + // .unwrap() + // }) + // .collect_vec(); + // + // let message = dealer.create_message(&mut rng).unwrap(); + // + // let all_shares = receivers + // .iter() + // .flat_map(|receiver| { + // assert_valid(receiver.process_message(&message).unwrap()) + // .my_shares + // .shares + // }) + // .collect::>(); + // + // let secrets = (0..dealer.batch_size) + // .map(|l| { + // Poly::recover_c0( + // t, + // all_shares.iter().take(t as 
usize).map(|s| Eval { + // index: s.index, + // value: s.batch[l], + // }), + // ) + // .unwrap() + // }) + // .collect::>(); + // + // assert_eq!(secrets, secrets); + // } + // + // #[test] + // fn test_share_recovery() { + // let t = 3; + // let f = 2; + // let n = 7; + // let batch_size_per_weight: u16 = 3; + // + // let mut rng = rand::thread_rng(); + // let sks = (0..n) + // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + // .collect::>(); + // let nodes = Nodes::new( + // sks.iter() + // .enumerate() + // .map(|(id, sk)| Node { + // id: id as u16, + // pk: PublicKey::from_private_key(sk), + // weight: 1, + // }) + // .collect::>(), + // ) + // .unwrap(); + // + // let sid = b"tbls test".to_vec(); + // + // let dealer_id = 1; + // let dealer: Dealer = Dealer::new( + // nodes.clone(), + // dealer_id, + // f, + // t, + // sid.clone(), + // batch_size_per_weight, + // ) + // .unwrap(); + // + // let receivers = sks + // .into_iter() + // .enumerate() + // .map(|(id, secret_key)| { + // Receiver::new( + // nodes.clone(), + // id as u16, + // dealer_id, + // t, + // sid.clone(), + // secret_key, + // batch_size_per_weight, + // ) + // .unwrap() + // }) + // .collect::>(); + // + // let message = dealer.create_message_cheating(&mut rng).unwrap(); + // + // let mut all_shares = receivers + // .iter() + // .map(|receiver| (receiver.id, receiver.process_message(&message).unwrap())) + // .collect::>(); + // + // let complaint = assert_complaint(all_shares.remove(&receivers[0].id).unwrap()); + // let mut all_shares = all_shares + // .into_iter() + // .map(|(id, pm)| (id, assert_valid(pm))) + // .collect::>(); + // + // let responses = receivers + // .iter() + // .skip(1) + // .map(|r| { + // r.handle_complaint(&message, &complaint, all_shares.get(&r.id).unwrap()) + // .unwrap() + // }) + // .collect::>(); + // let shares = receivers[0].recover(&message, responses).unwrap(); + // all_shares.insert(receivers[0].id, shares); + // + // // Recover with the first f+1 shares, 
including the reconstructed + // let secrets = (0..dealer.batch_size) + // .map(|l| { + // let shares = all_shares + // .iter() + // .map(|(id, s)| (*id, s.my_shares.shares[0].batch[l])) + // .collect::>(); + // Poly::recover_c0( + // t, + // shares.iter().take(t as usize).map(|(id, v)| Eval { + // index: ShareIndex::try_from(id + 1).unwrap(), + // value: *v, + // }), + // ) + // .unwrap() + // }) + // .collect::>(); + // + // assert_eq!(secrets, secrets); + // } + // + // impl Dealer { + // /// 1. The Dealer samples L nonces, generates shares and broadcasts the encrypted shares. This also returns the nonces to be secret shared along with their corresponding public keys. + // pub fn create_message_cheating( + // &self, + // rng: &mut impl AllowedRng, + // ) -> FastCryptoResult { + // let polynomials = repeat_with(|| Poly::rand(self.t - 1, rng)) + // .take(self.batch_size) + // .collect_vec(); + // + // // Compute the (full) public keys for all secrets + // let full_public_keys = polynomials + // .iter() + // .map(|p| G::generator() * p.c0()) + // .collect_vec(); + // + // // "blinding" polynomial as defined in https://eprint.iacr.org/2023/536.pdf. 
+ // let blinding_poly = Poly::rand(self.t - 1, rng); + // let blinding_commit = G::generator() * blinding_poly.c0(); + // + // // Encrypt all shares to the receivers + // let mut pk_and_msgs = self + // .nodes + // .iter() + // .map(|node| (node.pk.clone(), self.nodes.share_ids_of(node.id).unwrap())) + // .map(|(public_key, share_ids)| { + // ( + // public_key, + // SharesForNode { + // shares: share_ids + // .into_iter() + // .map(|index| ShareBatch { + // index, + // batch: polynomials + // .iter() + // .map(|p_l| p_l.eval(index).value) + // .collect_vec(), + // blinding_share: blinding_poly.eval(index).value, + // }) + // .collect_vec(), + // }, + // ) + // }) + // .map(|(pk, shares_for_node)| (pk, shares_for_node.to_bytes())) + // .collect_vec(); + // + // // Modify the first share of the first receiver to simulate a cheating dealer + // pk_and_msgs[0].1[7] ^= 1; + // + // let ciphertext = MultiRecipientEncryption::encrypt( + // &pk_and_msgs, + // &self.random_oracle().extend(&Encryption.to_string()), + // rng, + // ); + // + // let (shared, ciphertexts) = ciphertext.clone().into_parts(); + // let code = ErasureCoder::new( + // self.nodes.total_weight() as usize, + // (self.nodes.total_weight() - 2 * self.f) as usize, + // )?; + // let roots = ciphertexts + // .iter() + // .map(|part| { + // let shards = code.encode(part)?; + // let tree = + // fastcrypto::merkle::MerkleTree::::build_from_unserialized( + // shards.iter(), + // )?; + // Ok(tree.root()) + // }) + // .collect::>>()?; + // + // // "response" polynomials from https://eprint.iacr.org/2023/536.pdf + // let challenge = compute_challenge( + // &self.random_oracle(), + // &full_public_keys, + // &blinding_commit, + // &shared, + // &roots, + // ); + // let mut response_polynomial = blinding_poly; + // for (p_l, gamma_l) in polynomials.into_iter().zip_eq(&challenge) { + // response_polynomial += &(p_l * gamma_l); + // } + // + // Ok(Message { + // full_public_keys, + // blinding_commit, + // ciphertext, 
+ // response_polynomial, + // roots, + // }) + // } + // } fn assert_valid(processed_message: ProcessedMessage) -> ReceiverOutput { if let ProcessedMessage::Valid(output) = processed_message { diff --git a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs index 9ab77447c3..c78a946971 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::ecies_v1; -use crate::ecies_v1::RecoveryPackage; +use crate::ecies_v1::{RecoveryPackage, SharedComponents}; use crate::nodes::PartyId; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; @@ -27,12 +27,14 @@ impl Complaint { pub fn check( &self, enc_pk: &ecies_v1::PublicKey, - ciphertext: &ecies_v1::MultiRecipientEncryption, + ciphertext: &[u8], + shared: &SharedComponents, random_oracle: &RandomOracle, verifier: impl Fn(&S) -> FastCryptoResult<()>, ) -> FastCryptoResult<()> { // Check that the recovery package is valid, and if not, return an error since the complaint is invalid. 
- let buffer = ciphertext.decrypt_with_recovery_package( + let buffer = shared.decrypt_with_recovery_package( + ciphertext, &self.proof, &random_oracle.extend(&Recovery(self.accuser_id).to_string()), &random_oracle.extend(&Encryption.to_string()), @@ -65,7 +67,7 @@ impl Complaint { pub fn create( accuser_id: PartyId, - ciphertext: &ecies_v1::MultiRecipientEncryption, + ciphertext: &ecies_v1::SharedComponents, enc_sk: &ecies_v1::PrivateKey, random_oracle: &RandomOracle, rng: &mut impl AllowedRng, diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 1503ed77ce..a81b66edf4 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -236,11 +236,11 @@ mod tests { .collect::>(); // Each dealer creates a message - let message = dealer.create_message(&mut rng).unwrap(); + let messages = dealer.create_message(&mut rng).unwrap(); // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. 
- receivers.iter().for_each(|receiver| { + receivers.iter().zip(messages).for_each(|(receiver, message)| { let output = assert_valid_batch(receiver.process_message(&message).unwrap()); presigning_outputs .get_mut(&receiver.id) From c7f5f1469091d409baf66bcaa95ddaebc17a23b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 13:36:11 +0200 Subject: [PATCH 06/91] Add avid part of message --- fastcrypto-tbls/src/ecies_v1.rs | 1 - fastcrypto-tbls/src/nodes.rs | 19 ++++-- .../src/threshold_schnorr/batch_avss.rs | 68 +++++++++++++++---- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 17 +++-- 4 files changed, 77 insertions(+), 28 deletions(-) diff --git a/fastcrypto-tbls/src/ecies_v1.rs b/fastcrypto-tbls/src/ecies_v1.rs index 2344f0ba18..935a4419a4 100644 --- a/fastcrypto-tbls/src/ecies_v1.rs +++ b/fastcrypto-tbls/src/ecies_v1.rs @@ -323,7 +323,6 @@ where } } - pub fn decrypt_with_recovery_package( &self, enc: &[u8], diff --git a/fastcrypto-tbls/src/nodes.rs b/fastcrypto-tbls/src/nodes.rs index c854a810f9..00c312325e 100644 --- a/fastcrypto-tbls/src/nodes.rs +++ b/fastcrypto-tbls/src/nodes.rs @@ -1,12 +1,12 @@ // Copyright (c) 2022, Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use itertools::Itertools; use crate::ecies_v1; use crate::types::ShareIndex; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::GroupElement; use fastcrypto::hash::{Blake2b256, Digest, HashFunction}; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use tracing::debug; @@ -161,14 +161,23 @@ impl Nodes { /// Given an iterator over a set of items, one per share index, this function groups them into /// a vector of vectors, one per node, according to the share ids of the nodes. /// Returns error if the number of items does not match the total weight. 
- pub fn collect_to_nodes(&self, items: impl Iterator + ExactSizeIterator) -> FastCryptoResult>> { + pub fn collect_to_nodes( + &self, + items: impl Iterator + ExactSizeIterator, + ) -> FastCryptoResult>> { if items.len() != self.total_weight as usize { return Err(FastCryptoError::InvalidInput); } let mut items = items; - Ok(self.node_ids_iter().map(|id| { - items.by_ref().take(self.weight_of(id).unwrap() as usize).collect_vec() - }).collect_vec()) + Ok(self + .node_ids_iter() + .map(|id| { + items + .by_ref() + .take(self.weight_of(id).unwrap() as usize) + .collect_vec() + }) + .collect_vec()) } /// Create a new set of nodes. Nodes must have consecutive ids starting from 0. diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 522a660e2f..eb4f34deaf 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -24,13 +24,13 @@ use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; use fastcrypto::hash::{Blake2b256, HashFunction, Sha3_512}; use fastcrypto::merkle; +use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; use fastcrypto::twisted_elgamal::Ciphertext; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::iter::repeat_with; -use fastcrypto::merkle::MerkleTree; /// This represents a Dealer in the AVSS. /// There is exactly one dealer who creates the shares and broadcasts the encrypted shares. @@ -65,6 +65,7 @@ pub struct Message { ciphertext: Vec, response_polynomial: Poly, roots: Vec, + avid_message: Vec<(Vec, merkle::MerkleProof)>, } /// The result of processing a message by a receiver: either valid shares or a complaint. 
@@ -300,12 +301,18 @@ impl Dealer { (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards )?; - let shards = ciphertexts.iter().map(|c| { - let shards = code.encode(c)?; // One shard per weight - self.nodes.collect_to_nodes(shards.into_iter()) // Grouped to nodes by weight - }).collect::>>()?; + let shards = ciphertexts + .iter() + .map(|c| { + let shards = code.encode(c)?; // One shard per weight + self.nodes.collect_to_nodes(shards.into_iter()) // Grouped to nodes by weight + }) + .collect::>>()?; - let trees = shards.iter().map(MerkleTree::::build_from_unserialized).collect::>>()?; + let trees = shards + .iter() + .map(MerkleTree::::build_from_unserialized) + .collect::>>()?; let roots = trees.iter().map(MerkleTree::root).collect_vec(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf @@ -330,14 +337,34 @@ impl Dealer { .to_vec(), )?; - Ok(ciphertexts.into_iter().map(|ciphertext| Message { - full_public_keys: full_public_keys.clone(), - shared: shared.clone(), - response_polynomial: response_polynomial.clone(), - roots: roots.clone(), - blinding_commit, - ciphertext, - }).collect_vec()) + let messages = self + .nodes + .node_ids_iter() + .map(|id| { + shards + .iter() + .zip(&trees) + .map(|(s, tree)| { + let proof = tree.get_proof(id as usize)?; + Ok((s[id as usize].clone(), proof)) + }) + .collect::>>() + }) + .collect::>>()?; + + Ok(ciphertexts + .into_iter() + .zip(messages) + .map(|(ciphertext, avid_message)| Message { + full_public_keys: full_public_keys.clone(), + shared: shared.clone(), + response_polynomial: response_polynomial.clone(), + roots: roots.clone(), + blinding_commit, + ciphertext, + avid_message, + }) + .collect_vec()) } fn random_oracle(&self) -> RandomOracle { @@ -400,8 +427,18 @@ impl Receiver { response_polynomial, roots, shared, + avid_message, } = message; + roots + .iter() + .zip(avid_message) + .for_each(|(root, (shards, proof))| { + assert!(proof + .verify_proof_with_unserialized_leaf(root, 
&shards, self.id as usize) + .is_ok()) + }); + if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { @@ -419,7 +456,8 @@ impl Receiver { } let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - shared.verify(&random_oracle_encryption) + shared + .verify(&random_oracle_encryption) .map_err(|_| InvalidMessage)?; // Decrypt my shares diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index a81b66edf4..40c231aef9 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -240,13 +240,16 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. - receivers.iter().zip(messages).for_each(|(receiver, message)| { - let output = assert_valid_batch(receiver.process_message(&message).unwrap()); - presigning_outputs - .get_mut(&receiver.id) - .unwrap() - .push(output); - }); + receivers + .iter() + .zip(messages) + .for_each(|(receiver, message)| { + let output = assert_valid_batch(receiver.process_message(&message).unwrap()); + presigning_outputs + .get_mut(&receiver.id) + .unwrap() + .push(output); + }); } // Each party can process their presigs locally from the secret shared nonces From 0c093b2fecf04604aeab232047d4db77b907147e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 13:45:12 +0200 Subject: [PATCH 07/91] Roots as part of avid msg --- .../src/threshold_schnorr/batch_avss.rs | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index eb4f34deaf..ffc045765c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -64,8 +64,7 @@ pub struct Message { shared: SharedComponents, 
ciphertext: Vec, response_polynomial: Poly, - roots: Vec, - avid_message: Vec<(Vec, merkle::MerkleProof)>, + avid_message: Vec<(merkle::Node, Vec, merkle::MerkleProof)>, } /// The result of processing a message by a receiver: either valid shares or a complaint. @@ -313,6 +312,22 @@ impl Dealer { .iter() .map(MerkleTree::::build_from_unserialized) .collect::>>()?; + + let messages = self + .nodes + .node_ids_iter() + .map(|id| { + shards + .iter() + .zip(&trees) + .map(|(s, tree)| { + let proof = tree.get_proof(id as usize)?; + Ok((tree.root(), s[id as usize].clone(), proof)) + }) + .collect::>>() + }) + .collect::>>()?; + let roots = trees.iter().map(MerkleTree::root).collect_vec(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf @@ -337,21 +352,6 @@ impl Dealer { .to_vec(), )?; - let messages = self - .nodes - .node_ids_iter() - .map(|id| { - shards - .iter() - .zip(&trees) - .map(|(s, tree)| { - let proof = tree.get_proof(id as usize)?; - Ok((s[id as usize].clone(), proof)) - }) - .collect::>>() - }) - .collect::>>()?; - Ok(ciphertexts .into_iter() .zip(messages) @@ -359,7 +359,6 @@ impl Dealer { full_public_keys: full_public_keys.clone(), shared: shared.clone(), response_polynomial: response_polynomial.clone(), - roots: roots.clone(), blinding_commit, ciphertext, avid_message, @@ -425,19 +424,15 @@ impl Receiver { blinding_commit, ciphertext, response_polynomial, - roots, shared, avid_message, } = message; - roots - .iter() - .zip(avid_message) - .for_each(|(root, (shards, proof))| { - assert!(proof - .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) - .is_ok()) - }); + avid_message.iter().for_each(|(root, shards, proof)| { + assert!(proof + .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) + .is_ok()) + }); if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 @@ -610,12 +605,17 @@ fn compute_challenge( } fn compute_challenge_from_message(random_oracle: 
&RandomOracle, message: &Message) -> Vec { + let roots = message + .avid_message + .iter() + .map(|m| m.0.clone()) + .collect_vec(); compute_challenge( random_oracle, &message.full_public_keys, &message.blinding_commit, &message.shared, - &message.roots, + &roots, ) } From 2c978828642edfa723fbf057c2f0aecd612c02b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 29 Apr 2026 14:38:54 +0200 Subject: [PATCH 08/91] Add echo_message function --- .../src/threshold_schnorr/batch_avss.rs | 68 ++++++++++++++++--- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 12 +++- 2 files changed, 67 insertions(+), 13 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index ffc045765c..16ffa28363 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -22,7 +22,7 @@ use crate::types::{get_uniform_value, ShareIndex}; use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage}; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; -use fastcrypto::hash::{Blake2b256, HashFunction, Sha3_512}; +use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; use fastcrypto::merkle; use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; @@ -67,6 +67,15 @@ pub struct Message { avid_message: Vec<(merkle::Node, Vec, merkle::MerkleProof)>, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EchoMessage { + r: merkle::Node, + pi_i: merkle::MerkleProof, + r_i: merkle::Node, + s_ij: Vec, + hash: Digest<32>, +} + /// The result of processing a message by a receiver: either valid shares or a complaint. 
#[allow(clippy::large_enum_variant)] pub enum ProcessedMessage { @@ -408,6 +417,28 @@ impl Receiver { }) } + pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { + if message.avid_message.iter().any(|(root, shards, proof)| { + proof + .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) + .is_err() + }) { + return Err(InvalidMessage); + } + + let r = MerkleTree::::build_from_unserialized(message.avid_message.iter().map(|(root, _, _)| root))?.root(); + let digest = compute_common_message_hash(message); + Ok(message.avid_message.iter().enumerate().map(|(i, (root, shards, proof))| { + EchoMessage { + r: r.clone(), + pi_i: proof.clone(), + r_i: root.clone(), + s_ij: shards.clone(), + hash: digest, + } + }).collect_vec()) + } + /// 2. Each receiver processes the message, verifies and decrypts its shares. /// /// If this works, the receiver can store the shares and contribute a signature on the message to a certificate. @@ -418,22 +449,16 @@ impl Receiver { /// If the message is valid but contains invalid shares for this receiver, the call will succeed but will return a [Complaint]. /// /// 3. When f+t signatures have been collected in the certificate, the receivers can now verify the certificate and finish the protocol. 
- pub fn process_message(&self, message: &Message) -> FastCryptoResult { + pub fn process_message(&self, message: &Message, echo_message: &[EchoMessage]) -> FastCryptoResult { let Message { full_public_keys, blinding_commit, ciphertext, response_polynomial, shared, - avid_message, + avid_message: _, } = message; - avid_message.iter().for_each(|(root, shards, proof)| { - assert!(proof - .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) - .is_ok()) - }); - if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { @@ -619,6 +644,20 @@ fn compute_challenge_from_message(random_oracle: &RandomOracle, message: &Messag ) } +fn compute_common_message_hash(message: &Message) -> Digest<32> { + let Message { + shared, + full_public_keys, + blinding_commit, + ciphertext, + response_polynomial, + avid_message: _, + } = message; + let mut hasher = Blake2b256::new(); + hasher.update(bcs::to_bytes(&(shared, full_public_keys, blinding_commit, ciphertext, response_polynomial)).unwrap()); + hasher.finalize() +} + #[cfg(test)] mod tests { use super::{ @@ -697,13 +736,20 @@ mod tests { let messages = dealer.create_message(&mut rng).unwrap(); + let echo_messages = receivers + .iter() + .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) + .collect::>>() + .unwrap(); + let all_shares = receivers .iter() .zip(messages) - .map(|(receiver, message)| { + .zip(echo_messages) + .map(|((receiver, message), echo_message)| { ( receiver.id, - assert_valid(receiver.process_message(&message).unwrap()), + assert_valid(receiver.process_message(&message, &echo_message).unwrap()), ) }) .collect::>(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 40c231aef9..ea2f8f2035 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -92,6 +92,7 @@ mod tests { use itertools::Itertools; use 
std::collections::HashMap; use std::hash::Hash; + use fastcrypto::error::FastCryptoResult; #[test] fn test_e2e() { @@ -238,13 +239,20 @@ mod tests { // Each dealer creates a message let messages = dealer.create_message(&mut rng).unwrap(); + let echo_messages = receivers + .iter() + .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) + .collect::>>() + .unwrap(); + // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. receivers .iter() .zip(messages) - .for_each(|(receiver, message)| { - let output = assert_valid_batch(receiver.process_message(&message).unwrap()); + .zip(&echo_messages) + .for_each(|((receiver, message), echo_message)| { + let output = assert_valid_batch(receiver.process_message(&message, &echo_message).unwrap()); presigning_outputs .get_mut(&receiver.id) .unwrap() From 14cf48aff95c3d326cedd359daad87fe0870761e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 30 Apr 2026 10:37:01 +0200 Subject: [PATCH 09/91] e2e works --- .../src/threshold_schnorr/batch_avss.rs | 138 +++++++++++++++--- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 17 ++- .../src/threshold_schnorr/reed_solomon.rs | 1 + 3 files changed, 125 insertions(+), 31 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 16ffa28363..dcbb835e87 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -19,7 +19,7 @@ use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; -use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage}; +use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage, NotEnoughWeight}; use 
fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; @@ -27,7 +27,7 @@ use fastcrypto::merkle; use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; use fastcrypto::twisted_elgamal::Ciphertext; -use itertools::Itertools; +use itertools::{repeat_n, Itertools}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::iter::repeat_with; @@ -51,6 +51,7 @@ pub struct Receiver { enc_secret_key: PrivateKey, nodes: Nodes, sid: Vec, + f: u16, t: u16, /// The total number of nonces that the receiver expects to receive from the dealer. batch_size: usize, @@ -69,11 +70,13 @@ pub struct Message { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EchoMessage { + party: PartyId, r: merkle::Node, pi_i: merkle::MerkleProof, r_i: merkle::Node, s_ij: Vec, hash: Digest<32>, + pub pi_ij: merkle::MerkleProof, } /// The result of processing a message by a receiver: either valid shares or a complaint. 
@@ -396,6 +399,7 @@ impl Receiver { nodes: Nodes, id: PartyId, dealer_id: PartyId, + f: u16, t: u16, sid: Vec, enc_secret_key: PrivateKey, @@ -412,6 +416,7 @@ impl Receiver { enc_secret_key, nodes, sid, + f, t, batch_size, }) @@ -426,17 +431,27 @@ impl Receiver { return Err(InvalidMessage); } - let r = MerkleTree::::build_from_unserialized(message.avid_message.iter().map(|(root, _, _)| root))?.root(); + let tree = MerkleTree::::build_from_unserialized( + message.avid_message.iter().map(|(root, _, _)| root), + )?; + let r = tree.root(); let digest = compute_common_message_hash(message); - Ok(message.avid_message.iter().enumerate().map(|(i, (root, shards, proof))| { - EchoMessage { - r: r.clone(), - pi_i: proof.clone(), - r_i: root.clone(), - s_ij: shards.clone(), - hash: digest, - } - }).collect_vec()) + message + .avid_message + .iter() + .enumerate() + .map(|(i, (root, shards, proof))| { + Ok(EchoMessage { + party: self.id, + r: r.clone(), + pi_ij: proof.clone(), + pi_i: tree.get_proof(i)?, + r_i: root.clone(), + s_ij: shards.clone(), + hash: digest, + }) + }) + .collect::>>() } /// 2. Each receiver processes the message, verifies and decrypts its shares. @@ -449,7 +464,11 @@ impl Receiver { /// If the message is valid but contains invalid shares for this receiver, the call will succeed but will return a [Complaint]. /// /// 3. When f+t signatures have been collected in the certificate, the receivers can now verify the certificate and finish the protocol. 
- pub fn process_message(&self, message: &Message, echo_message: &[EchoMessage]) -> FastCryptoResult { + pub fn process_message( + &self, + message: &Message, + echo_messages: &[EchoMessage], + ) -> FastCryptoResult { let Message { full_public_keys, blinding_commit, @@ -459,9 +478,72 @@ impl Receiver { avid_message: _, } = message; + let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); + shared + .verify(&random_oracle_encryption) + .map_err(|_| InvalidMessage)?; + + if self + .nodes + .total_weight_of(echo_messages.iter().map(|echo_message| &echo_message.party))? + < self.nodes.total_weight() - 2 * self.f + { + return Err(NotEnoughWeight( + (self.nodes.total_weight() - 2 * self.f) as usize, + )); + } + if echo_messages.iter().any(|echo_message| { + echo_message + .pi_ij + .verify_proof_with_unserialized_leaf( + &echo_message.r_i, + &echo_message.s_ij, + echo_message.party as usize, + ) + .is_err() + || echo_message + .pi_i + .verify_proof_with_unserialized_leaf( + &echo_message.r, + &echo_message.r_i, + self.id as usize, + ) + .is_err() + }) { + return Err(InvalidMessage); + } + + let shards: Vec> = self + .nodes + .node_ids_iter() + .flat_map(|id| { + match echo_messages + .iter() + .find(|echo_message| echo_message.party == id) + { + Some(echo_message) => echo_message + .s_ij + .iter() + .map(|s| Some(s.clone())) + .collect_vec(), + None => { + repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec() + } + } + }) + .collect::>(); + + let code = ErasureCoder::new( + self.nodes.total_weight() as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards + ) + .expect("should not fail with valid parameters"); + let ciphertext = code.decode(shards)?; + if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { + let d = response_polynomial.degree(); return Err(InvalidMessage); } @@ -475,16 +557,10 @@ impl Receiver { return Err(InvalidMessage); } - let 
random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - shared - .verify(&random_oracle_encryption) - .map_err(|_| InvalidMessage)?; - - // Decrypt my shares - let plaintext = shared.decrypt( - ciphertext, + let plaintext = message.shared.decrypt( + &ciphertext, &self.enc_secret_key, - &random_oracle_encryption, + &self.random_oracle().extend(&Encryption.to_string()), self.id as usize, ); @@ -654,7 +730,16 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { avid_message: _, } = message; let mut hasher = Blake2b256::new(); - hasher.update(bcs::to_bytes(&(shared, full_public_keys, blinding_commit, ciphertext, response_polynomial)).unwrap()); + hasher.update( + bcs::to_bytes(&( + shared, + full_public_keys, + blinding_commit, + ciphertext, + response_polynomial, + )) + .unwrap(), + ); hasher.finalize() } @@ -725,6 +810,7 @@ mod tests { nodes.clone(), id as u16, dealer_id, + f, t, sid.clone(), secret_key, @@ -742,6 +828,12 @@ mod tests { .collect::>>() .unwrap(); + let echo_messages = receivers + .iter() + .enumerate() + .map(|(i, _)| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) + .collect_vec(); + let all_shares = receivers .iter() .zip(messages) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index ea2f8f2035..9ad982c32c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -86,13 +86,13 @@ mod tests { use crate::threshold_schnorr::signing::{aggregate_signatures, generate_partial_signatures}; use crate::threshold_schnorr::{avss, batch_avss, EG, G, S}; use crate::types::{get_uniform_value, IndexedValue, ShareIndex}; + use fastcrypto::error::FastCryptoResult; use fastcrypto::groups::secp256k1::schnorr::SchnorrPublicKey; use fastcrypto::groups::{GroupElement, Scalar}; use fastcrypto::traits::AllowedRng; use itertools::Itertools; use std::collections::HashMap; use std::hash::Hash; - use 
fastcrypto::error::FastCryptoResult; #[test] fn test_e2e() { @@ -228,6 +228,7 @@ mod tests { id as u16, dealer_id, t, + f, sid.clone(), enc_secret_key.clone(), batch_size_per_weight, @@ -247,17 +248,17 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. - receivers - .iter() - .zip(messages) - .zip(&echo_messages) - .for_each(|((receiver, message), echo_message)| { - let output = assert_valid_batch(receiver.process_message(&message, &echo_message).unwrap()); + receivers.iter().zip(messages).zip(&echo_messages).for_each( + |((receiver, message), echo_message)| { + let output = assert_valid_batch( + receiver.process_message(&message, &echo_message).unwrap(), + ); presigning_outputs .get_mut(&receiver.id) .unwrap() .push(output); - }); + }, + ); } // Each party can process their presigs locally from the secret shared nonces diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index af54c4230e..c393906771 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -264,6 +264,7 @@ mod tests { *shard = None; } + let coder = ErasureCoder::new(n, k).unwrap(); let recovered = coder.decode(opt_shards).unwrap(); let shard_size = len.div_ceil(k); let expected_len = shard_size * k; From d25e64b49c44884b22b16ed22c97e1854e44ab20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 30 Apr 2026 11:03:13 +0200 Subject: [PATCH 10/91] Add root check --- fastcrypto-tbls/benches/batch_avss.rs | 4 +- .../src/threshold_schnorr/batch_avss.rs | 120 +++++++++++------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +- 3 files changed, 77 insertions(+), 51 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index ac3c8485be..79a70b5999 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ 
b/fastcrypto-tbls/benches/batch_avss.rs @@ -124,7 +124,7 @@ mod batch_avss_benches { process.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), - |b| b.iter(|| r1.process_message(&message).unwrap()), + |b| b.iter(|| r1.process_echo_messages(&message).unwrap()), ); } } @@ -148,7 +148,7 @@ mod batch_avss_benches { let message = d.create_message(&mut thread_rng()).unwrap(); let r = setup_receiver(1, dealer_id as u16, t, w, &keys, batch_size_per_weight); - assert_valid_batch(r.process_message(&message).unwrap()) + assert_valid_batch(r.process_echo_messages(&message).unwrap()) }) .collect_vec(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index dcbb835e87..bd3d38d8e3 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -79,6 +79,14 @@ pub struct EchoMessage { pub pi_ij: merkle::MerkleProof, } +pub enum Vote { + Vote { + digest: Digest<32>, + root: merkle::Node, + }, + FaultyDealer, +} + /// The result of processing a message by a receiver: either valid shares or a complaint. #[allow(clippy::large_enum_variant)] pub enum ProcessedMessage { @@ -97,7 +105,7 @@ pub struct ReceiverOutput { /// If we say that node i has a weight `W_i`, we have /// `indices().len() == shares_for_secret(i).len() == weight() = W_i` /// -/// These can be created either by decrypting the shares from the dealer (see [Receiver::process_message]) or by recovering them from complaint responses. +/// These can be created either by decrypting the shares from the dealer (see [Receiver::process_echo_messages]) or by recovering them from complaint responses. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SharesForNode { pub shares: Vec, @@ -464,7 +472,7 @@ impl Receiver { /// If the message is valid but contains invalid shares for this receiver, the call will succeed but will return a [Complaint]. /// /// 3. 
When f+t signatures have been collected in the certificate, the receivers can now verify the certificate and finish the protocol. - pub fn process_message( + pub fn process_echo_messages( &self, message: &Message, echo_messages: &[EchoMessage], @@ -478,39 +486,54 @@ impl Receiver { avid_message: _, } = message; - let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - shared - .verify(&random_oracle_encryption) - .map_err(|_| InvalidMessage)?; + if full_public_keys.len() != self.batch_size + || response_polynomial.degree() != self.t as usize - 1 + { + return Err(InvalidMessage); + } - if self - .nodes - .total_weight_of(echo_messages.iter().map(|echo_message| &echo_message.party))? - < self.nodes.total_weight() - 2 * self.f + // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} + let challenge = compute_challenge_from_message(&self.random_oracle(), message); + if G::generator() * response_polynomial.c0() + != blinding_commit + + G::multi_scalar_mul(&challenge, full_public_keys) + .expect("Inputs have constant lengths") { - return Err(NotEnoughWeight( - (self.nodes.total_weight() - 2 * self.f) as usize, - )); + return Err(InvalidMessage); } - if echo_messages.iter().any(|echo_message| { - echo_message - .pi_ij - .verify_proof_with_unserialized_leaf( - &echo_message.r_i, - &echo_message.s_ij, - echo_message.party as usize, - ) - .is_err() - || echo_message - .pi_i + + // Filter out invalid echo messages + let echo_messages = echo_messages + .iter() + .filter(|echo_message| { + // TODO: Check digest? 
+ echo_message + .pi_ij .verify_proof_with_unserialized_leaf( - &echo_message.r, &echo_message.r_i, - self.id as usize, + &echo_message.s_ij, + echo_message.party as usize, ) - .is_err() - }) { - return Err(InvalidMessage); + .is_ok() + && echo_message + .pi_i + .verify_proof_with_unserialized_leaf( + &echo_message.r, + &echo_message.r_i, + self.id as usize, + ) + .is_ok() + }) + .cloned() + .collect_vec(); + + let required_weight = self.nodes.total_weight() - self.f; + if self + .nodes + .total_weight_of(echo_messages.iter().map(|echo_message| &echo_message.party))? + < required_weight + { + return Err(NotEnoughWeight(required_weight as usize)); } let shards: Vec> = self @@ -538,25 +561,12 @@ impl Receiver { (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards ) .expect("should not fail with valid parameters"); - let ciphertext = code.decode(shards)?; - - if full_public_keys.len() != self.batch_size - || response_polynomial.degree() != self.t as usize - 1 - { - let d = response_polynomial.degree(); - return Err(InvalidMessage); - } - - // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_message(&self.random_oracle(), message); - if G::generator() * response_polynomial.c0() - != blinding_commit - + G::multi_scalar_mul(&challenge, full_public_keys) - .expect("Inputs have constant lengths") - { - return Err(InvalidMessage); - } + let ciphertext = code.decode(shards)?; + let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); + shared + .verify(&random_oracle_encryption) + .map_err(|_| InvalidMessage)?; let plaintext = message.shared.decrypt( &ciphertext, &self.enc_secret_key, @@ -564,6 +574,16 @@ impl Receiver { self.id as usize, ); + let new_shards = self + .nodes + .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; + let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; + let new_root = new_tree.root(); + + if new_root != 
message.avid_message[self.id as usize].0 { + return Err(InvalidMessage); + } + match SharesForNode::from_bytes(&plaintext).and_then(|my_shares| { // If there is an error in this scope, we create a complaint instead of returning an error verify_shares( @@ -841,7 +861,11 @@ mod tests { .map(|((receiver, message), echo_message)| { ( receiver.id, - assert_valid(receiver.process_message(&message, &echo_message).unwrap()), + assert_valid( + receiver + .process_echo_messages(&message, &echo_message) + .unwrap(), + ), ) }) .collect::>(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 9ad982c32c..69ff899743 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -251,7 +251,9 @@ mod tests { receivers.iter().zip(messages).zip(&echo_messages).for_each( |((receiver, message), echo_message)| { let output = assert_valid_batch( - receiver.process_message(&message, &echo_message).unwrap(), + receiver + .process_echo_messages(&message, &echo_message) + .unwrap(), ); presigning_outputs .get_mut(&receiver.id) From 90e045989df7ca202e88dc5170cb5a4b82cb1d80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 30 Apr 2026 11:24:27 +0200 Subject: [PATCH 11/91] Remove ciphertext from message --- .../src/threshold_schnorr/batch_avss.rs | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index bd3d38d8e3..3aec3890b8 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -63,7 +63,6 @@ pub struct Message { full_public_keys: Vec, blinding_commit: G, shared: SharedComponents, - ciphertext: Vec, response_polynomial: Poly, avid_message: Vec<(merkle::Node, Vec, merkle::MerkleProof)>, } @@ -372,15 +371,12 @@ impl Dealer { .to_vec(), )?; - 
Ok(ciphertexts - .into_iter() - .zip(messages) - .map(|(ciphertext, avid_message)| Message { + Ok(messages.into_iter() + .map(|avid_message| Message { full_public_keys: full_public_keys.clone(), shared: shared.clone(), response_polynomial: response_polynomial.clone(), blinding_commit, - ciphertext, avid_message, }) .collect_vec()) @@ -480,7 +476,6 @@ impl Receiver { let Message { full_public_keys, blinding_commit, - ciphertext, response_polynomial, shared, avid_message: _, @@ -563,6 +558,15 @@ impl Receiver { .expect("should not fail with valid parameters"); let ciphertext = code.decode(shards)?; + let new_shards = self + .nodes + .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; + let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; + let new_root = new_tree.root(); + if new_root != message.avid_message[self.id as usize].0 { + return Err(InvalidMessage); + } + let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); shared .verify(&random_oracle_encryption) @@ -573,17 +577,6 @@ impl Receiver { &self.random_oracle().extend(&Encryption.to_string()), self.id as usize, ); - - let new_shards = self - .nodes - .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; - let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; - let new_root = new_tree.root(); - - if new_root != message.avid_message[self.id as usize].0 { - return Err(InvalidMessage); - } - match SharesForNode::from_bytes(&plaintext).and_then(|my_shares| { // If there is an error in this scope, we create a complaint instead of returning an error verify_shares( @@ -620,7 +613,7 @@ impl Receiver { let challenge = compute_challenge_from_message(&self.random_oracle(), message); complaint.check( &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, - &message.ciphertext, + &[], &message.shared, &self.random_oracle(), |shares: &SharesForNode| { @@ -745,7 +738,6 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { shared, 
full_public_keys, blinding_commit, - ciphertext, response_polynomial, avid_message: _, } = message; @@ -755,7 +747,6 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { shared, full_public_keys, blinding_commit, - ciphertext, response_polynomial, )) .unwrap(), From 68548c8c03e26a2501a8d0312f6f43c73bfc53f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 08:51:03 +0200 Subject: [PATCH 12/91] refactoring --- .../src/threshold_schnorr/batch_avss.rs | 207 +++-- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 844 +++++++++--------- 2 files changed, 541 insertions(+), 510 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 3aec3890b8..ae749f0b7e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -60,11 +60,23 @@ pub struct Receiver { /// The message broadcast by the dealer. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Message { + common: CommonMessage, + avid: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CommonMessage { full_public_keys: Vec, blinding_commit: G, shared: SharedComponents, response_polynomial: Poly, - avid_message: Vec<(merkle::Node, Vec, merkle::MerkleProof)>, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct AvidMessage { + root: merkle::Node, + shards: Vec, + proof: merkle::MerkleProof, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -125,7 +137,7 @@ pub struct ShareBatch { impl ShareBatch { /// Verify a batch of shares using the given challenge. 
- fn verify(&self, message: &Message, challenge: &[S]) -> FastCryptoResult<()> { + fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { if challenge.len() != self.batch_size() { return Err(InvalidInput); } @@ -172,7 +184,7 @@ impl SharesForNode { }) } - fn verify(&self, message: &Message, challenge: &[S]) -> FastCryptoResult<()> { + fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { for shares in &self.shares { shares.verify(message, challenge)?; } @@ -348,6 +360,7 @@ impl Dealer { .collect::>>()?; let roots = trees.iter().map(MerkleTree::root).collect_vec(); + let root = MerkleTree::::build_from_unserialized(roots.iter())?.root(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -355,7 +368,7 @@ impl Dealer { &full_public_keys, &blinding_commit, &shared, - &roots, + &root, ); // Get the first t evaluations for the response polynomial and use these to compute the coefficients @@ -372,14 +385,19 @@ impl Dealer { )?; Ok(messages.into_iter() - .map(|avid_message| Message { + .map(|m| Message { + common: CommonMessage { full_public_keys: full_public_keys.clone(), shared: shared.clone(), response_polynomial: response_polynomial.clone(), blinding_commit, - avid_message, - }) - .collect_vec()) + }, + avid: m.iter().map(|m| AvidMessage { + root: m.0.clone(), + shards: m.1.clone(), + proof: m.2.clone(), + }).collect_vec() + }).collect_vec()) } fn random_oracle(&self) -> RandomOracle { @@ -427,7 +445,7 @@ impl Receiver { } pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { - if message.avid_message.iter().any(|(root, shards, proof)| { + if message.avid.iter().any(|AvidMessage { root, shards, proof }| { proof .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) .is_err() @@ -436,15 +454,15 @@ impl Receiver { } let tree = MerkleTree::::build_from_unserialized( - message.avid_message.iter().map(|(root, _, _)| root), + 
message.avid.iter().map(| AvidMessage { root, .. }| root), )?; let r = tree.root(); let digest = compute_common_message_hash(message); message - .avid_message + .avid .iter() .enumerate() - .map(|(i, (root, shards, proof))| { + .map(|(i, AvidMessage { root, shards, proof })| { Ok(EchoMessage { party: self.id, r: r.clone(), @@ -470,38 +488,12 @@ impl Receiver { /// 3. When f+t signatures have been collected in the certificate, the receivers can now verify the certificate and finish the protocol. pub fn process_echo_messages( &self, - message: &Message, echo_messages: &[EchoMessage], - ) -> FastCryptoResult { - let Message { - full_public_keys, - blinding_commit, - response_polynomial, - shared, - avid_message: _, - } = message; - - if full_public_keys.len() != self.batch_size - || response_polynomial.degree() != self.t as usize - 1 - { - return Err(InvalidMessage); - } - - // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_message(&self.random_oracle(), message); - if G::generator() * response_polynomial.c0() - != blinding_commit - + G::multi_scalar_mul(&challenge, full_public_keys) - .expect("Inputs have constant lengths") - { - return Err(InvalidMessage); - } - + ) -> FastCryptoResult> { // Filter out invalid echo messages let echo_messages = echo_messages .iter() .filter(|echo_message| { - // TODO: Check digest? 
echo_message .pi_ij .verify_proof_with_unserialized_leaf( @@ -511,13 +503,13 @@ impl Receiver { ) .is_ok() && echo_message - .pi_i - .verify_proof_with_unserialized_leaf( - &echo_message.r, - &echo_message.r_i, - self.id as usize, - ) - .is_ok() + .pi_i + .verify_proof_with_unserialized_leaf( + &echo_message.r, + &echo_message.r_i, + self.id as usize, + ) + .is_ok() }) .cloned() .collect_vec(); @@ -555,15 +547,47 @@ impl Receiver { self.nodes.total_weight() as usize, (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards ) - .expect("should not fail with valid parameters"); + .expect("should not fail with valid parameters"); let ciphertext = code.decode(shards)?; let new_shards = self .nodes .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; - let new_root = new_tree.root(); - if new_root != message.avid_message[self.id as usize].0 { + + if new_tree.root() != echo_messages[0].r_i { + return Err(InvalidMessage); + } + + Ok(ciphertext) + } + + pub fn process_message( + &self, + root: &merkle::Node, + message: &CommonMessage, + ciphertext: &[u8], + ) -> FastCryptoResult { + let CommonMessage { + full_public_keys, + blinding_commit, + response_polynomial, + shared, + } = message; + + if full_public_keys.len() != self.batch_size + || response_polynomial.degree() != self.t as usize - 1 + { + return Err(InvalidMessage); + } + + // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} + let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); + if G::generator() * response_polynomial.c0() + != blinding_commit + + G::multi_scalar_mul(&challenge, full_public_keys) + .expect("Inputs have constant lengths") + { return Err(InvalidMessage); } @@ -571,7 +595,7 @@ impl Receiver { shared .verify(&random_oracle_encryption) .map_err(|_| InvalidMessage)?; - let plaintext = message.shared.decrypt( + let plaintext = shared.decrypt( &ciphertext, 
&self.enc_secret_key, &self.random_oracle().extend(&Encryption.to_string()), @@ -589,10 +613,11 @@ impl Receiver { )?; Ok(my_shares) }) { - Ok(my_shares) => Ok(ProcessedMessage::Valid(ReceiverOutput { - my_shares, - public_keys: full_public_keys.clone(), - })), + Ok(my_shares) => Ok(ProcessedMessage::Valid( + ReceiverOutput { + my_shares, + public_keys: full_public_keys.clone(), + })), Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( self.id, shared, @@ -603,17 +628,19 @@ impl Receiver { } } + /// 4. Upon receiving a complaint, a receiver verifies it and responds with its shares. pub fn handle_complaint( &self, - message: &Message, + message: &CommonMessage, + root: &merkle::Node, complaint: &Complaint, my_output: &ReceiverOutput, ) -> FastCryptoResult> { - let challenge = compute_challenge_from_message(&self.random_oracle(), message); + let challenge = compute_challenge_from_message(&self.random_oracle(), &root, &message); complaint.check( &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, - &[], + &[], // TODO &message.shared, &self.random_oracle(), |shares: &SharesForNode| { @@ -621,7 +648,7 @@ impl Receiver { shares, &self.nodes, complaint.accuser_id, - message, + &message, &challenge, self.batch_size, ) @@ -637,7 +664,8 @@ impl Receiver { /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( &self, - message: &Message, + message: &CommonMessage, + root: &merkle::Node, responses: Vec>, ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. 
@@ -650,13 +678,13 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let challenge = compute_challenge_from_message(&self.random_oracle(), message); + let challenge = compute_challenge_from_message(&self.random_oracle(), &root, &message); let response_shares = responses .into_iter() .filter_map(|response| { response .shares - .verify(message, &challenge) + .verify(&message, &challenge) .ok() .map(|_| response.shares) }) @@ -669,7 +697,7 @@ impl Receiver { } let my_shares = SharesForNode::recover(self, &response_shares)?; - my_shares.verify(message, &challenge)?; + my_shares.verify(&message, &challenge)?; Ok(ReceiverOutput { my_shares, @@ -691,7 +719,7 @@ fn verify_shares( shares: &SharesForNode, nodes: &Nodes, receiver: PartyId, - message: &Message, + message: &CommonMessage, challenge: &[S], expected_batch_size: usize, ) -> FastCryptoResult<()> { @@ -708,38 +736,35 @@ fn compute_challenge( c: &[G], c_prime: &G, shared: &SharedComponents, - roots: &[merkle::Node], + root: &merkle::Node, ) -> Vec { let random_oracle = random_oracle.extend(&Challenge.to_string()); let inner_hash = - Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, roots)).unwrap()).digest; + Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, root)).unwrap()).digest; (0..c.len()) .map(|l| random_oracle.evaluate_to_group_element(&(l, inner_hash.to_vec()))) .collect() } -fn compute_challenge_from_message(random_oracle: &RandomOracle, message: &Message) -> Vec { - let roots = message - .avid_message - .iter() - .map(|m| m.0.clone()) - .collect_vec(); +fn compute_challenge_from_message(random_oracle: &RandomOracle, root: &merkle::Node, message: &CommonMessage) -> Vec { compute_challenge( random_oracle, &message.full_public_keys, &message.blinding_commit, &message.shared, - &roots, + &root, ) } fn compute_common_message_hash(message: &Message) -> Digest<32> { let Message { - shared, - full_public_keys, - blinding_commit, - response_polynomial, - 
avid_message: _, + common: CommonMessage { + shared, + full_public_keys, + blinding_commit, + response_polynomial, + }, + .. } = message; let mut hasher = Blake2b256::new(); hasher.update( @@ -845,21 +870,27 @@ mod tests { .map(|(i, _)| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - let all_shares = receivers + let ciphertexts = receivers .iter() - .zip(messages) - .zip(echo_messages) + .zip(messages.iter()) + .zip(echo_messages.iter()) .map(|((receiver, message), echo_message)| { - ( - receiver.id, - assert_valid( receiver - .process_echo_messages(&message, &echo_message) - .unwrap(), - ), - ) + .process_echo_messages(&echo_message) + .unwrap() }) - .collect::>(); + .collect_vec(); + + let all_shares = receivers.iter().zip(ciphertexts).zip(messages).map(|((receiver, ciphertext), message)| { + match receiver.process_message( + &echo_messages[receiver.id as usize][0].r, + &message.common, + &ciphertext, + ) { + Ok(ProcessedMessage::Valid(output)) => (receiver.id, output), + _ => panic!("All receivers should be able to process the message in the happy path"), + } + }).collect::>(); let secrets = (0..dealer.batch_size) .map(|l| { diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 69ff899743..aa2221413b 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -93,428 +93,428 @@ mod tests { use itertools::Itertools; use std::collections::HashMap; use std::hash::Hash; - - #[test] - fn test_e2e() { - // No complaints, all honest - let t = 3; - let f = 2; - let weights = [1, 2, 2, 2]; - let n = weights.len(); - - let batch_size_per_weight: u16 = 10; - - let mut rng = rand::thread_rng(); - let sks = (0..n) - .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - .collect::>(); - let nodes = Nodes::new( - sks.iter() - .enumerate() - .zip(weights) - .map(|((id, sk), weight)| Node { - id: id as u16, - pk: PublicKey::from_private_key(sk), 
- weight, - }) - .collect::>(), - ) - .unwrap(); - - // - // DKG - // - - // Map from each party to the outputs it has received - let mut dkg_outputs = HashMap::>::new(); - nodes.node_ids_iter().for_each(|id| { - dkg_outputs.insert(id, HashMap::new()); - }); - - let mut messages = Vec::new(); - for dealer_id in nodes.node_ids_iter() { - let sid = format!("dkg-test-session-{}", dealer_id).into_bytes(); - let dealer: avss::Dealer = - avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); - let receivers = sks - .iter() - .enumerate() - .map(|(id, enc_secret_key)| { - avss::Receiver::new( - nodes.clone(), - id as u16, - t, - sid.clone(), - None, - enc_secret_key.clone(), - ) - }) - .collect::>(); - - // Each dealer creates a message - let message = dealer.create_message(&mut rng); - messages.push(message.clone()); - - // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. - receivers.iter().for_each(|receiver| { - let output = assert_valid(receiver.process_message(&message).unwrap()); - dkg_outputs - .get_mut(&receiver.id()) - .unwrap() - .insert(dealer_id, output); - }); - } - - // The dealers to form the certificate should have weight >= t, and are the ones whose outputs will be used to create the final shares. - let dkg_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - - // Now, each party has collected their outputs from all dealers. We use the output from the dealers in dkg_cert create the final shares for signing. - // Each party should still keep the outputs from all dealers until the end of the epoch to handle complaints. 
- let merged_shares = nodes - .iter() - .map(|node| { - ( - node.id, - avss::ReceiverOutput::complete_dkg( - t, - &nodes, - restrict(dkg_outputs.get(&node.id).unwrap(), dkg_cert.into_iter()), - ) - .unwrap(), - ) - }) - .collect::>(); - - // All receivers should now have the same verifying key - let vk = get_uniform_value(merged_shares.values().map(|output| output.vk)).unwrap(); - - // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. - // In practice, the parties should never do this... - let shares = merged_shares - .values() - .flat_map(|output| output.my_shares.shares.clone()) - .take(t as usize); - let sk = Poly::recover_c0(t, shares).unwrap(); - assert_eq!(G::generator() * sk, vk); - - // - // PRESIGNING - // - - // Generate a batch of nonces for each party's share - let mut presigning_outputs = HashMap::>::new(); - nodes.node_ids_iter().for_each(|id| { - presigning_outputs.insert(id, Vec::new()); - }); - - // Each dealer generates a batch of presigs per share they control. - for dealer_id in nodes.node_ids_iter() { - let sid = format!("presig-test-session-{}", dealer_id).into_bytes(); - let dealer: batch_avss::Dealer = batch_avss::Dealer::new( - nodes.clone(), - dealer_id, - f, - t, - sid.clone(), - batch_size_per_weight, - ) - .unwrap(); - let receivers = sks - .iter() - .enumerate() - .map(|(id, enc_secret_key)| { - batch_avss::Receiver::new( - nodes.clone(), - id as u16, - dealer_id, - t, - f, - sid.clone(), - enc_secret_key.clone(), - batch_size_per_weight, - ) - .unwrap() - }) - .collect::>(); - - // Each dealer creates a message - let messages = dealer.create_message(&mut rng).unwrap(); - - let echo_messages = receivers - .iter() - .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) - .collect::>>() - .unwrap(); - - // Each receiver processes the message. - // In this case, we assume all are honest and there are no complaints. 
- receivers.iter().zip(messages).zip(&echo_messages).for_each( - |((receiver, message), echo_message)| { - let output = assert_valid_batch( - receiver - .process_echo_messages(&message, &echo_message) - .unwrap(), - ); - presigning_outputs - .get_mut(&receiver.id) - .unwrap() - .push(output); - }, - ); - } - - // Each party can process their presigs locally from the secret shared nonces - let mut presigs = presigning_outputs - .into_iter() - .map(|(id, outputs)| { - ( - id, - Presignatures::new(outputs, batch_size_per_weight, f as usize).unwrap(), - ) - }) - .collect::>(); - assert_eq!( - presigs.get(&PartyId::from(1u8)).unwrap().len(), - batch_size_per_weight as usize * (weights.iter().sum::() as usize - f as usize) - ); - - // - // SIGNING - // - - let message = b"Hello, world!"; - - // Mock a value from the random beacon - let beacon_value = S::rand(&mut rng); - - // Each party generates their partial signatures - let partial_signatures = nodes - .iter() - .map(|node| { - generate_partial_signatures( - message, - presigs.get_mut(&node.id).unwrap().next().unwrap(), - &beacon_value, - &merged_shares.get(&node.id).unwrap().my_shares, - &vk, - None, - ) - .unwrap() - }) - .collect_vec(); - - // The public parts should all be the same - let public_presig = get_uniform_value( - partial_signatures - .iter() - .map(|partial_signature| partial_signature.0), - ) - .unwrap(); - - // Aggregate partial signatures - let signature = aggregate_signatures( - message, - &public_presig, - &beacon_value, - &partial_signatures - .iter() - .flat_map(|(_, s)| s.clone()) - .collect_vec(), - t, - &vk, - None, - ) - .unwrap(); - - // Check that this produced a valid signature - SchnorrPublicKey::try_from(&vk) - .unwrap() - .verify(message, &signature) - .unwrap(); - - // - // KEY ROTATION - // - - // Map from each party to the ordered list of outputs it has received. - // Here, each party will act as dealer multiple times -- once per share they have. 
- let mut dkg_outputs_after_rotation = - HashMap::<(PartyId, ShareIndex), avss::PartialOutput>::new(); - let mut messages = HashMap::<(PartyId, ShareIndex), avss::Message>::new(); - - for dealer_id in nodes.node_ids_iter() { - for share_index in nodes.share_ids_of(dealer_id).unwrap() { - let sid = - format!("key-rotation-test-session-{}-{}", dealer_id, share_index).into_bytes(); - - // Each dealer uses their existing share as the secret to reshare - let secret = merged_shares - .get(&dealer_id) - .unwrap() - .share_for_index(share_index) - .unwrap() - .value; - let dealer: avss::Dealer = - avss::Dealer::new(Some(secret), nodes.clone(), t, sid.clone()).unwrap(); - - let receivers = sks - .iter() - .enumerate() - .map(|(id, enc_secret_key)| { - let commitment = merged_shares - .get(&(id as u16)) - .unwrap() - .commitments - .iter() - .find(|c| c.index == share_index) - .unwrap() - .value; - avss::Receiver::new( - nodes.clone(), - id as u16, - t, - sid.clone(), - Some(commitment), - enc_secret_key.clone(), - ) - }) - .collect::>(); - - // Each dealer creates a message - let message = dealer.create_message(&mut rng); - messages.insert((dealer_id, share_index), message.clone()); - - // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. - receivers.iter().for_each(|receiver| { - let output = assert_valid(receiver.process_message(&message).unwrap()); - dkg_outputs_after_rotation.insert((receiver.id(), share_index), output); - }); - } - } - - // The first t dealers (counted by weight) form the certificate and are the ones whose outputs will be used to create the final shares. - let key_rotation_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - let share_indices_in_cert = key_rotation_cert - .iter() - .flat_map(|id| nodes.share_ids_of(*id).unwrap()) - .collect_vec(); - - // Now, each party has collected their outputs from all dealers and can form their new shares from the ones in the certificate. 
- let merged_shares_after_rotation = nodes - .node_ids_iter() - .map(|receiver_id| { - let my_shares_from_cert = share_indices_in_cert - .iter() - .map(|&index| IndexedValue { - index, - value: dkg_outputs_after_rotation - .get(&(receiver_id, index)) - .unwrap() - .clone(), - }) - .collect_vec(); - ( - receiver_id, - avss::ReceiverOutput::complete_key_rotation( - t, - receiver_id, - &nodes, - &my_shares_from_cert - .into_iter() - .take(t as usize) - .collect_vec(), - ) - .unwrap(), - ) - }) - .collect::>(); - - // The verifying key should be the same as before - for output in merged_shares_after_rotation.values() { - assert_eq!(output.vk, vk); - } - - // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. - // In practice, the parties should never do this... - let shares = merged_shares_after_rotation - .values() - .flat_map(|output| output.my_shares.shares.clone()) - .take(t as usize); - let sk = Poly::recover_c0(t, shares).unwrap(); - assert_eq!(G::generator() * sk, vk); - - // Check commitments on the reshared secret from the first dealer - let commitment_1 = merged_shares_after_rotation - .get(&0) - .unwrap() - .commitments - .first() - .unwrap(); - let secret_1 = merged_shares_after_rotation - .get(&0) - .unwrap() - .share_for_index(commitment_1.index) - .unwrap() - .value; - assert_eq!(G::generator() * secret_1, commitment_1.value); - - // - // SIGNING (again) - // - - let message_2 = b"Hello again, world!"; - - // Mock a value from the random beacon - let beacon_value = S::rand(&mut rng); - - // Each party generates their partial signatures - let partial_signatures = nodes - .iter() - .map(|node| { - generate_partial_signatures( - message_2, - presigs.get_mut(&node.id).unwrap().next().unwrap(), - &beacon_value, - &merged_shares_after_rotation - .get(&node.id) - .unwrap() - .my_shares, - &vk, - None, - ) - .unwrap() - }) - .collect_vec(); - - // The public parts should all be the same - let 
public_presig = get_uniform_value( - partial_signatures - .iter() - .map(|partial_signature| partial_signature.0), - ) - .unwrap(); - - // Aggregate partial signatures - let signature_2 = aggregate_signatures( - message_2, - &public_presig, - &beacon_value, - &partial_signatures - .iter() - .flat_map(|(_, s)| s.clone()) - .collect_vec(), - t, - &vk, - None, - ) - .unwrap(); - - // Check that this produced a valid signature - SchnorrPublicKey::try_from(&vk) - .unwrap() - .verify(message_2, &signature_2) - .unwrap(); - } + // + // #[test] + // fn test_e2e() { + // // No complaints, all honest + // let t = 3; + // let f = 2; + // let weights = [1, 2, 2, 2]; + // let n = weights.len(); + // + // let batch_size_per_weight: u16 = 10; + // + // let mut rng = rand::thread_rng(); + // let sks = (0..n) + // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + // .collect::>(); + // let nodes = Nodes::new( + // sks.iter() + // .enumerate() + // .zip(weights) + // .map(|((id, sk), weight)| Node { + // id: id as u16, + // pk: PublicKey::from_private_key(sk), + // weight, + // }) + // .collect::>(), + // ) + // .unwrap(); + // + // // + // // DKG + // // + // + // // Map from each party to the outputs it has received + // let mut dkg_outputs = HashMap::>::new(); + // nodes.node_ids_iter().for_each(|id| { + // dkg_outputs.insert(id, HashMap::new()); + // }); + // + // let mut messages = Vec::new(); + // for dealer_id in nodes.node_ids_iter() { + // let sid = format!("dkg-test-session-{}", dealer_id).into_bytes(); + // let dealer: avss::Dealer = + // avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); + // let receivers = sks + // .iter() + // .enumerate() + // .map(|(id, enc_secret_key)| { + // avss::Receiver::new( + // nodes.clone(), + // id as u16, + // t, + // sid.clone(), + // None, + // enc_secret_key.clone(), + // ) + // }) + // .collect::>(); + // + // // Each dealer creates a message + // let message = dealer.create_message(&mut rng); + // 
messages.push(message.clone()); + // + // // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. + // receivers.iter().for_each(|receiver| { + // let output = assert_valid(receiver.process_message(&message).unwrap()); + // dkg_outputs + // .get_mut(&receiver.id()) + // .unwrap() + // .insert(dealer_id, output); + // }); + // } + // + // // The dealers to form the certificate should have weight >= t, and are the ones whose outputs will be used to create the final shares. + // let dkg_cert = [PartyId::from(1u8), PartyId::from(2u8)]; + // + // // Now, each party has collected their outputs from all dealers. We use the output from the dealers in dkg_cert create the final shares for signing. + // // Each party should still keep the outputs from all dealers until the end of the epoch to handle complaints. + // let merged_shares = nodes + // .iter() + // .map(|node| { + // ( + // node.id, + // avss::ReceiverOutput::complete_dkg( + // t, + // &nodes, + // restrict(dkg_outputs.get(&node.id).unwrap(), dkg_cert.into_iter()), + // ) + // .unwrap(), + // ) + // }) + // .collect::>(); + // + // // All receivers should now have the same verifying key + // let vk = get_uniform_value(merged_shares.values().map(|output| output.vk)).unwrap(); + // + // // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. + // // In practice, the parties should never do this... 
+ // let shares = merged_shares + // .values() + // .flat_map(|output| output.my_shares.shares.clone()) + // .take(t as usize); + // let sk = Poly::recover_c0(t, shares).unwrap(); + // assert_eq!(G::generator() * sk, vk); + // + // // + // // PRESIGNING + // // + // + // // Generate a batch of nonces for each party's share + // let mut presigning_outputs = HashMap::>::new(); + // nodes.node_ids_iter().for_each(|id| { + // presigning_outputs.insert(id, Vec::new()); + // }); + // + // // Each dealer generates a batch of presigs per share they control. + // for dealer_id in nodes.node_ids_iter() { + // let sid = format!("presig-test-session-{}", dealer_id).into_bytes(); + // let dealer: batch_avss::Dealer = batch_avss::Dealer::new( + // nodes.clone(), + // dealer_id, + // f, + // t, + // sid.clone(), + // batch_size_per_weight, + // ) + // .unwrap(); + // let receivers = sks + // .iter() + // .enumerate() + // .map(|(id, enc_secret_key)| { + // batch_avss::Receiver::new( + // nodes.clone(), + // id as u16, + // dealer_id, + // t, + // f, + // sid.clone(), + // enc_secret_key.clone(), + // batch_size_per_weight, + // ) + // .unwrap() + // }) + // .collect::>(); + // + // // Each dealer creates a message + // let messages = dealer.create_message(&mut rng).unwrap(); + // + // let echo_messages = receivers + // .iter() + // .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) + // .collect::>>() + // .unwrap(); + // + // // Each receiver processes the message. + // // In this case, we assume all are honest and there are no complaints. 
+ // receivers.iter().zip(messages).zip(&echo_messages).for_each( + // |((receiver, message), echo_message)| { + // let output = assert_valid_batch( + // receiver + // .process_echo_messages(&echo_message) + // .unwrap(), + // ); + // presigning_outputs + // .get_mut(&receiver.id) + // .unwrap() + // .push(output); + // }, + // ); + // } + // + // // Each party can process their presigs locally from the secret shared nonces + // let mut presigs = presigning_outputs + // .into_iter() + // .map(|(id, outputs)| { + // ( + // id, + // Presignatures::new(outputs, batch_size_per_weight, f as usize).unwrap(), + // ) + // }) + // .collect::>(); + // assert_eq!( + // presigs.get(&PartyId::from(1u8)).unwrap().len(), + // batch_size_per_weight as usize * (weights.iter().sum::() as usize - f as usize) + // ); + // + // // + // // SIGNING + // // + // + // let message = b"Hello, world!"; + // + // // Mock a value from the random beacon + // let beacon_value = S::rand(&mut rng); + // + // // Each party generates their partial signatures + // let partial_signatures = nodes + // .iter() + // .map(|node| { + // generate_partial_signatures( + // message, + // presigs.get_mut(&node.id).unwrap().next().unwrap(), + // &beacon_value, + // &merged_shares.get(&node.id).unwrap().my_shares, + // &vk, + // None, + // ) + // .unwrap() + // }) + // .collect_vec(); + // + // // The public parts should all be the same + // let public_presig = get_uniform_value( + // partial_signatures + // .iter() + // .map(|partial_signature| partial_signature.0), + // ) + // .unwrap(); + // + // // Aggregate partial signatures + // let signature = aggregate_signatures( + // message, + // &public_presig, + // &beacon_value, + // &partial_signatures + // .iter() + // .flat_map(|(_, s)| s.clone()) + // .collect_vec(), + // t, + // &vk, + // None, + // ) + // .unwrap(); + // + // // Check that this produced a valid signature + // SchnorrPublicKey::try_from(&vk) + // .unwrap() + // .verify(message, &signature) + // 
.unwrap(); + // + // // + // // KEY ROTATION + // // + // + // // Map from each party to the ordered list of outputs it has received. + // // Here, each party will act as dealer multiple times -- once per share they have. + // let mut dkg_outputs_after_rotation = + // HashMap::<(PartyId, ShareIndex), avss::PartialOutput>::new(); + // let mut messages = HashMap::<(PartyId, ShareIndex), avss::Message>::new(); + // + // for dealer_id in nodes.node_ids_iter() { + // for share_index in nodes.share_ids_of(dealer_id).unwrap() { + // let sid = + // format!("key-rotation-test-session-{}-{}", dealer_id, share_index).into_bytes(); + // + // // Each dealer uses their existing share as the secret to reshare + // let secret = merged_shares + // .get(&dealer_id) + // .unwrap() + // .share_for_index(share_index) + // .unwrap() + // .value; + // let dealer: avss::Dealer = + // avss::Dealer::new(Some(secret), nodes.clone(), t, sid.clone()).unwrap(); + // + // let receivers = sks + // .iter() + // .enumerate() + // .map(|(id, enc_secret_key)| { + // let commitment = merged_shares + // .get(&(id as u16)) + // .unwrap() + // .commitments + // .iter() + // .find(|c| c.index == share_index) + // .unwrap() + // .value; + // avss::Receiver::new( + // nodes.clone(), + // id as u16, + // t, + // sid.clone(), + // Some(commitment), + // enc_secret_key.clone(), + // ) + // }) + // .collect::>(); + // + // // Each dealer creates a message + // let message = dealer.create_message(&mut rng); + // messages.insert((dealer_id, share_index), message.clone()); + // + // // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. 
+ // receivers.iter().for_each(|receiver| { + // let output = assert_valid(receiver.process_message(&message).unwrap()); + // dkg_outputs_after_rotation.insert((receiver.id(), share_index), output); + // }); + // } + // } + // + // // The first t dealers (counted by weight) form the certificate and are the ones whose outputs will be used to create the final shares. + // let key_rotation_cert = [PartyId::from(1u8), PartyId::from(2u8)]; + // let share_indices_in_cert = key_rotation_cert + // .iter() + // .flat_map(|id| nodes.share_ids_of(*id).unwrap()) + // .collect_vec(); + // + // // Now, each party has collected their outputs from all dealers and can form their new shares from the ones in the certificate. + // let merged_shares_after_rotation = nodes + // .node_ids_iter() + // .map(|receiver_id| { + // let my_shares_from_cert = share_indices_in_cert + // .iter() + // .map(|&index| IndexedValue { + // index, + // value: dkg_outputs_after_rotation + // .get(&(receiver_id, index)) + // .unwrap() + // .clone(), + // }) + // .collect_vec(); + // ( + // receiver_id, + // avss::ReceiverOutput::complete_key_rotation( + // t, + // receiver_id, + // &nodes, + // &my_shares_from_cert + // .into_iter() + // .take(t as usize) + // .collect_vec(), + // ) + // .unwrap(), + // ) + // }) + // .collect::>(); + // + // // The verifying key should be the same as before + // for output in merged_shares_after_rotation.values() { + // assert_eq!(output.vk, vk); + // } + // + // // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. + // // In practice, the parties should never do this... 
+ // let shares = merged_shares_after_rotation + // .values() + // .flat_map(|output| output.my_shares.shares.clone()) + // .take(t as usize); + // let sk = Poly::recover_c0(t, shares).unwrap(); + // assert_eq!(G::generator() * sk, vk); + // + // // Check commitments on the reshared secret from the first dealer + // let commitment_1 = merged_shares_after_rotation + // .get(&0) + // .unwrap() + // .commitments + // .first() + // .unwrap(); + // let secret_1 = merged_shares_after_rotation + // .get(&0) + // .unwrap() + // .share_for_index(commitment_1.index) + // .unwrap() + // .value; + // assert_eq!(G::generator() * secret_1, commitment_1.value); + // + // // + // // SIGNING (again) + // // + // + // let message_2 = b"Hello again, world!"; + // + // // Mock a value from the random beacon + // let beacon_value = S::rand(&mut rng); + // + // // Each party generates their partial signatures + // let partial_signatures = nodes + // .iter() + // .map(|node| { + // generate_partial_signatures( + // message_2, + // presigs.get_mut(&node.id).unwrap().next().unwrap(), + // &beacon_value, + // &merged_shares_after_rotation + // .get(&node.id) + // .unwrap() + // .my_shares, + // &vk, + // None, + // ) + // .unwrap() + // }) + // .collect_vec(); + // + // // The public parts should all be the same + // let public_presig = get_uniform_value( + // partial_signatures + // .iter() + // .map(|partial_signature| partial_signature.0), + // ) + // .unwrap(); + // + // // Aggregate partial signatures + // let signature_2 = aggregate_signatures( + // message_2, + // &public_presig, + // &beacon_value, + // &partial_signatures + // .iter() + // .flat_map(|(_, s)| s.clone()) + // .collect_vec(), + // t, + // &vk, + // None, + // ) + // .unwrap(); + // + // // Check that this produced a valid signature + // SchnorrPublicKey::try_from(&vk) + // .unwrap() + // .verify(message_2, &signature_2) + // .unwrap(); + // } fn assert_valid_batch( processed_message: batch_avss::ProcessedMessage, From 
14ba032ce9729d783736d34910eccfc9a8257b4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 08:57:16 +0200 Subject: [PATCH 13/91] Return root from process_echo_messages --- .../src/threshold_schnorr/batch_avss.rs | 179 +++++++++++------- 1 file changed, 110 insertions(+), 69 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index ae749f0b7e..1617f48432 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -384,20 +384,25 @@ impl Dealer { .to_vec(), )?; - Ok(messages.into_iter() + Ok(messages + .into_iter() .map(|m| Message { common: CommonMessage { - full_public_keys: full_public_keys.clone(), - shared: shared.clone(), - response_polynomial: response_polynomial.clone(), - blinding_commit, - }, - avid: m.iter().map(|m| AvidMessage { - root: m.0.clone(), - shards: m.1.clone(), - proof: m.2.clone(), - }).collect_vec() - }).collect_vec()) + full_public_keys: full_public_keys.clone(), + shared: shared.clone(), + response_polynomial: response_polynomial.clone(), + blinding_commit, + }, + avid: m + .iter() + .map(|m| AvidMessage { + root: m.0.clone(), + shards: m.1.clone(), + proof: m.2.clone(), + }) + .collect_vec(), + }) + .collect_vec()) } fn random_oracle(&self) -> RandomOracle { @@ -445,16 +450,22 @@ impl Receiver { } pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { - if message.avid.iter().any(|AvidMessage { root, shards, proof }| { - proof - .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) - .is_err() - }) { + if message.avid.iter().any( + |AvidMessage { + root, + shards, + proof, + }| { + proof + .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) + .is_err() + }, + ) { return Err(InvalidMessage); } let tree = MerkleTree::::build_from_unserialized( - message.avid.iter().map(| AvidMessage { root, .. 
}| root), + message.avid.iter().map(|AvidMessage { root, .. }| root), )?; let r = tree.root(); let digest = compute_common_message_hash(message); @@ -462,17 +473,26 @@ impl Receiver { .avid .iter() .enumerate() - .map(|(i, AvidMessage { root, shards, proof })| { - Ok(EchoMessage { - party: self.id, - r: r.clone(), - pi_ij: proof.clone(), - pi_i: tree.get_proof(i)?, - r_i: root.clone(), - s_ij: shards.clone(), - hash: digest, - }) - }) + .map( + |( + i, + AvidMessage { + root, + shards, + proof, + }, + )| { + Ok(EchoMessage { + party: self.id, + r: r.clone(), + pi_ij: proof.clone(), + pi_i: tree.get_proof(i)?, + r_i: root.clone(), + s_ij: shards.clone(), + hash: digest, + }) + }, + ) .collect::>>() } @@ -489,7 +509,7 @@ impl Receiver { pub fn process_echo_messages( &self, echo_messages: &[EchoMessage], - ) -> FastCryptoResult> { + ) -> FastCryptoResult<(Vec, merkle::Node)> { // Filter out invalid echo messages let echo_messages = echo_messages .iter() @@ -503,17 +523,29 @@ impl Receiver { ) .is_ok() && echo_message - .pi_i - .verify_proof_with_unserialized_leaf( - &echo_message.r, - &echo_message.r_i, - self.id as usize, - ) - .is_ok() + .pi_i + .verify_proof_with_unserialized_leaf( + &echo_message.r, + &echo_message.r_i, + self.id as usize, + ) + .is_ok() }) .cloned() .collect_vec(); + // TODO: It is up to the caller to ensure that all echo messages have the same digest and root. 
+ let r = get_uniform_value( + echo_messages + .iter() + .cloned() + .map(|echo_message| echo_message.r), + ) + .ok_or(InvalidMessage)?; + if get_uniform_value(echo_messages.iter().map(|echo_message| echo_message.hash)).is_none() { + return Err(InvalidMessage); + } + let required_weight = self.nodes.total_weight() - self.f; if self .nodes @@ -547,7 +579,7 @@ impl Receiver { self.nodes.total_weight() as usize, (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards ) - .expect("should not fail with valid parameters"); + .expect("should not fail with valid parameters"); let ciphertext = code.decode(shards)?; let new_shards = self @@ -559,7 +591,7 @@ impl Receiver { return Err(InvalidMessage); } - Ok(ciphertext) + Ok((ciphertext, r)) } pub fn process_message( @@ -585,8 +617,8 @@ impl Receiver { let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); if G::generator() * response_polynomial.c0() != blinding_commit - + G::multi_scalar_mul(&challenge, full_public_keys) - .expect("Inputs have constant lengths") + + G::multi_scalar_mul(&challenge, full_public_keys) + .expect("Inputs have constant lengths") { return Err(InvalidMessage); } @@ -613,11 +645,10 @@ impl Receiver { )?; Ok(my_shares) }) { - Ok(my_shares) => Ok(ProcessedMessage::Valid( - ReceiverOutput { - my_shares, - public_keys: full_public_keys.clone(), - })), + Ok(my_shares) => Ok(ProcessedMessage::Valid(ReceiverOutput { + my_shares, + public_keys: full_public_keys.clone(), + })), Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( self.id, shared, @@ -628,7 +659,6 @@ impl Receiver { } } - /// 4. Upon receiving a complaint, a receiver verifies it and responds with its shares. 
pub fn handle_complaint( &self, @@ -746,7 +776,11 @@ fn compute_challenge( .collect() } -fn compute_challenge_from_message(random_oracle: &RandomOracle, root: &merkle::Node, message: &CommonMessage) -> Vec { +fn compute_challenge_from_message( + random_oracle: &RandomOracle, + root: &merkle::Node, + message: &CommonMessage, +) -> Vec { compute_challenge( random_oracle, &message.full_public_keys, @@ -758,12 +792,13 @@ fn compute_challenge_from_message(random_oracle: &RandomOracle, root: &merkle::N fn compute_common_message_hash(message: &Message) -> Digest<32> { let Message { - common: CommonMessage { - shared, - full_public_keys, - blinding_commit, - response_polynomial, - }, + common: + CommonMessage { + shared, + full_public_keys, + blinding_commit, + response_polynomial, + }, .. } = message; let mut hasher = Blake2b256::new(); @@ -797,6 +832,7 @@ mod tests { use fastcrypto::error::FastCryptoResult; use fastcrypto::groups::GroupElement; use fastcrypto::hash::Blake2b256; + use fastcrypto::merkle; use fastcrypto::traits::AllowedRng; use itertools::Itertools; use std::collections::HashMap; @@ -870,27 +906,32 @@ mod tests { .map(|(i, _)| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - let ciphertexts = receivers + let (ciphertexts, roots): (Vec>, Vec) = receivers .iter() .zip(messages.iter()) .zip(echo_messages.iter()) .map(|((receiver, message), echo_message)| { - receiver - .process_echo_messages(&echo_message) - .unwrap() + receiver.process_echo_messages(&echo_message).unwrap() }) - .collect_vec(); + .unzip(); - let all_shares = receivers.iter().zip(ciphertexts).zip(messages).map(|((receiver, ciphertext), message)| { - match receiver.process_message( - &echo_messages[receiver.id as usize][0].r, - &message.common, - &ciphertext, - ) { - Ok(ProcessedMessage::Valid(output)) => (receiver.id, output), - _ => panic!("All receivers should be able to process the message in the happy path"), - } - }).collect::>(); + let all_shares = receivers 
+ .iter() + .zip(ciphertexts) + .zip(messages) + .map(|((receiver, ciphertext), message)| { + match receiver.process_message( + &roots[receiver.id as usize], + &message.common, + &ciphertext, + ) { + Ok(ProcessedMessage::Valid(output)) => (receiver.id, output), + _ => panic!( + "All receivers should be able to process the message in the happy path" + ), + } + }) + .collect::>(); let secrets = (0..dealer.batch_size) .map(|l| { From dca6b734d3faf35b413df50ec72b6132a8ef2b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 12:15:28 +0200 Subject: [PATCH 14/91] More refactor --- .../src/threshold_schnorr/batch_avss.rs | 154 +++++++++++------- 1 file changed, 95 insertions(+), 59 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 1617f48432..a3b56eb076 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -13,6 +13,7 @@ use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Node, Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; +use crate::threshold_schnorr::batch_avss::DecryptedShares::{FaultyDealer, Valid}; use crate::threshold_schnorr::bcs::BCSSerialized; use crate::threshold_schnorr::complaint::{Complaint, ComplaintResponse}; use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; @@ -90,12 +91,15 @@ pub struct EchoMessage { pub pi_ij: merkle::MerkleProof, } -pub enum Vote { - Vote { - digest: Digest<32>, - root: merkle::Node, - }, - FaultyDealer, +pub struct ProcessedEchoMessages { + ciphertext: Vec, + root: merkle::Node, + r_j: merkle::Node, +} + +pub enum DecryptedShares { + Valid(ReceiverOutput), + FaultyDealer { correctDec: bool }, } /// The result of processing a message by a receiver: either valid shares or a complaint. 
@@ -103,6 +107,7 @@ pub enum Vote { pub enum ProcessedMessage { Valid(ReceiverOutput), Complaint(Complaint), + Blame(EchoMessage), } /// The output of a receiver which is a batch of shares and public keys for all nonces. @@ -509,7 +514,7 @@ impl Receiver { pub fn process_echo_messages( &self, echo_messages: &[EchoMessage], - ) -> FastCryptoResult<(Vec, merkle::Node)> { + ) -> FastCryptoResult { // Filter out invalid echo messages let echo_messages = echo_messages .iter() @@ -535,13 +540,20 @@ impl Receiver { .collect_vec(); // TODO: It is up to the caller to ensure that all echo messages have the same digest and root. - let r = get_uniform_value( + let root = get_uniform_value( echo_messages .iter() .cloned() .map(|echo_message| echo_message.r), ) .ok_or(InvalidMessage)?; + let r_j = get_uniform_value( + echo_messages + .iter() + .cloned() + .map(|echo_message| echo_message.r_i), + ) + .ok_or(InvalidMessage)?; if get_uniform_value(echo_messages.iter().map(|echo_message| echo_message.hash)).is_none() { return Err(InvalidMessage); } @@ -582,24 +594,43 @@ impl Receiver { .expect("should not fail with valid parameters"); let ciphertext = code.decode(shards)?; + Ok(ProcessedEchoMessages { + ciphertext, + root, + r_j, + }) + } + + /// The check r_j' == r_j from the paper + pub fn verify_ciphertext( + &self, + ciphertext: &[u8], + root: &merkle::Node, + ) -> FastCryptoResult<()> { + let code = ErasureCoder::new( + self.nodes.total_weight() as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards + ) + .expect("should not fail with valid parameters"); + let new_shards = self .nodes .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; - if new_tree.root() != echo_messages[0].r_i { + if new_tree.root() != *root { return Err(InvalidMessage); } - Ok((ciphertext, r)) + Ok(()) } - pub fn process_message( + /// If a party gets echo messages, the ciphertext verifies + pub fn 
decrypt_shares( &self, - root: &merkle::Node, + processed_echo_messages: ProcessedEchoMessages, message: &CommonMessage, - ciphertext: &[u8], - ) -> FastCryptoResult { + ) -> FastCryptoResult { let CommonMessage { full_public_keys, blinding_commit, @@ -607,14 +638,20 @@ impl Receiver { shared, } = message; + let ProcessedEchoMessages { + ciphertext, + root, + r_j, + } = processed_echo_messages; if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { return Err(InvalidMessage); } + // TODO: What should happen if these checks fail? // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); + let challenge = compute_challenge_from_message(&self.random_oracle(), &root, message); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -623,39 +660,42 @@ impl Receiver { return Err(InvalidMessage); } + // Check r_j' == r_j from the paper + let faulty_dealer = self.verify_ciphertext(&ciphertext, &r_j).is_err(); + let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - shared + let decrypted_shares = shared .verify(&random_oracle_encryption) - .map_err(|_| InvalidMessage)?; - let plaintext = shared.decrypt( - &ciphertext, - &self.enc_secret_key, - &self.random_oracle().extend(&Encryption.to_string()), - self.id as usize, - ); - match SharesForNode::from_bytes(&plaintext).and_then(|my_shares| { - // If there is an error in this scope, we create a complaint instead of returning an error - verify_shares( - &my_shares, - &self.nodes, - self.id, - message, - &challenge, - self.batch_size, - )?; - Ok(my_shares) - }) { - Ok(my_shares) => Ok(ProcessedMessage::Valid(ReceiverOutput { - my_shares, + .map(|_| { + shared.decrypt( + &ciphertext, + &self.enc_secret_key, + &random_oracle_encryption, + self.id as usize, + ) + }) + .and_then(|plaintext| 
SharesForNode::from_bytes(&plaintext)) + .and_then(|my_shares| { + verify_shares( + &my_shares, + &self.nodes, + self.id, + message, + &challenge, + self.batch_size, + )?; + Ok(my_shares) + }); + + if faulty_dealer || decrypted_shares.is_err() { + Ok(FaultyDealer { + correctDec: decrypted_shares.is_ok(), + }) + } else { + Ok(Valid(ReceiverOutput { + my_shares: decrypted_shares?, public_keys: full_public_keys.clone(), - })), - Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( - self.id, - shared, - &self.enc_secret_key, - &self.random_oracle(), - &mut rand::thread_rng(), - ))), + })) } } @@ -817,8 +857,8 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Complaint, Dealer, Message, ProcessedMessage, Receiver, ReceiverOutput, - ShareBatch, SharesForNode, + compute_challenge, Complaint, Dealer, DecryptedShares, Message, ProcessedEchoMessages, + ProcessedMessage, Receiver, ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; @@ -906,31 +946,27 @@ mod tests { .map(|(i, _)| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - let (ciphertexts, roots): (Vec>, Vec) = receivers + let processed_echo_messages = receivers .iter() .zip(messages.iter()) .zip(echo_messages.iter()) .map(|((receiver, message), echo_message)| { receiver.process_echo_messages(&echo_message).unwrap() }) - .unzip(); + .collect_vec(); let all_shares = receivers .iter() - .zip(ciphertexts) + .zip(processed_echo_messages) .zip(messages) - .map(|((receiver, ciphertext), message)| { - match receiver.process_message( - &roots[receiver.id as usize], - &message.common, - &ciphertext, - ) { - Ok(ProcessedMessage::Valid(output)) => (receiver.id, output), + .map( + |((receiver, pem), message)| match receiver.decrypt_shares(pem, &message.common) { + Ok(DecryptedShares::Valid(output)) => (receiver.id, output), _ => panic!( "All 
receivers should be able to process the message in the happy path" ), - } - }) + }, + ) .collect::>(); let secrets = (0..dealer.batch_size) From 5b182b0af106d4df702576d1e8de6fdca68323e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 12:53:13 +0200 Subject: [PATCH 15/91] draft --- .../src/threshold_schnorr/batch_avss.rs | 122 ++++++++---------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 18 --- 2 files changed, 55 insertions(+), 85 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index a3b56eb076..67bd23548c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -13,9 +13,10 @@ use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Node, Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; -use crate::threshold_schnorr::batch_avss::DecryptedShares::{FaultyDealer, Valid}; +use crate::threshold_schnorr::batch_avss::DecryptedShares::{Blame, Reveal, Valid}; use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint::{Complaint, ComplaintResponse}; +use crate::threshold_schnorr::complaint::Complaint; +use crate::threshold_schnorr::complaint::ComplaintResponse; use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; @@ -93,21 +94,14 @@ pub struct EchoMessage { pub struct ProcessedEchoMessages { ciphertext: Vec, - root: merkle::Node, + r: merkle::Node, r_j: merkle::Node, } pub enum DecryptedShares { Valid(ReceiverOutput), - FaultyDealer { correctDec: bool }, -} - -/// The result of processing a message by a receiver: either valid shares or a complaint. 
-#[allow(clippy::large_enum_variant)] -pub enum ProcessedMessage { - Valid(ReceiverOutput), - Complaint(Complaint), - Blame(EchoMessage), + Reveal(Complaint), + Blame, } /// The output of a receiver which is a batch of shares and public keys for all nonces. @@ -249,8 +243,8 @@ impl Dealer { /// /// * `nodes` defines the set of receivers and their weights. /// * `dealer_id` is the id of this dealer as a node. - /// * `t` is the number of shares that are needed to reconstruct the full key/signature. /// * `f` is the maximum number of Byzantine parties counted by weight. + /// * `t` is the number of shares that are needed to reconstruct the full key/signature. /// * `sid` is a session identifier that should be unique for each invocation, but the same for all parties. /// * `batch_size_per_weight` is the number of secrets a dealer should deal per weight it has. /// @@ -279,7 +273,7 @@ impl Dealer { }) } - /// 1. The Dealer generates shares for the secrets and broadcasts the encrypted shares. + /// 1. The Dealer generates shares for the secrets and creates a set of messages - one per receiver. pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) @@ -421,6 +415,7 @@ impl Receiver { /// * `nodes` defines the set of receivers and what shares they should receive. /// * `id` is the id of this receiver. /// * `dealer_id` is the id of the dealer. + /// * `f` is the maximum number of Byzantine parties counted by weight. /// * `t` is the number of shares that are needed to reconstruct the full key/signature. /// * `sid` is a session identifier that should be unique for each invocation, but the same for all parties. /// * `enc_secret_key` is this Receivers' secret key for the distribution of nonces. The corresponding public key is defined in `nodes`. @@ -454,6 +449,7 @@ impl Receiver { }) } + /// 2. 
When a party receives its message, it verifies the Merkle tree path for it's shards and generates EchoMessages - one per party. pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { if message.avid.iter().any( |AvidMessage { @@ -501,16 +497,14 @@ impl Receiver { .collect::>>() } - /// 2. Each receiver processes the message, verifies and decrypts its shares. + /// 3. When a party has received at EchoMessages from parties with at least weight W - f, it + /// tries to process them. It first filters out invalid messages and checks if the EchoMessages + /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. + /// If the filtered set of EchoMessages does not have sufficient weight, an NotEnoughWeight error + /// is returned. /// - /// If this works, the receiver can store the shares and contribute a signature on the message to a certificate. - /// - /// This returns an [InvalidMessage] error if the ciphertext cannot be verified or if the commitments are invalid. - /// All honest receivers will reject such a message with the same error, and such a message should be ignored. - /// - /// If the message is valid but contains invalid shares for this receiver, the call will succeed but will return a [Complaint]. - /// - /// 3. When f+t signatures have been collected in the certificate, the receivers can now verify the certificate and finish the protocol. + /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed + /// shards along with the r and r_i values. pub fn process_echo_messages( &self, echo_messages: &[EchoMessage], @@ -540,7 +534,7 @@ impl Receiver { .collect_vec(); // TODO: It is up to the caller to ensure that all echo messages have the same digest and root. 
- let root = get_uniform_value( + let r = get_uniform_value( echo_messages .iter() .cloned() @@ -594,19 +588,11 @@ impl Receiver { .expect("should not fail with valid parameters"); let ciphertext = code.decode(shards)?; - Ok(ProcessedEchoMessages { - ciphertext, - root, - r_j, - }) + Ok(ProcessedEchoMessages { ciphertext, r, r_j }) } /// The check r_j' == r_j from the paper - pub fn verify_ciphertext( - &self, - ciphertext: &[u8], - root: &merkle::Node, - ) -> FastCryptoResult<()> { + fn verify_ciphertext(&self, ciphertext: &[u8], root: &merkle::Node) -> FastCryptoResult<()> { let code = ErasureCoder::new( self.nodes.total_weight() as usize, (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards @@ -625,7 +611,15 @@ impl Receiver { Ok(()) } - /// If a party gets echo messages, the ciphertext verifies + /// 3. If the party also received a valid Message from the dealer, it can now decrypt its shares. + /// If this succeeds (returns a DecryptedShared::Valid), the party should return a signed vote to the dealer. + /// + /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid + /// Message from the dealer should request the CommonMessage part of that from the parties who voted. + /// Using this, the party can decrypt the shares and verify that the shares are valid. + /// + /// If this function returns Blame or Reveal, the party should broadcast a corresponding Blame or + /// Reveal message to the other parties. pub fn decrypt_shares( &self, processed_echo_messages: ProcessedEchoMessages, @@ -638,11 +632,7 @@ impl Receiver { shared, } = message; - let ProcessedEchoMessages { - ciphertext, - root, - r_j, - } = processed_echo_messages; + let ProcessedEchoMessages { ciphertext, r, r_j } = processed_echo_messages; if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { @@ -651,7 +641,7 @@ impl Receiver { // TODO: What should happen if these checks fail? 
// Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_message(&self.random_oracle(), &root, message); + let challenge = compute_challenge_from_message(&self.random_oracle(), &r, message); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -687,18 +677,32 @@ impl Receiver { Ok(my_shares) }); - if faulty_dealer || decrypted_shares.is_err() { - Ok(FaultyDealer { - correctDec: decrypted_shares.is_ok(), - }) - } else { - Ok(Valid(ReceiverOutput { - my_shares: decrypted_shares?, + match (faulty_dealer, decrypted_shares) { + (false, Ok(my_shares)) => Ok(Valid(ReceiverOutput { + my_shares, public_keys: full_public_keys.clone(), - })) + })), + (true, Ok(_)) => Ok(Blame), + (_, Err(_)) => Ok(Reveal(Complaint::create( + self.id, + &shared, + &self.enc_secret_key, + &self.random_oracle(), + &mut rand::thread_rng(), + ))), } } + pub fn handle_blame( + &self, + message: &CommonMessage, + root: &merkle::Node, + blame: (), // TODO + my_output: &ReceiverOutput, + ) -> FastCryptoResult> { + panic!("Blame is not implemented"); + } + /// 4. Upon receiving a complaint, a receiver verifies it and responds with its shares. 
pub fn handle_complaint( &self, @@ -857,8 +861,8 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Complaint, Dealer, DecryptedShares, Message, ProcessedEchoMessages, - ProcessedMessage, Receiver, ReceiverOutput, ShareBatch, SharesForNode, + compute_challenge, Dealer, DecryptedShares, Message, ProcessedEchoMessages, Receiver, + ReceiverOutput, Reveal, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; @@ -1269,20 +1273,4 @@ mod tests { // }) // } // } - - fn assert_valid(processed_message: ProcessedMessage) -> ReceiverOutput { - if let ProcessedMessage::Valid(output) = processed_message { - output - } else { - panic!("Expected valid message"); - } - } - - fn assert_complaint(processed_message: ProcessedMessage) -> Complaint { - if let ProcessedMessage::Complaint(complaint) = processed_message { - complaint - } else { - panic!("Expected complaint"); - } - } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index aa2221413b..dd6ae13bdb 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -516,24 +516,6 @@ mod tests { // .unwrap(); // } - fn assert_valid_batch( - processed_message: batch_avss::ProcessedMessage, - ) -> batch_avss::ReceiverOutput { - if let batch_avss::ProcessedMessage::Valid(output) = processed_message { - output - } else { - panic!("Expected valid message"); - } - } - - fn assert_valid(processed_message: avss::ProcessedMessage) -> avss::PartialOutput { - if let avss::ProcessedMessage::Valid(output) = processed_message { - output - } else { - panic!("Expected valid message"); - } - } - /// Restrict a `HashMap` to a given set of keys. /// Panics if the given subset is not a subset of the maps' keys. 
pub(crate) fn restrict( From 2a8d384495f0bb024bb6d51b5141d828237b22d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 12:56:47 +0200 Subject: [PATCH 16/91] doc --- fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index c393906771..fb601ccf78 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -165,6 +165,7 @@ impl ErasureCoder { Ok(shards.into_iter().map(Shard).collect_vec()) } + /// Note that the result may be padded with zeroes, and it is up to the caller to remove them. pub fn decode(&self, shards: Vec>) -> FastCryptoResult> { if shards.len() != self.0.total_shard_count() { return Err(InputTooShort(self.0.total_shard_count())); From 5f98b96b0c531ac179151a7b48cf70db19d38997 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 14:36:55 +0200 Subject: [PATCH 17/91] Refactor complaint handling - Add Complaint enum with Reveal and Blame variants - Unify handle_complaint to verify both - Rename DecryptedShares -> DecryptionOutcome, AvidMessage -> AuthenticatedShards, Message.avid -> dispersal - Cache ErasureCoder on Receiver --- .../src/threshold_schnorr/batch_avss.rs | 423 +++++++++++++----- 1 file changed, 312 insertions(+), 111 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 67bd23548c..3ca4f66953 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -13,15 +13,15 @@ use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Node, Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; -use 
crate::threshold_schnorr::batch_avss::DecryptedShares::{Blame, Reveal, Valid}; +use crate::threshold_schnorr::batch_avss::DecryptionOutcome::Valid; use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint::Complaint; +use crate::threshold_schnorr::complaint; use crate::threshold_schnorr::complaint::ComplaintResponse; use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; -use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage, NotEnoughWeight}; +use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage, InvalidProof, NotEnoughWeight}; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; @@ -57,13 +57,17 @@ pub struct Receiver { t: u16, /// The total number of nonces that the receiver expects to receive from the dealer. batch_size: usize, + /// Reed-Solomon `(W, W - 2f)` coder over the dealer's per-receiver ciphertexts. Cached here + /// because every echo-processing / reveal / blame path encodes or decodes with the same + /// parameters. + code: ErasureCoder, } /// The message broadcast by the dealer. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Message { common: CommonMessage, - avid: Vec, + dispersal: Vec, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -75,7 +79,7 @@ pub struct CommonMessage { } #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct AvidMessage { +pub struct AuthenticatedShards { root: merkle::Node, shards: Vec, proof: merkle::MerkleProof, @@ -96,12 +100,55 @@ pub struct ProcessedEchoMessages { ciphertext: Vec, r: merkle::Node, r_j: merkle::Node, + valid_echoes: Vec, } -pub enum DecryptedShares { +pub enum DecryptionOutcome { Valid(ReceiverOutput), - Reveal(Complaint), - Blame, + Complaint(Complaint), +} + +/// A complaint by a receiver after `decrypt_shares`. There are two flavors: +/// * [Complaint::Reveal] — the receiver could not decrypt or verify its shares. Carries the +/// reconstructed accuser's ciphertext + the proof binding it to the dealer's broadcast, plus a +/// recovery package so the responder can re-decrypt and confirm the shares are invalid. +/// * [Complaint::Blame] — the receiver decrypted valid shares but the AVID dispersal was +/// inconsistent. Carries authenticated shards so the responder can replay the reconstruction +/// and observe that the re-encoded root does not match `r_j`. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum Complaint { + Reveal { + proof: complaint::Complaint, + // TODO: Handle zero-padding + /// The reconstructed accuser's ciphertext. The responder re-encodes this and checks that + /// the resulting root matches `r_j`. + ciphertext: Vec, + r_j: merkle::Node, + /// Proof that `r_j` sits under the global `r` at the accuser's leaf. + pi_j: merkle::MerkleProof, + /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. + hash: Digest<32>, + }, + Blame { + accuser_id: PartyId, + r_j: merkle::Node, + /// Proof that `r_j` sits under the global `r` at `accuser_id`'s leaf. 
+ pi_j: merkle::MerkleProof, + /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under `r_j` + /// at the contributing party's leaf. + shards: Vec, + hash: Digest<32>, + }, +} + +/// One sender's contribution of shards toward reconstructing the accuser's ciphertext, with a +/// Merkle proof binding the shards to the accuser's per-ciphertext root `r_j`. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShardContribution { + pub party: PartyId, + pub shards: Vec, + /// Proof that `shards` sits under the accuser's `r_j` at `party`'s leaf. + pub proof: merkle::MerkleProof, } /// The output of a receiver which is a batch of shares and public keys for all nonces. @@ -392,9 +439,9 @@ impl Dealer { response_polynomial: response_polynomial.clone(), blinding_commit, }, - avid: m + dispersal: m .iter() - .map(|m| AvidMessage { + .map(|m| AuthenticatedShards { root: m.0.clone(), shards: m.1.clone(), proof: m.2.clone(), @@ -438,6 +485,11 @@ impl Receiver { // The dealer is expected to deal a number of nonces proportional to it's weight let batch_size = nodes.weight_of(dealer_id)? as usize * batch_size_per_weight as usize; + let code = ErasureCoder::new( + nodes.total_weight() as usize, + (nodes.total_weight() - 2 * f) as usize, // 2f parity shards + )?; + Ok(Self { id, enc_secret_key, @@ -446,16 +498,18 @@ impl Receiver { f, t, batch_size, + code, }) } /// 2. When a party receives its message, it verifies the Merkle tree path for it's shards and generates EchoMessages - one per party. pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { - if message.avid.iter().any( - |AvidMessage { + if message.dispersal.iter().any( + |AuthenticatedShards { root, shards, proof, + .. }| { proof .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) @@ -466,21 +520,22 @@ impl Receiver { } let tree = MerkleTree::::build_from_unserialized( - message.avid.iter().map(|AvidMessage { root, .. 
}| root), + message.dispersal.iter().map(|AuthenticatedShards { root, .. }| root), )?; let r = tree.root(); - let digest = compute_common_message_hash(message); + let digest = compute_common_message_hash(&message.common); message - .avid + .dispersal .iter() .enumerate() .map( |( i, - AvidMessage { + AuthenticatedShards { root, shards, proof, + .. }, )| { Ok(EchoMessage { @@ -533,24 +588,7 @@ impl Receiver { .cloned() .collect_vec(); - // TODO: It is up to the caller to ensure that all echo messages have the same digest and root. - let r = get_uniform_value( - echo_messages - .iter() - .cloned() - .map(|echo_message| echo_message.r), - ) - .ok_or(InvalidMessage)?; - let r_j = get_uniform_value( - echo_messages - .iter() - .cloned() - .map(|echo_message| echo_message.r_i), - ) - .ok_or(InvalidMessage)?; - if get_uniform_value(echo_messages.iter().map(|echo_message| echo_message.hash)).is_none() { - return Err(InvalidMessage); - } + let (r, r_j, _) = require_uniform_echo_metadata(&echo_messages)?; let required_weight = self.nodes.total_weight() - self.f; if self @@ -561,47 +599,40 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } + let ciphertext = self.reconstruct_ciphertext_from_echoes(&echo_messages)?; + Ok(ProcessedEchoMessages { + ciphertext, + r, + r_j, + valid_echoes: echo_messages, + }) + } + + /// Reed-Solomon decode a ciphertext from a set of authenticated [EchoMessage]s. Each echo + /// contributes `Some` shards for its sender's leaves; missing senders contribute `None` + /// erasures. The caller is responsible for having verified the echoes' Merkle proofs and + /// for ensuring the set has enough weight (≥ `W - 2f`) to decode. 
+ fn reconstruct_ciphertext_from_echoes( + &self, + echoes: &[EchoMessage], + ) -> FastCryptoResult> { let shards: Vec> = self .nodes .node_ids_iter() - .flat_map(|id| { - match echo_messages - .iter() - .find(|echo_message| echo_message.party == id) - { - Some(echo_message) => echo_message - .s_ij - .iter() - .map(|s| Some(s.clone())) - .collect_vec(), - None => { - repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec() - } - } + .flat_map(|id| match echoes.iter().find(|e| e.party == id) { + Some(e) => e.s_ij.iter().map(|s| Some(s.clone())).collect_vec(), + None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec(), }) - .collect::>(); + .collect(); - let code = ErasureCoder::new( - self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards - ) - .expect("should not fail with valid parameters"); - - let ciphertext = code.decode(shards)?; - Ok(ProcessedEchoMessages { ciphertext, r, r_j }) + self.code.decode(shards) } /// The check r_j' == r_j from the paper fn verify_ciphertext(&self, ciphertext: &[u8], root: &merkle::Node) -> FastCryptoResult<()> { - let code = ErasureCoder::new( - self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards - ) - .expect("should not fail with valid parameters"); - let new_shards = self .nodes - .collect_to_nodes(code.encode(&ciphertext)?.into_iter())?; + .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; if new_tree.root() != *root { @@ -612,19 +643,19 @@ impl Receiver { } /// 3. If the party also received a valid Message from the dealer, it can now decrypt its shares. - /// If this succeeds (returns a DecryptedShared::Valid), the party should return a signed vote to the dealer. + /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. 
/// /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid /// Message from the dealer should request the CommonMessage part of that from the parties who voted. /// Using this, the party can decrypt the shares and verify that the shares are valid. /// - /// If this function returns Blame or Reveal, the party should broadcast a corresponding Blame or - /// Reveal message to the other parties. + /// If this function returns a [DecryptionOutcome::Complaint], the party should broadcast it + /// to the other parties. pub fn decrypt_shares( &self, processed_echo_messages: ProcessedEchoMessages, message: &CommonMessage, - ) -> FastCryptoResult { + ) -> FastCryptoResult { let CommonMessage { full_public_keys, blinding_commit, @@ -632,7 +663,12 @@ impl Receiver { shared, } = message; - let ProcessedEchoMessages { ciphertext, r, r_j } = processed_echo_messages; + let ProcessedEchoMessages { + ciphertext, + r, + r_j, + valid_echoes, + } = processed_echo_messages; if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { @@ -657,6 +693,7 @@ impl Receiver { let decrypted_shares = shared .verify(&random_oracle_encryption) .map(|_| { + // TODO: Handle zero-padding shared.decrypt( &ciphertext, &self.enc_secret_key, @@ -682,56 +719,202 @@ impl Receiver { my_shares, public_keys: full_public_keys.clone(), })), - (true, Ok(_)) => Ok(Blame), - (_, Err(_)) => Ok(Reveal(Complaint::create( - self.id, - &shared, - &self.enc_secret_key, - &self.random_oracle(), - &mut rand::thread_rng(), - ))), + (true, Ok(_)) => { + // The accuser packages the echoes' shard-level proofs (`pi_ij`, leaf-on-r_j) as + // ShardContributions, and lifts the `pi_i` (leaf-on-r) once into `pi_j`. This + // gives the responder enough to replay the AVID inconsistency check. 
+ let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + let pi_j = any_echo.pi_i.clone(); + let hash = any_echo.hash; + let shards = valid_echoes + .into_iter() + .map(|e| ShardContribution { + party: e.party, + shards: e.s_ij, + proof: e.pi_ij, + }) + .collect_vec(); + Ok(DecryptionOutcome::Complaint(Complaint::Blame { + accuser_id: self.id, + r_j, + pi_j, + shards, + hash, + })) + } + (_, Err(_)) => { + let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + Ok(DecryptionOutcome::Complaint(Complaint::Reveal { + proof: complaint::Complaint::create( + self.id, + &shared, + &self.enc_secret_key, + &self.random_oracle(), + &mut rand::thread_rng(), + ), + ciphertext, + r_j, + pi_j: any_echo.pi_i.clone(), + hash: any_echo.hash, + })) + } } } - pub fn handle_blame( + /// 4. Upon receiving a [Complaint] from another party, verify it and, if valid, respond + /// with this party's own shares so the accuser can recover. + /// + /// `root` is the global Merkle root that this party voted for, and `message` is the dealer's + /// [CommonMessage]. The complaint is bound to both. + pub fn handle_complaint( &self, message: &CommonMessage, root: &merkle::Node, - blame: (), // TODO + complaint: &Complaint, my_output: &ReceiverOutput, ) -> FastCryptoResult> { - panic!("Blame is not implemented"); + match complaint { + Complaint::Reveal { + proof, + ciphertext, + r_j, + pi_j, + hash, + } => { + self.verify_reveal(message, root, proof, ciphertext, r_j, pi_j, hash)?; + } + Complaint::Blame { + accuser_id, + r_j, + pi_j, + shards, + hash, + } => { + self.verify_blame(message, root, *accuser_id, r_j, pi_j, shards, hash)?; + } + } + Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// 4. Upon receiving a complaint, a receiver verifies it and responds with its shares. 
- pub fn handle_complaint( + /// Verify a [Complaint::Reveal]: the ciphertext must be authenticated as the dealer's by + /// re-encoding under `r_j`, and decryption with the recovery package must yield invalid + /// shares. + fn verify_reveal( &self, message: &CommonMessage, root: &merkle::Node, - complaint: &Complaint, - my_output: &ReceiverOutput, - ) -> FastCryptoResult> { - let challenge = compute_challenge_from_message(&self.random_oracle(), &root, &message); - complaint.check( - &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, - &[], // TODO + proof: &complaint::Complaint, + ciphertext: &[u8], + r_j: &merkle::Node, + pi_j: &merkle::MerkleProof, + hash: &Digest<32>, + ) -> FastCryptoResult<()> { + let accuser_id = proof.accuser_id; + let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; + + verify_outer_proof(root, r_j, pi_j, accuser_id)?; + + // Authenticate the ciphertext as the dealer's: re-encoding it must yield `r_j`. + if hash != &compute_common_message_hash(message) + || self.verify_ciphertext(ciphertext, r_j).is_err() + { + return Err(InvalidProof); + } + + let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); + proof.check( + accuser_pk, + // TODO: Same padding issue as in `decrypt_shares` — `ciphertext` is shard-aligned and + // its trailing zeros decrypt to junk that breaks `bcs::from_bytes`. Truncate to the + // unpadded length once that's carried on the wire. + ciphertext, &message.shared, &self.random_oracle(), |shares: &SharesForNode| { verify_shares( shares, &self.nodes, - complaint.accuser_id, - &message, + accuser_id, + message, &challenge, self.batch_size, ) }, - )?; - Ok(ComplaintResponse { - responder_id: self.id, - shares: my_output.my_shares.clone(), - }) + ) + } + + /// Verify a [Complaint::Blame]: the accuser must have collected enough authenticated shards + /// whose re-encoded ciphertext root differs from the `r_j` the dealer committed to. 
+ #[allow(clippy::too_many_arguments)] + fn verify_blame( + &self, + message: &CommonMessage, + root: &merkle::Node, + accuser_id: PartyId, + r_j: &merkle::Node, + pi_j: &merkle::MerkleProof, + shards: &[ShardContribution], + hash: &Digest<32>, + ) -> FastCryptoResult<()> { + self.nodes.node_id_to_node(accuser_id)?; + + if hash != &compute_common_message_hash(message) { + return Err(InvalidProof); + } + + verify_outer_proof(root, r_j, pi_j, accuser_id)?; + + if shards.iter().map(|s| s.party).unique().count() != shards.len() { + return Err(InvalidProof); + } + + if shards.iter().any(|s| { + s.proof + .verify_proof_with_unserialized_leaf(r_j, &s.shards, s.party as usize) + .is_err() + }) { + return Err(InvalidProof); + } + + let weight_of_shards = self + .nodes + .total_weight_of(shards.iter().map(|s| &s.party))?; + if weight_of_shards < self.nodes.total_weight() - 2 * self.f { + return Err(InvalidProof); + } + + let ciphertext = self + .reconstruct_ciphertext_from_shard_contributions(shards) + .map_err(|_| InvalidProof)?; + + // The blame is valid iff re-encoding the recovered ciphertext does not match `r_j`: + // that mismatch is the proof of dealer misbehavior. + if self.verify_ciphertext(&ciphertext, r_j).is_ok() { + return Err(InvalidProof); + } + + Ok(()) + } + + /// Sibling of [Self::reconstruct_ciphertext_from_echoes] that operates on a slice of + /// [ShardContribution] (the shape carried by [Complaint::Blame]). + fn reconstruct_ciphertext_from_shard_contributions( + &self, + contributions: &[ShardContribution], + ) -> FastCryptoResult> { + let shards: Vec> = self + .nodes + .node_ids_iter() + .flat_map( + |id| match contributions.iter().find(|s| s.party == id) { + Some(s) => s.shards.iter().map(|s| Some(s.clone())).collect_vec(), + None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize) + .collect_vec(), + }, + ) + .collect(); + + self.code.decode(shards) } /// 5. 
Upon receiving t valid responses to a complaint, the accuser can recover its shares. @@ -788,6 +971,28 @@ impl Receiver { } } +/// Verify that `r_j` sits under the global `r` at the leaf indexed by `accuser_id`. Returns +/// `InvalidProof` on mismatch. +fn verify_outer_proof( + r: &merkle::Node, + r_j: &merkle::Node, + pi_j: &merkle::MerkleProof, + accuser_id: PartyId, +) -> FastCryptoResult<()> { + pi_j.verify_proof_with_unserialized_leaf(r, r_j, accuser_id as usize) + .map_err(|_| InvalidProof) +} + +/// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root +/// `r`, the receiver's per-ciphertext root `r_j`, and the dealer's `H(val)`. Returns an error if +/// any field is non-uniform (which would indicate inconsistent echoes / a faulty sender). +fn require_uniform_echo_metadata( + echoes: &[EchoMessage], +) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { + get_uniform_value(echoes.iter().map(|e| (e.r.clone(), e.r_i.clone(), e.hash))) + .ok_or(InvalidMessage) +} + /// Verify a set of shares receiver from a Dealer fn verify_shares( shares: &SharesForNode, @@ -834,16 +1039,12 @@ fn compute_challenge_from_message( ) } -fn compute_common_message_hash(message: &Message) -> Digest<32> { - let Message { - common: - CommonMessage { - shared, - full_public_keys, - blinding_commit, - response_polynomial, - }, - .. 
+fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { + let CommonMessage { + shared, + full_public_keys, + blinding_commit, + response_polynomial, } = message; let mut hasher = Blake2b256::new(); hasher.update( @@ -861,8 +1062,8 @@ fn compute_common_message_hash(message: &Message) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Dealer, DecryptedShares, Message, ProcessedEchoMessages, Receiver, - ReceiverOutput, Reveal, ShareBatch, SharesForNode, + compute_challenge, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, Receiver, + ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; @@ -954,8 +1155,8 @@ mod tests { .iter() .zip(messages.iter()) .zip(echo_messages.iter()) - .map(|((receiver, message), echo_message)| { - receiver.process_echo_messages(&echo_message).unwrap() + .map(|((receiver, _message), echo_message)| { + receiver.process_echo_messages(echo_message).unwrap() }) .collect_vec(); @@ -965,7 +1166,7 @@ mod tests { .zip(messages) .map( |((receiver, pem), message)| match receiver.decrypt_shares(pem, &message.common) { - Ok(DecryptedShares::Valid(output)) => (receiver.id, output), + Ok(DecryptionOutcome::Valid(output)) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" ), From 26b6bc5768bc42efe32e9d17484e5eb6f58ac7a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 14:48:31 +0200 Subject: [PATCH 18/91] Renames and doc trims --- .../src/threshold_schnorr/batch_avss.rs | 117 +++++++++--------- 1 file changed, 56 insertions(+), 61 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 3ca4f66953..8b50b04414 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -57,9 +57,7 @@ pub struct 
Receiver { t: u16, /// The total number of nonces that the receiver expects to receive from the dealer. batch_size: usize, - /// Reed-Solomon `(W, W - 2f)` coder over the dealer's per-receiver ciphertexts. Cached here - /// because every echo-processing / reveal / blame path encodes or decodes with the same - /// parameters. + /// Reed-Solomon `(W, W - 2f)` coder over the dealer's per-receiver ciphertexts. code: ErasureCoder, } @@ -99,7 +97,7 @@ pub struct EchoMessage { pub struct ProcessedEchoMessages { ciphertext: Vec, r: merkle::Node, - r_j: merkle::Node, + r_i: merkle::Node, valid_echoes: Vec, } @@ -108,33 +106,30 @@ pub enum DecryptionOutcome { Complaint(Complaint), } -/// A complaint by a receiver after `decrypt_shares`. There are two flavors: -/// * [Complaint::Reveal] — the receiver could not decrypt or verify its shares. Carries the -/// reconstructed accuser's ciphertext + the proof binding it to the dealer's broadcast, plus a -/// recovery package so the responder can re-decrypt and confirm the shares are invalid. +/// A complaint by a receiver after `verify_and_decrypt`. There are two flavors: +/// * [Complaint::Reveal] — the receiver could not decrypt or verify its shares. /// * [Complaint::Blame] — the receiver decrypted valid shares but the AVID dispersal was -/// inconsistent. Carries authenticated shards so the responder can replay the reconstruction -/// and observe that the re-encoded root does not match `r_j`. +/// inconsistent. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum Complaint { Reveal { proof: complaint::Complaint, // TODO: Handle zero-padding /// The reconstructed accuser's ciphertext. The responder re-encodes this and checks that - /// the resulting root matches `r_j`. + /// the resulting root matches `r_i`. ciphertext: Vec, - r_j: merkle::Node, - /// Proof that `r_j` sits under the global `r` at the accuser's leaf. 
- pi_j: merkle::MerkleProof, + r_i: merkle::Node, + /// Proof that `r_i` sits under the global `r` at the accuser's leaf. + pi_i: merkle::MerkleProof, /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. hash: Digest<32>, }, Blame { accuser_id: PartyId, - r_j: merkle::Node, - /// Proof that `r_j` sits under the global `r` at `accuser_id`'s leaf. - pi_j: merkle::MerkleProof, - /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under `r_j` + r_i: merkle::Node, + /// Proof that `r_i` sits under the global `r` at `accuser_id`'s leaf. + pi_i: merkle::MerkleProof, + /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under `r_i` /// at the contributing party's leaf. shards: Vec, hash: Digest<32>, @@ -142,12 +137,12 @@ pub enum Complaint { } /// One sender's contribution of shards toward reconstructing the accuser's ciphertext, with a -/// Merkle proof binding the shards to the accuser's per-ciphertext root `r_j`. +/// Merkle proof binding the shards to the accuser's per-ciphertext root `r_i`. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShardContribution { pub party: PartyId, pub shards: Vec, - /// Proof that `shards` sits under the accuser's `r_j` at `party`'s leaf. + /// Proof that `shards` sits under the accuser's `r_i` at `party`'s leaf. 
pub proof: merkle::MerkleProof, } @@ -588,7 +583,7 @@ impl Receiver { .cloned() .collect_vec(); - let (r, r_j, _) = require_uniform_echo_metadata(&echo_messages)?; + let (r, r_i, _) = require_uniform_echo_metadata(&echo_messages)?; let required_weight = self.nodes.total_weight() - self.f; if self @@ -603,7 +598,7 @@ impl Receiver { Ok(ProcessedEchoMessages { ciphertext, r, - r_j, + r_i, valid_echoes: echo_messages, }) } @@ -628,8 +623,8 @@ impl Receiver { self.code.decode(shards) } - /// The check r_j' == r_j from the paper - fn verify_ciphertext(&self, ciphertext: &[u8], root: &merkle::Node) -> FastCryptoResult<()> { + /// The check r_i' == r_i from the paper + fn check_avid_consistency(&self, ciphertext: &[u8], root: &merkle::Node) -> FastCryptoResult<()> { let new_shards = self .nodes .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; @@ -651,7 +646,7 @@ impl Receiver { /// /// If this function returns a [DecryptionOutcome::Complaint], the party should broadcast it /// to the other parties. 
- pub fn decrypt_shares( + pub fn verify_and_decrypt( &self, processed_echo_messages: ProcessedEchoMessages, message: &CommonMessage, @@ -666,7 +661,7 @@ impl Receiver { let ProcessedEchoMessages { ciphertext, r, - r_j, + r_i, valid_echoes, } = processed_echo_messages; if full_public_keys.len() != self.batch_size @@ -686,8 +681,8 @@ impl Receiver { return Err(InvalidMessage); } - // Check r_j' == r_j from the paper - let faulty_dealer = self.verify_ciphertext(&ciphertext, &r_j).is_err(); + // Check r_i' == r_i from the paper + let faulty_dealer = self.check_avid_consistency(&ciphertext, &r_i).is_err(); let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let decrypted_shares = shared @@ -720,11 +715,11 @@ impl Receiver { public_keys: full_public_keys.clone(), })), (true, Ok(_)) => { - // The accuser packages the echoes' shard-level proofs (`pi_ij`, leaf-on-r_j) as - // ShardContributions, and lifts the `pi_i` (leaf-on-r) once into `pi_j`. This + // The accuser packages the echoes' shard-level proofs (`pi_ij`, leaf-on-r_i) as + // ShardContributions, and lifts the `pi_i` (leaf-on-r) once into `pi_i`. This // gives the responder enough to replay the AVID inconsistency check. 
let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let pi_j = any_echo.pi_i.clone(); + let pi_i = any_echo.pi_i.clone(); let hash = any_echo.hash; let shards = valid_echoes .into_iter() @@ -736,8 +731,8 @@ impl Receiver { .collect_vec(); Ok(DecryptionOutcome::Complaint(Complaint::Blame { accuser_id: self.id, - r_j, - pi_j, + r_i, + pi_i, shards, hash, })) @@ -753,8 +748,8 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - r_j, - pi_j: any_echo.pi_i.clone(), + r_i, + pi_i: any_echo.pi_i.clone(), hash: any_echo.hash, })) } @@ -777,27 +772,27 @@ impl Receiver { Complaint::Reveal { proof, ciphertext, - r_j, - pi_j, + r_i, + pi_i, hash, } => { - self.verify_reveal(message, root, proof, ciphertext, r_j, pi_j, hash)?; + self.verify_reveal(message, root, proof, ciphertext, r_i, pi_i, hash)?; } Complaint::Blame { accuser_id, - r_j, - pi_j, + r_i, + pi_i, shards, hash, } => { - self.verify_blame(message, root, *accuser_id, r_j, pi_j, shards, hash)?; + self.verify_blame(message, root, *accuser_id, r_i, pi_i, shards, hash)?; } } Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } /// Verify a [Complaint::Reveal]: the ciphertext must be authenticated as the dealer's by - /// re-encoding under `r_j`, and decryption with the recovery package must yield invalid + /// re-encoding under `r_i`, and decryption with the recovery package must yield invalid /// shares. fn verify_reveal( &self, @@ -805,18 +800,18 @@ impl Receiver { root: &merkle::Node, proof: &complaint::Complaint, ciphertext: &[u8], - r_j: &merkle::Node, - pi_j: &merkle::MerkleProof, + r_i: &merkle::Node, + pi_i: &merkle::MerkleProof, hash: &Digest<32>, ) -> FastCryptoResult<()> { let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - verify_outer_proof(root, r_j, pi_j, accuser_id)?; + verify_outer_proof(root, r_i, pi_i, accuser_id)?; - // Authenticate the ciphertext as the dealer's: re-encoding it must yield `r_j`. 
+ // Authenticate the ciphertext as the dealer's: re-encoding it must yield `r_i`. if hash != &compute_common_message_hash(message) - || self.verify_ciphertext(ciphertext, r_j).is_err() + || self.check_avid_consistency(ciphertext, r_i).is_err() { return Err(InvalidProof); } @@ -824,7 +819,7 @@ impl Receiver { let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); proof.check( accuser_pk, - // TODO: Same padding issue as in `decrypt_shares` — `ciphertext` is shard-aligned and + // TODO: Same padding issue as in `verify_and_decrypt` — `ciphertext` is shard-aligned and // its trailing zeros decrypt to junk that breaks `bcs::from_bytes`. Truncate to the // unpadded length once that's carried on the wire. ciphertext, @@ -844,15 +839,15 @@ impl Receiver { } /// Verify a [Complaint::Blame]: the accuser must have collected enough authenticated shards - /// whose re-encoded ciphertext root differs from the `r_j` the dealer committed to. + /// whose re-encoded ciphertext root differs from the `r_i` the dealer committed to. 
#[allow(clippy::too_many_arguments)] fn verify_blame( &self, message: &CommonMessage, root: &merkle::Node, accuser_id: PartyId, - r_j: &merkle::Node, - pi_j: &merkle::MerkleProof, + r_i: &merkle::Node, + pi_i: &merkle::MerkleProof, shards: &[ShardContribution], hash: &Digest<32>, ) -> FastCryptoResult<()> { @@ -862,7 +857,7 @@ impl Receiver { return Err(InvalidProof); } - verify_outer_proof(root, r_j, pi_j, accuser_id)?; + verify_outer_proof(root, r_i, pi_i, accuser_id)?; if shards.iter().map(|s| s.party).unique().count() != shards.len() { return Err(InvalidProof); @@ -870,7 +865,7 @@ impl Receiver { if shards.iter().any(|s| { s.proof - .verify_proof_with_unserialized_leaf(r_j, &s.shards, s.party as usize) + .verify_proof_with_unserialized_leaf(r_i, &s.shards, s.party as usize) .is_err() }) { return Err(InvalidProof); @@ -887,9 +882,9 @@ impl Receiver { .reconstruct_ciphertext_from_shard_contributions(shards) .map_err(|_| InvalidProof)?; - // The blame is valid iff re-encoding the recovered ciphertext does not match `r_j`: + // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`: // that mismatch is the proof of dealer misbehavior. - if self.verify_ciphertext(&ciphertext, r_j).is_ok() { + if self.check_avid_consistency(&ciphertext, r_i).is_ok() { return Err(InvalidProof); } @@ -971,20 +966,20 @@ impl Receiver { } } -/// Verify that `r_j` sits under the global `r` at the leaf indexed by `accuser_id`. Returns +/// Verify that `r_i` sits under the global `r` at the leaf indexed by `accuser_id`. Returns /// `InvalidProof` on mismatch. 
fn verify_outer_proof( r: &merkle::Node, - r_j: &merkle::Node, - pi_j: &merkle::MerkleProof, + r_i: &merkle::Node, + pi_i: &merkle::MerkleProof, accuser_id: PartyId, ) -> FastCryptoResult<()> { - pi_j.verify_proof_with_unserialized_leaf(r, r_j, accuser_id as usize) + pi_i.verify_proof_with_unserialized_leaf(r, r_i, accuser_id as usize) .map_err(|_| InvalidProof) } /// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root -/// `r`, the receiver's per-ciphertext root `r_j`, and the dealer's `H(val)`. Returns an error if +/// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if /// any field is non-uniform (which would indicate inconsistent echoes / a faulty sender). fn require_uniform_echo_metadata( echoes: &[EchoMessage], @@ -1165,7 +1160,7 @@ mod tests { .zip(processed_echo_messages) .zip(messages) .map( - |((receiver, pem), message)| match receiver.decrypt_shares(pem, &message.common) { + |((receiver, pem), message)| match receiver.verify_and_decrypt(pem, &message.common) { Ok(DecryptionOutcome::Valid(output)) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" From dfe6ac3d3e7b8f7848e4ff84168122bddfd5390f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 15:06:30 +0200 Subject: [PATCH 19/91] Drop r_i/pi_i from Complaint, add share recovery test - handle_complaint and recover take &Message; verifier looks up r_i from message.dispersal[accuser_id] - create_message takes an optional plaintext mutation closure for tests - Restore test_share_recovery exercising the cheating-dealer/Reveal/ recover path --- .../src/threshold_schnorr/batch_avss.rs | 309 +++++++++++++----- 1 file changed, 231 insertions(+), 78 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 8b50b04414..162f830c88 100644 --- 
a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -116,21 +116,15 @@ pub enum Complaint { proof: complaint::Complaint, // TODO: Handle zero-padding /// The reconstructed accuser's ciphertext. The responder re-encodes this and checks that - /// the resulting root matches `r_i`. + /// the resulting root matches the dealer-committed `r_i` they have locally. ciphertext: Vec, - r_i: merkle::Node, - /// Proof that `r_i` sits under the global `r` at the accuser's leaf. - pi_i: merkle::MerkleProof, /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. hash: Digest<32>, }, Blame { accuser_id: PartyId, - r_i: merkle::Node, - /// Proof that `r_i` sits under the global `r` at `accuser_id`'s leaf. - pi_i: merkle::MerkleProof, - /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under `r_i` - /// at the contributing party's leaf. + /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under the + /// dealer-committed `r_i` at the contributing party's leaf. shards: Vec, hash: Digest<32>, }, @@ -317,6 +311,16 @@ impl Dealer { /// 1. The Dealer generates shares for the secrets and creates a set of messages - one per receiver. pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult> { + self.create_message_with_mutation(rng, |_| {}) + } + + /// Like [Self::create_message] but exposes a mutation hook over the pre-encryption + /// per-receiver plaintexts so tests can simulate a faulty dealer by corrupting one slot. 
+ fn create_message_with_mutation( + &self, + rng: &mut impl AllowedRng, + mutate: impl FnOnce(&mut [(crate::ecies_v1::PublicKey, Vec)]), + ) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) .collect_vec(); @@ -338,7 +342,7 @@ impl Dealer { .collect_vec(); // Encrypt all shares to the receivers - let pk_and_msgs = self + let mut pk_and_msgs = self .nodes .iter() .map(|node| (node.pk.clone(), self.nodes.share_ids_of(node.id).unwrap())) @@ -360,6 +364,8 @@ impl Dealer { }) .collect_vec(); + mutate(&mut pk_and_msgs); + let ciphertext = MultiRecipientEncryption::encrypt( &pk_and_msgs, &self.random_oracle().extend(&Encryption.to_string()), @@ -716,10 +722,10 @@ impl Receiver { })), (true, Ok(_)) => { // The accuser packages the echoes' shard-level proofs (`pi_ij`, leaf-on-r_i) as - // ShardContributions, and lifts the `pi_i` (leaf-on-r) once into `pi_i`. This - // gives the responder enough to replay the AVID inconsistency check. + // The accuser packages each echo's shard-level proof (`pi_ij`, leaf-on-r_i) as a + // ShardContribution. The responder looks up r_i locally from their own copy of the + // dealer's [Message], so it doesn't have to be transmitted. let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let pi_i = any_echo.pi_i.clone(); let hash = any_echo.hash; let shards = valid_echoes .into_iter() @@ -731,8 +737,6 @@ impl Receiver { .collect_vec(); Ok(DecryptionOutcome::Complaint(Complaint::Blame { accuser_id: self.id, - r_i, - pi_i, shards, hash, })) @@ -748,23 +752,20 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - r_i, - pi_i: any_echo.pi_i.clone(), hash: any_echo.hash, })) } } } - /// 4. Upon receiving a [Complaint] from another party, verify it and, if valid, respond - /// with this party's own shares so the accuser can recover. - /// - /// `root` is the global Merkle root that this party voted for, and `message` is the dealer's - /// [CommonMessage]. The complaint is bound to both. + /// 4. 
Upon receiving a [Complaint] from another party, verify it and, if valid, respond with + /// this party's own shares so the accuser can recover. `message` is the dealer's full + /// [Message] as this party received it; the verifier looks up the accuser's per-ciphertext + /// root locally from `message.dispersal[accuser_id]` rather than trusting the complaint to + /// carry it. pub fn handle_complaint( &self, - message: &CommonMessage, - root: &merkle::Node, + message: &Message, complaint: &Complaint, my_output: &ReceiverOutput, ) -> FastCryptoResult> { @@ -772,65 +773,56 @@ impl Receiver { Complaint::Reveal { proof, ciphertext, - r_i, - pi_i, hash, } => { - self.verify_reveal(message, root, proof, ciphertext, r_i, pi_i, hash)?; + self.verify_reveal(message, proof, ciphertext, hash)?; } Complaint::Blame { accuser_id, - r_i, - pi_i, shards, hash, } => { - self.verify_blame(message, root, *accuser_id, r_i, pi_i, shards, hash)?; + self.verify_blame(message, *accuser_id, shards, hash)?; } } Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } /// Verify a [Complaint::Reveal]: the ciphertext must be authenticated as the dealer's by - /// re-encoding under `r_i`, and decryption with the recovery package must yield invalid - /// shares. + /// re-encoding under the locally-known `r_i`, and decryption with the recovery package must + /// yield invalid shares. fn verify_reveal( &self, - message: &CommonMessage, - root: &merkle::Node, + message: &Message, proof: &complaint::Complaint, ciphertext: &[u8], - r_i: &merkle::Node, - pi_i: &merkle::MerkleProof, hash: &Digest<32>, ) -> FastCryptoResult<()> { let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; + let r_i = self.dispersal_root_for(message, accuser_id)?; + let r = self.global_root(message)?; - verify_outer_proof(root, r_i, pi_i, accuser_id)?; - - // Authenticate the ciphertext as the dealer's: re-encoding it must yield `r_i`. 
- if hash != &compute_common_message_hash(message) + if hash != &compute_common_message_hash(&message.common) || self.check_avid_consistency(ciphertext, r_i).is_err() { return Err(InvalidProof); } - let challenge = compute_challenge_from_message(&self.random_oracle(), root, message); + let challenge = + compute_challenge_from_message(&self.random_oracle(), &r, &message.common); proof.check( accuser_pk, - // TODO: Same padding issue as in `verify_and_decrypt` — `ciphertext` is shard-aligned and - // its trailing zeros decrypt to junk that breaks `bcs::from_bytes`. Truncate to the - // unpadded length once that's carried on the wire. + // TODO: Handle zero-padding ciphertext, - &message.shared, + &message.common.shared, &self.random_oracle(), |shares: &SharesForNode| { verify_shares( shares, &self.nodes, accuser_id, - message, + &message.common, &challenge, self.batch_size, ) @@ -839,26 +831,20 @@ impl Receiver { } /// Verify a [Complaint::Blame]: the accuser must have collected enough authenticated shards - /// whose re-encoded ciphertext root differs from the `r_i` the dealer committed to. - #[allow(clippy::too_many_arguments)] + /// whose re-encoded ciphertext root differs from the locally-known `r_i`. 
fn verify_blame( &self, - message: &CommonMessage, - root: &merkle::Node, + message: &Message, accuser_id: PartyId, - r_i: &merkle::Node, - pi_i: &merkle::MerkleProof, shards: &[ShardContribution], hash: &Digest<32>, ) -> FastCryptoResult<()> { - self.nodes.node_id_to_node(accuser_id)?; + let r_i = self.dispersal_root_for(message, accuser_id)?; - if hash != &compute_common_message_hash(message) { + if hash != &compute_common_message_hash(&message.common) { return Err(InvalidProof); } - verify_outer_proof(root, r_i, pi_i, accuser_id)?; - if shards.iter().map(|s| s.party).unique().count() != shards.len() { return Err(InvalidProof); } @@ -882,8 +868,7 @@ impl Receiver { .reconstruct_ciphertext_from_shard_contributions(shards) .map_err(|_| InvalidProof)?; - // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`: - // that mismatch is the proof of dealer misbehavior. + // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`. if self.check_avid_consistency(&ciphertext, r_i).is_ok() { return Err(InvalidProof); } @@ -891,6 +876,25 @@ impl Receiver { Ok(()) } + fn dispersal_root_for<'a>( + &self, + message: &'a Message, + accuser_id: PartyId, + ) -> FastCryptoResult<&'a merkle::Node> { + Ok(&message + .dispersal + .get(accuser_id as usize) + .ok_or(InvalidProof)? + .root) + } + + fn global_root(&self, message: &Message) -> FastCryptoResult { + Ok(MerkleTree::::build_from_unserialized( + message.dispersal.iter().map(|s| &s.root), + )? + .root()) + } + /// Sibling of [Self::reconstruct_ciphertext_from_echoes] that operates on a slice of /// [ShardContribution] (the shape carried by [Complaint::Blame]). fn reconstruct_ciphertext_from_shard_contributions( @@ -916,8 +920,7 @@ impl Receiver { /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. 
pub fn recover( &self, - message: &CommonMessage, - root: &merkle::Node, + message: &Message, responses: Vec>, ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. @@ -930,13 +933,15 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let challenge = compute_challenge_from_message(&self.random_oracle(), &root, &message); + let r = self.global_root(message)?; + let challenge = + compute_challenge_from_message(&self.random_oracle(), &r, &message.common); let response_shares = responses .into_iter() .filter_map(|response| { response .shares - .verify(&message, &challenge) + .verify(&message.common, &challenge) .ok() .map(|_| response.shares) }) @@ -949,11 +954,11 @@ impl Receiver { } let my_shares = SharesForNode::recover(self, &response_shares)?; - my_shares.verify(&message, &challenge)?; + my_shares.verify(&message.common, &challenge)?; Ok(ReceiverOutput { my_shares, - public_keys: message.full_public_keys.clone(), + public_keys: message.common.full_public_keys.clone(), }) } @@ -966,18 +971,6 @@ impl Receiver { } } -/// Verify that `r_i` sits under the global `r` at the leaf indexed by `accuser_id`. Returns -/// `InvalidProof` on mismatch. -fn verify_outer_proof( - r: &merkle::Node, - r_i: &merkle::Node, - pi_i: &merkle::MerkleProof, - accuser_id: PartyId, -) -> FastCryptoResult<()> { - pi_i.verify_proof_with_unserialized_leaf(r, r_i, accuser_id as usize) - .map_err(|_| InvalidProof) -} - /// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root /// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if /// any field is non-uniform (which would indicate inconsistent echoes / a faulty sender). 
@@ -1057,8 +1050,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, Receiver, - ReceiverOutput, ShareBatch, SharesForNode, + compute_challenge, Complaint, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, + Receiver, ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; @@ -1193,6 +1186,166 @@ mod tests { assert_eq!(secrets, secrets); } + #[test] + fn test_share_recovery() { + // Dealer is honest at the AVID layer (consistent dispersal) but flips a byte in + // receiver 0's plaintext, so receiver 0's decryption succeeds but the resulting + // SharesForNode fails verification — triggering a Reveal complaint. The other receivers + // verify the complaint and respond with their own shares; receiver 0 reconstructs. + let t = 3; + let f = 2; + let n = 7; + let batch_size_per_weight: u16 = 3; + + let mut rng = rand::thread_rng(); + let sks = (0..n) + .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + .collect::>(); + let nodes = Nodes::new( + sks.iter() + .enumerate() + .map(|(id, sk)| Node { + id: id as u16, + pk: PublicKey::from_private_key(sk), + weight: 1, + }) + .collect::>(), + ) + .unwrap(); + + let sid = b"tbls test".to_vec(); + let dealer_id = 1; + let dealer = Dealer::new( + nodes.clone(), + dealer_id, + f, + t, + sid.clone(), + batch_size_per_weight, + ) + .unwrap(); + + let receivers = sks + .into_iter() + .enumerate() + .map(|(id, secret_key)| { + Receiver::new( + nodes.clone(), + id as u16, + dealer_id, + f, + t, + sid.clone(), + secret_key, + batch_size_per_weight, + ) + .unwrap() + }) + .collect_vec(); + + let messages = dealer.create_message_cheating(&mut rng).unwrap(); + + // Echo phase + let echo_messages = receivers + .iter() + .map(|r| r.echo_message(&messages[r.id as usize]).unwrap()) + .collect_vec(); + let echoes_per_recipient 
= (0..n) + .map(|i| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) + .collect_vec(); + + // Process echoes + verify_and_decrypt + let outcomes: HashMap = receivers + .iter() + .zip(echoes_per_recipient.iter()) + .map(|(r, echoes)| { + let pem = r.process_echo_messages(echoes).unwrap(); + ( + r.id, + r.verify_and_decrypt(pem, &messages[r.id as usize].common) + .unwrap(), + ) + }) + .collect(); + + // Receiver 0 (the targeted victim) emits a Reveal complaint. + let victim_id = 0u16; + let mut outcomes = outcomes; + let complaint = match outcomes.remove(&victim_id).unwrap() { + DecryptionOutcome::Complaint(c @ Complaint::Reveal { .. }) => c, + other => panic!("expected Reveal from victim, got {:?}", outcome_kind(&other)), + }; + + // The other receivers each get a Valid output. + let mut outputs: HashMap = outcomes + .into_iter() + .map(|(id, o)| match o { + DecryptionOutcome::Valid(out) => (id, out), + other => panic!( + "expected Valid from honest receiver {id}, got {:?}", + outcome_kind(&other) + ), + }) + .collect(); + + // Each non-victim verifies the complaint and returns their shares. + let responses = receivers + .iter() + .filter(|r| r.id != victim_id) + .map(|r| { + r.handle_complaint( + &messages[r.id as usize], + &complaint, + outputs.get(&r.id).unwrap(), + ) + .unwrap() + }) + .collect_vec(); + + // Victim recovers via interpolation across t responses. + let recovered = receivers[victim_id as usize] + .recover(&messages[victim_id as usize], responses) + .unwrap(); + outputs.insert(victim_id, recovered); + + // Sanity: every receiver now holds verifiable shares for every secret. 
+ for l in 0..dealer.batch_size { + let shares = receivers + .iter() + .take(t as usize) + .map(|r| Eval { + index: ShareIndex::try_from(r.id + 1).unwrap(), + value: outputs.get(&r.id).unwrap().my_shares.shares[0].batch[l], + }) + .collect_vec(); + Poly::recover_c0(t, shares.into_iter()).unwrap(); + } + } + + fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { + match outcome { + DecryptionOutcome::Valid(_) => "Valid", + DecryptionOutcome::Complaint(Complaint::Reveal { .. }) => "Reveal", + DecryptionOutcome::Complaint(Complaint::Blame { .. }) => "Blame", + } + } + + impl Dealer { + /// Test-only: produce a [Message] in which receiver 0's plaintext has one byte flipped + /// before encryption. AVID dispersal stays consistent (so the AVID checks pass for + /// everyone), but receiver 0's BCS-deserialized [SharesForNode] fails verification. + fn create_message_cheating( + &self, + rng: &mut impl AllowedRng, + ) -> FastCryptoResult> { + self.create_message_with_mutation(rng, |pk_and_msgs| { + // Flip a low-order byte in receiver 0's plaintext. Targeting an offset deep enough + // to land inside an actual share (past BCS length prefixes) ensures the + // deserialized struct is well-formed but holds an invalid scalar field. 
+ pk_and_msgs[0].1[7] ^= 1; + }) + } + } // // #[test] // #[allow(clippy::single_match)] From 720e821161e4e075259c831c1b065f3b1649c1d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 1 May 2026 15:07:34 +0200 Subject: [PATCH 20/91] fmt --- .../src/threshold_schnorr/batch_avss.rs | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 162f830c88..7bfc69aeb0 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -21,7 +21,9 @@ use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; -use fastcrypto::error::FastCryptoError::{InvalidInput, InvalidMessage, InvalidProof, NotEnoughWeight}; +use fastcrypto::error::FastCryptoError::{ + InvalidInput, InvalidMessage, InvalidProof, NotEnoughWeight, +}; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; @@ -521,7 +523,10 @@ impl Receiver { } let tree = MerkleTree::::build_from_unserialized( - message.dispersal.iter().map(|AuthenticatedShards { root, .. }| root), + message + .dispersal + .iter() + .map(|AuthenticatedShards { root, .. 
}| root), )?; let r = tree.root(); let digest = compute_common_message_hash(&message.common); @@ -630,7 +635,11 @@ impl Receiver { } /// The check r_i' == r_i from the paper - fn check_avid_consistency(&self, ciphertext: &[u8], root: &merkle::Node) -> FastCryptoResult<()> { + fn check_avid_consistency( + &self, + ciphertext: &[u8], + root: &merkle::Node, + ) -> FastCryptoResult<()> { let new_shards = self .nodes .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; @@ -809,8 +818,7 @@ impl Receiver { return Err(InvalidProof); } - let challenge = - compute_challenge_from_message(&self.random_oracle(), &r, &message.common); + let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); proof.check( accuser_pk, // TODO: Handle zero-padding @@ -904,13 +912,10 @@ impl Receiver { let shards: Vec> = self .nodes .node_ids_iter() - .flat_map( - |id| match contributions.iter().find(|s| s.party == id) { - Some(s) => s.shards.iter().map(|s| Some(s.clone())).collect_vec(), - None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize) - .collect_vec(), - }, - ) + .flat_map(|id| match contributions.iter().find(|s| s.party == id) { + Some(s) => s.shards.iter().map(|s| Some(s.clone())).collect_vec(), + None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec(), + }) .collect(); self.code.decode(shards) @@ -934,8 +939,7 @@ impl Receiver { } let r = self.global_root(message)?; - let challenge = - compute_challenge_from_message(&self.random_oracle(), &r, &message.common); + let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); let response_shares = responses .into_iter() .filter_map(|response| { @@ -1152,14 +1156,14 @@ mod tests { .iter() .zip(processed_echo_messages) .zip(messages) - .map( - |((receiver, pem), message)| match receiver.verify_and_decrypt(pem, &message.common) { + .map(|((receiver, pem), message)| { + match receiver.verify_and_decrypt(pem, &message.common) { 
Ok(DecryptionOutcome::Valid(output)) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" ), - }, - ) + } + }) .collect::>(); let secrets = (0..dealer.batch_size) @@ -1273,7 +1277,10 @@ mod tests { let mut outcomes = outcomes; let complaint = match outcomes.remove(&victim_id).unwrap() { DecryptionOutcome::Complaint(c @ Complaint::Reveal { .. }) => c, - other => panic!("expected Reveal from victim, got {:?}", outcome_kind(&other)), + other => panic!( + "expected Reveal from victim, got {:?}", + outcome_kind(&other) + ), }; // The other receivers each get a Valid output. From 54554b006a354cbd56d732f18342705445594e24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 10:42:05 +0200 Subject: [PATCH 21/91] Truncate ciphertext and add bcs_serialized_size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add SharesForNode::bcs_serialized_size and a roundtrip test against BCS over various weight × batch_size combinations - Truncate the Reed-Solomon decoded ciphertext to the original length in reconstruct_ciphertext, removing the zero-padding TODOs - Reject contributions whose shard count doesn't match the contributor's weight before decoding - Doc and naming cleanups --- .../src/threshold_schnorr/batch_avss.rs | 143 +++++++++++++----- 1 file changed, 103 insertions(+), 40 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7bfc69aeb0..b43c2a3d3f 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -116,7 +116,6 @@ pub enum DecryptionOutcome { pub enum Complaint { Reveal { proof: complaint::Complaint, - // TODO: Handle zero-padding /// The reconstructed accuser's ciphertext. 
The responder re-encodes this and checks that /// the resulting root matches the dealer-committed `r_i` they have locally. ciphertext: Vec, @@ -199,7 +198,26 @@ impl ShareBatch { } } +/// Byte-width of a BCS-serialized [S] scalar — the secp256k1 scalar serializes to 32 bytes. +const SCALAR_BYTES: usize = 32; + impl SharesForNode { + /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given + /// batch size. Equals the per-receiver ciphertext length, since AES-CTR is length-preserving. + /// + /// Layout: + /// ```text + /// SharesForNode = Vec + /// = ULEB128(weight) + weight × ShareBatch + /// ShareBatch + /// = NonZeroU16 (= 2 bytes) + Vec + S + /// = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_BYTES + /// ``` + fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { + uleb128_len(weight) + + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_BYTES) + } + /// Get the weight of this node (number of shares it has). pub fn weight(&self) -> u16 { self.shares.len() as u16 @@ -605,7 +623,12 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } - let ciphertext = self.reconstruct_ciphertext_from_echoes(&echo_messages)?; + let ciphertext = self.reconstruct_ciphertext(self.id, |id| { + echo_messages + .iter() + .find(|e| e.party == id) + .map(|e| e.s_ij.clone()) + })?; Ok(ProcessedEchoMessages { ciphertext, r, @@ -614,24 +637,37 @@ impl Receiver { }) } - /// Reed-Solomon decode a ciphertext from a set of authenticated [EchoMessage]s. Each echo - /// contributes `Some` shards for its sender's leaves; missing senders contribute `None` - /// erasures. The caller is responsible for having verified the echoes' Merkle proofs and - /// for ensuring the set has enough weight (≥ `W - 2f`) to decode. 
- fn reconstruct_ciphertext_from_echoes( + /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard + /// contributions exposed via `shards_for(party_id) -> Option>`. Fails if the + /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a + /// party's contribution has a shard count that doesn't match its weight. The caller is + /// responsible for having authenticated the shards via their Merkle proofs. + fn reconstruct_ciphertext( &self, - echoes: &[EchoMessage], + accuser_id: PartyId, + shards_for: impl Fn(PartyId) -> Option>, ) -> FastCryptoResult> { let shards: Vec> = self .nodes .node_ids_iter() - .flat_map(|id| match echoes.iter().find(|e| e.party == id) { - Some(e) => e.s_ij.iter().map(|s| Some(s.clone())).collect_vec(), - None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec(), + .map(|id| -> FastCryptoResult>> { + let weight = self.nodes.weight_of(id).expect("valid party id") as usize; + match shards_for(id) { + Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), + // Fail if a contributor's shard count doesn't match its weight. + Some(_) => Err(InvalidInput), + None => Ok(vec![None; weight]), + } }) - .collect(); + .flatten_ok() + .collect::>>()?; - self.code.decode(shards) + let mut ciphertext = self.code.decode(shards)?; + // Reed-Solomon `decode` returns shard-aligned padding; trim back to the original encrypted + // blob length. + let weight = self.nodes.weight_of(accuser_id)? 
as usize; + ciphertext.truncate(SharesForNode::bcs_serialized_size(weight, self.batch_size)); + Ok(ciphertext) } /// The check r_i' == r_i from the paper @@ -703,8 +739,7 @@ impl Receiver { let decrypted_shares = shared .verify(&random_oracle_encryption) .map(|_| { - // TODO: Handle zero-padding - shared.decrypt( + shared.decrypt( &ciphertext, &self.enc_secret_key, &random_oracle_encryption, @@ -730,10 +765,9 @@ impl Receiver { public_keys: full_public_keys.clone(), })), (true, Ok(_)) => { - // The accuser packages the echoes' shard-level proofs (`pi_ij`, leaf-on-r_i) as - // The accuser packages each echo's shard-level proof (`pi_ij`, leaf-on-r_i) as a - // ShardContribution. The responder looks up r_i locally from their own copy of the - // dealer's [Message], so it doesn't have to be transmitted. + // Repackage each echo's per-shard proof as a ShardContribution. r_i stays + // implicit — the responder reads it from its own [Message] rather than receiving + // it via the complaint. let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let hash = any_echo.hash; let shards = valid_echoes @@ -821,8 +855,7 @@ impl Receiver { let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); proof.check( accuser_pk, - // TODO: Handle zero-padding - ciphertext, + ciphertext, &message.common.shared, &self.random_oracle(), |shares: &SharesForNode| { @@ -873,7 +906,12 @@ impl Receiver { } let ciphertext = self - .reconstruct_ciphertext_from_shard_contributions(shards) + .reconstruct_ciphertext(accuser_id, |id| { + shards + .iter() + .find(|s| s.party == id) + .map(|s| s.shards.clone()) + }) .map_err(|_| InvalidProof)?; // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`. @@ -903,23 +941,6 @@ impl Receiver { .root()) } - /// Sibling of [Self::reconstruct_ciphertext_from_echoes] that operates on a slice of - /// [ShardContribution] (the shape carried by [Complaint::Blame]). 
- fn reconstruct_ciphertext_from_shard_contributions( - &self, - contributions: &[ShardContribution], - ) -> FastCryptoResult> { - let shards: Vec> = self - .nodes - .node_ids_iter() - .flat_map(|id| match contributions.iter().find(|s| s.party == id) { - Some(s) => s.shards.iter().map(|s| Some(s.clone())).collect_vec(), - None => repeat_n(None, self.nodes.weight_of(id).unwrap() as usize).collect_vec(), - }) - .collect(); - - self.code.decode(shards) - } /// 5. Upon receiving t valid responses to a complaint, the accuser can recover its shares. /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. @@ -975,9 +996,20 @@ impl Receiver { } } +/// Number of bytes BCS uses to encode `x` as an unsigned LEB128 length prefix. +fn uleb128_len(x: usize) -> usize { + let mut len = 1; + let mut v = x >> 7; + while v != 0 { + len += 1; + v >>= 7; + } + len +} + /// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root /// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if -/// any field is non-uniform (which would indicate inconsistent echoes / a faulty sender). +/// any field is non-uniform. fn require_uniform_echo_metadata( echoes: &[EchoMessage], ) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { @@ -1075,6 +1107,37 @@ mod tests { use std::collections::HashMap; use std::iter::repeat_with; + #[test] + fn test_bcs_serialized_size_matches_serialization() { + // For every (weight, batch_size) in the matrix, build a real `SharesForNode` and BCS- + // serialize it; the byte length must agree with `SharesForNode::bcs_serialized_size`. Cases + // straddle the ULEB128 single-byte/two-byte boundary at 128 in both dimensions. 
+ use crate::threshold_schnorr::S; + use fastcrypto::groups::Scalar; + + let dummy_index = ShareIndex::try_from(1u16).unwrap(); + let zero_scalar = S::zero(); + for &weight in &[1usize, 2, 5, 10, 100, 127, 128, 200] { + for &batch_size in &[1usize, 2, 3, 7, 50, 127, 128, 200] { + let shares_for_node = SharesForNode { + shares: (0..weight) + .map(|_| ShareBatch { + index: dummy_index, + batch: vec![zero_scalar; batch_size], + blinding_share: zero_scalar, + }) + .collect(), + }; + let actual = shares_for_node.to_bytes().len(); + let formula = SharesForNode::bcs_serialized_size(weight, batch_size); + assert_eq!( + actual, formula, + "weight={weight}, batch_size={batch_size}" + ); + } + } + } + #[test] fn test_happy_path() { // No complaints, all honest. All have weight 1 From 68e1d4170d3808e8bf9be6d3ac0a524719b6f562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 11:05:24 +0200 Subject: [PATCH 22/91] Split Complaint into Reveal/Blame, add Response and Vote MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace the Complaint enum with two standalone Reveal and Blame structs - Add Response (Vote / Reveal / Blame) and Vote types; expose DecryptionOutcome::into_response - Drop r_i and pi_i from the Reveal/Blame payload — the verifier looks them up locally from message.dispersal[accuser_id] - Inline verify_reveal/verify_blame into the public handle_* methods - Use fastcrypto's SCALAR_SIZE_IN_BYTES instead of a local constant - Doc trims and a few clarifying TODOs --- .../src/threshold_schnorr/batch_avss.rs | 216 ++++++++++-------- 1 file changed, 121 insertions(+), 95 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index b43c2a3d3f..473e417f47 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -25,6 +25,7 @@ use 
fastcrypto::error::FastCryptoError::{ InvalidInput, InvalidMessage, InvalidProof, NotEnoughWeight, }; use fastcrypto::error::{FastCryptoError, FastCryptoResult}; +use fastcrypto::groups::secp256k1::SCALAR_SIZE_IN_BYTES; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; use fastcrypto::merkle; @@ -104,35 +105,62 @@ pub struct ProcessedEchoMessages { } pub enum DecryptionOutcome { - Valid(ReceiverOutput), - Complaint(Complaint), + Valid { + output: ReceiverOutput, + vote: Vote, + }, + Reveal(Reveal), + Blame(Blame), +} + +impl DecryptionOutcome { + /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when + /// the dealer's broadcast verified, otherwise the [Reveal] or [Blame] itself. The receiver's + /// local [ReceiverOutput] (in the Valid case) is consumed and not part of the wire format. + pub fn into_response(self) -> Response { + match self { + DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), + DecryptionOutcome::Reveal(r) => Response::Reveal(r), + DecryptionOutcome::Blame(b) => Response::Blame(b), + } + } } -/// A complaint by a receiver after `verify_and_decrypt`. There are two flavors: -/// * [Complaint::Reveal] — the receiver could not decrypt or verify its shares. -/// * [Complaint::Blame] — the receiver decrypted valid shares but the AVID dispersal was -/// inconsistent. +/// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's +/// broadcast on the happy path, or a [Reveal] / [Blame] complaint otherwise. #[derive(Clone, Debug, Serialize, Deserialize)] -pub enum Complaint { - Reveal { - proof: complaint::Complaint, - /// The reconstructed accuser's ciphertext. The responder re-encodes this and checks that - /// the resulting root matches the dealer-committed `r_i` they have locally. 
- ciphertext: Vec, - /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. - hash: Digest<32>, - }, - Blame { - accuser_id: PartyId, - /// At least `W - 2f` weight worth of shards `s_{ji}`, each with a Merkle proof under the - /// dealer-committed `r_i` at the contributing party's leaf. - shards: Vec, - hash: Digest<32>, - }, +pub enum Response { + Vote(Vote), + Reveal(Reveal), + Blame(Blame), +} + +/// An endorsement of the dealer's broadcast. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Vote { + pub r: merkle::Node, + pub hash: Digest<32>, } -/// One sender's contribution of shards toward reconstructing the accuser's ciphertext, with a -/// Merkle proof binding the shards to the accuser's per-ciphertext root `r_i`. +/// A complaint by a receiver who could not decrypt or verify its shares. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Reveal { + pub proof: complaint::Complaint, + pub ciphertext: Vec, + /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. + pub hash: Digest<32>, +} + +/// A complaint by a receiver who decrypted valid shares but found the AVID dispersal +/// inconsistent. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Blame { + pub accuser_id: PartyId, + pub shards: Vec, + pub hash: Digest<32>, +} + +/// One sender's contribution of shards toward reconstructing the accuser's ciphertext. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShardContribution { pub party: PartyId, @@ -198,12 +226,9 @@ impl ShareBatch { } } -/// Byte-width of a BCS-serialized [S] scalar — the secp256k1 scalar serializes to 32 bytes. -const SCALAR_BYTES: usize = 32; - impl SharesForNode { /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given - /// batch size. Equals the per-receiver ciphertext length, since AES-CTR is length-preserving. + /// batch size. 
/// /// Layout: /// ```text @@ -211,11 +236,14 @@ impl SharesForNode { /// = ULEB128(weight) + weight × ShareBatch /// ShareBatch /// = NonZeroU16 (= 2 bytes) + Vec + S - /// = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_BYTES + /// = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_SIZE_IN_BYTES /// ``` fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { + // TODO: A bit of a hack — this hardcodes the BCS layout of `SharesForNode`/`ShareBatch` + // and the 32-byte scalar size. Any change to those types' fields silently invalidates + // this formula; the unit test catches it but only within the tested ranges. uleb128_len(weight) - + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_BYTES) + + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_SIZE_IN_BYTES) } /// Get the weight of this node (number of shares it has). @@ -690,13 +718,16 @@ impl Receiver { /// 3. If the party also received a valid Message from the dealer, it can now decrypt its shares. /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. + /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the + /// outcome, which yields a [Response::Vote] for the caller to sign. /// /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid /// Message from the dealer should request the CommonMessage part of that from the parties who voted. /// Using this, the party can decrypt the shares and verify that the shares are valid. /// - /// If this function returns a [DecryptionOutcome::Complaint], the party should broadcast it - /// to the other parties. + /// If this function returns a [Reveal] or [Blame] outcome, the party should broadcast it + /// to the other parties — but only after at least `W - f` votes from other parties have + /// appeared on the TOB/ABC channel. 
pub fn verify_and_decrypt( &self, processed_echo_messages: ProcessedEchoMessages, @@ -759,11 +790,21 @@ impl Receiver { Ok(my_shares) }); + // TODO: Revisit this dispatch — confirm each (faulty_dealer, decrypted_shares) combination + // produces the right outcome (Valid / Blame / Reveal) under both honest and Byzantine + // dealer behavior, including the (false, Err) case where the AVID layer agreed but + // decryption still failed. match (faulty_dealer, decrypted_shares) { - (false, Ok(my_shares)) => Ok(Valid(ReceiverOutput { - my_shares, - public_keys: full_public_keys.clone(), - })), + (false, Ok(my_shares)) => Ok(Valid { + output: ReceiverOutput { + my_shares, + public_keys: full_public_keys.clone(), + }, + vote: Vote { + r, + hash: compute_common_message_hash(message), + }, + }), (true, Ok(_)) => { // Repackage each echo's per-shard proof as a ShardContribution. r_i stays // implicit — the responder reads it from its own [Message] rather than receiving @@ -778,7 +819,7 @@ impl Receiver { proof: e.pi_ij, }) .collect_vec(); - Ok(DecryptionOutcome::Complaint(Complaint::Blame { + Ok(DecryptionOutcome::Blame(Blame { accuser_id: self.id, shards, hash, @@ -786,7 +827,7 @@ impl Receiver { } (_, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - Ok(DecryptionOutcome::Complaint(Complaint::Reveal { + Ok(DecryptionOutcome::Reveal(Reveal { proof: complaint::Complaint::create( self.id, &shared, @@ -801,46 +842,23 @@ impl Receiver { } } - /// 4. Upon receiving a [Complaint] from another party, verify it and, if valid, respond with - /// this party's own shares so the accuser can recover. `message` is the dealer's full - /// [Message] as this party received it; the verifier looks up the accuser's per-ciphertext - /// root locally from `message.dispersal[accuser_id]` rather than trusting the complaint to - /// carry it. - pub fn handle_complaint( + /// 4. 
Upon receiving a [Reveal] from another party, verify it and respond with this party's + /// own shares so the accuser can recover. The ciphertext must be authenticated as the dealer's + /// by re-encoding under the locally-known `r_i`, and decryption with the recovery package must + /// yield invalid shares. `message` is the dealer's full [Message] as this party received it; + /// the verifier looks up the accuser's per-ciphertext root locally from + /// `message.dispersal[accuser_id]` rather than trusting the complaint to carry it. + pub fn handle_reveal( &self, message: &Message, - complaint: &Complaint, + reveal: &Reveal, my_output: &ReceiverOutput, ) -> FastCryptoResult> { - match complaint { - Complaint::Reveal { - proof, - ciphertext, - hash, - } => { - self.verify_reveal(message, proof, ciphertext, hash)?; - } - Complaint::Blame { - accuser_id, - shards, - hash, - } => { - self.verify_blame(message, *accuser_id, shards, hash)?; - } - } - Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) - } - - /// Verify a [Complaint::Reveal]: the ciphertext must be authenticated as the dealer's by - /// re-encoding under the locally-known `r_i`, and decryption with the recovery package must - /// yield invalid shares. 
- fn verify_reveal( - &self, - message: &Message, - proof: &complaint::Complaint, - ciphertext: &[u8], - hash: &Digest<32>, - ) -> FastCryptoResult<()> { + let Reveal { + proof, + ciphertext, + hash, + } = reveal; let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let r_i = self.dispersal_root_for(message, accuser_id)?; @@ -855,7 +873,7 @@ impl Receiver { let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); proof.check( accuser_pk, - ciphertext, + ciphertext, &message.common.shared, &self.random_oracle(), |shares: &SharesForNode| { @@ -868,18 +886,26 @@ impl Receiver { self.batch_size, ) }, - ) + )?; + + Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Verify a [Complaint::Blame]: the accuser must have collected enough authenticated shards - /// whose re-encoded ciphertext root differs from the locally-known `r_i`. - fn verify_blame( + /// Counterpart to [Self::handle_reveal] for [Blame]. The accuser must have collected enough + /// authenticated shards whose re-encoded ciphertext root differs from the locally-known + /// `r_i`. On success, respond with this party's own shares. 
+ pub fn handle_blame( &self, message: &Message, - accuser_id: PartyId, - shards: &[ShardContribution], - hash: &Digest<32>, - ) -> FastCryptoResult<()> { + blame: &Blame, + my_output: &ReceiverOutput, + ) -> FastCryptoResult> { + let Blame { + accuser_id, + shards, + hash, + } = blame; + let accuser_id = *accuser_id; let r_i = self.dispersal_root_for(message, accuser_id)?; if hash != &compute_common_message_hash(&message.common) { @@ -919,7 +945,7 @@ impl Receiver { return Err(InvalidProof); } - Ok(()) + Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } fn dispersal_root_for<'a>( @@ -1086,8 +1112,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Complaint, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, - Receiver, ReceiverOutput, ShareBatch, SharesForNode, + compute_challenge, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, Receiver, + ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; @@ -1221,7 +1247,7 @@ mod tests { .zip(messages) .map(|((receiver, pem), message)| { match receiver.verify_and_decrypt(pem, &message.common) { - Ok(DecryptionOutcome::Valid(output)) => (receiver.id, output), + Ok(DecryptionOutcome::Valid { output, .. }) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" ), @@ -1338,8 +1364,8 @@ mod tests { // Receiver 0 (the targeted victim) emits a Reveal complaint. let victim_id = 0u16; let mut outcomes = outcomes; - let complaint = match outcomes.remove(&victim_id).unwrap() { - DecryptionOutcome::Complaint(c @ Complaint::Reveal { .. 
}) => c, + let reveal = match outcomes.remove(&victim_id).unwrap() { + DecryptionOutcome::Reveal(r) => r, other => panic!( "expected Reveal from victim, got {:?}", outcome_kind(&other) @@ -1350,7 +1376,7 @@ mod tests { let mut outputs: HashMap = outcomes .into_iter() .map(|(id, o)| match o { - DecryptionOutcome::Valid(out) => (id, out), + DecryptionOutcome::Valid { output, .. } => (id, output), other => panic!( "expected Valid from honest receiver {id}, got {:?}", outcome_kind(&other) @@ -1363,9 +1389,9 @@ mod tests { .iter() .filter(|r| r.id != victim_id) .map(|r| { - r.handle_complaint( + r.handle_reveal( &messages[r.id as usize], - &complaint, + &reveal, outputs.get(&r.id).unwrap(), ) .unwrap() @@ -1394,9 +1420,9 @@ mod tests { fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { match outcome { - DecryptionOutcome::Valid(_) => "Valid", - DecryptionOutcome::Complaint(Complaint::Reveal { .. }) => "Reveal", - DecryptionOutcome::Complaint(Complaint::Blame { .. }) => "Blame", + DecryptionOutcome::Valid { .. 
} => "Valid", + DecryptionOutcome::Reveal(_) => "Reveal", + DecryptionOutcome::Blame(_) => "Blame", } } From dd98c71e08d20bcc8deecf92e95c41f88f4978db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 11:22:45 +0200 Subject: [PATCH 23/91] Resurrect test_e2e, take Message in verify_and_decrypt - verify_and_decrypt and the test happy path now take &Message instead of &CommonMessage (consistent with handle_reveal/blame and recover) - Restore test_e2e exercising DKG -> presigning -> signing -> key rotation -> signing against the current API, with explanatory comments and a reshare-commitment check - Remove the rest of the commented-out tests in batch_avss.rs - Add short docs on EchoMessage, ProcessedEchoMessages, DecryptionOutcome - Rename shadowed echo_messages bindings to echoes_by_sender / echoes_by_recipient --- .../src/threshold_schnorr/batch_avss.rs | 310 +------ fastcrypto-tbls/src/threshold_schnorr/mod.rs | 851 +++++++++--------- 2 files changed, 449 insertions(+), 712 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 473e417f47..40a3702414 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -86,6 +86,8 @@ pub struct AuthenticatedShards { proof: merkle::MerkleProof, } +/// One sender's echo to a single recipient: their shard for the recipient's ciphertext, with +/// Merkle proofs binding it to the dealer's broadcast. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EchoMessage { party: PartyId, @@ -97,6 +99,7 @@ pub struct EchoMessage { pub pi_ij: merkle::MerkleProof, } +/// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. 
pub struct ProcessedEchoMessages { ciphertext: Vec, r: merkle::Node, @@ -104,6 +107,8 @@ pub struct ProcessedEchoMessages { valid_echoes: Vec, } +/// The result of [Receiver::verify_and_decrypt]: either valid shares plus a vote to broadcast, or +/// a complaint to broadcast instead. pub enum DecryptionOutcome { Valid { output: ReceiverOutput, @@ -127,7 +132,7 @@ impl DecryptionOutcome { } /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's -/// broadcast on the happy path, or a [Reveal] / [Blame] complaint otherwise. +/// broadcast or a [Reveal] / [Blame] complaint otherwise. #[derive(Clone, Debug, Serialize, Deserialize)] pub enum Response { Vote(Vote), @@ -731,14 +736,14 @@ impl Receiver { pub fn verify_and_decrypt( &self, processed_echo_messages: ProcessedEchoMessages, - message: &CommonMessage, + message: &Message, ) -> FastCryptoResult { let CommonMessage { full_public_keys, blinding_commit, response_polynomial, shared, - } = message; + } = &message.common; let ProcessedEchoMessages { ciphertext, @@ -754,7 +759,8 @@ impl Receiver { // TODO: What should happen if these checks fail? 
// Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_message(&self.random_oracle(), &r, message); + let challenge = + compute_challenge_from_message(&self.random_oracle(), &r, &message.common); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -783,7 +789,7 @@ impl Receiver { &my_shares, &self.nodes, self.id, - message, + &message.common, &challenge, self.batch_size, )?; @@ -802,7 +808,7 @@ impl Receiver { }, vote: Vote { r, - hash: compute_common_message_hash(message), + hash: compute_common_message_hash(&message.common), }, }), (true, Ok(_)) => { @@ -1220,24 +1226,24 @@ mod tests { let messages = dealer.create_message(&mut rng).unwrap(); - let echo_messages = receivers + let echoes_by_sender = receivers .iter() .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) .collect::>>() .unwrap(); - let echo_messages = receivers + let echoes_by_recipient = receivers .iter() .enumerate() - .map(|(i, _)| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) + .map(|(i, _)| echoes_by_sender.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); let processed_echo_messages = receivers .iter() .zip(messages.iter()) - .zip(echo_messages.iter()) - .map(|((receiver, _message), echo_message)| { - receiver.process_echo_messages(echo_message).unwrap() + .zip(echoes_by_recipient.iter()) + .map(|((receiver, _message), echoes)| { + receiver.process_echo_messages(echoes).unwrap() }) .collect_vec(); @@ -1246,7 +1252,7 @@ mod tests { .zip(processed_echo_messages) .zip(messages) .map(|((receiver, pem), message)| { - match receiver.verify_and_decrypt(pem, &message.common) { + match receiver.verify_and_decrypt(pem, &message) { Ok(DecryptionOutcome::Valid { output, .. 
}) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" @@ -1355,7 +1361,7 @@ mod tests { let pem = r.process_echo_messages(echoes).unwrap(); ( r.id, - r.verify_and_decrypt(pem, &messages[r.id as usize].common) + r.verify_and_decrypt(pem, &messages[r.id as usize]) .unwrap(), ) }) @@ -1442,280 +1448,4 @@ mod tests { }) } } - // - // #[test] - // #[allow(clippy::single_match)] - // fn test_happy_path_non_equal_weights() { - // // No complaints, all honest - // let t = 4; - // let f = 3; - // let weights: Vec = vec![1, 2, 3, 4]; - // let batch_size_per_weight = 3; - // - // let mut rng = rand::thread_rng(); - // let sks = weights - // .iter() - // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - // .collect::>(); - // let nodes = Nodes::new( - // weights - // .into_iter() - // .enumerate() - // .map(|(i, weight)| Node { - // id: i as u16, - // pk: PublicKey::from_private_key(&sks[i]), - // weight, - // }) - // .collect_vec(), - // ) - // .unwrap(); - // - // let dealer_id = 2; - // let sid = b"tbls test".to_vec(); - // let dealer: Dealer = Dealer::new( - // nodes.clone(), - // dealer_id, - // f, - // t, - // sid.clone(), - // batch_size_per_weight, - // ) - // .unwrap(); - // - // let receivers = sks - // .into_iter() - // .enumerate() - // .map(|(i, secret_key)| { - // Receiver::new( - // nodes.clone(), - // i as u16, - // dealer_id, - // t, - // sid.clone(), - // secret_key, - // batch_size_per_weight, - // ) - // .unwrap() - // }) - // .collect_vec(); - // - // let message = dealer.create_message(&mut rng).unwrap(); - // - // let all_shares = receivers - // .iter() - // .flat_map(|receiver| { - // assert_valid(receiver.process_message(&message).unwrap()) - // .my_shares - // .shares - // }) - // .collect::>(); - // - // let secrets = (0..dealer.batch_size) - // .map(|l| { - // Poly::recover_c0( - // t, - // all_shares.iter().take(t as usize).map(|s| Eval { - // index: s.index, - // value: s.batch[l], - // 
}), - // ) - // .unwrap() - // }) - // .collect::>(); - // - // assert_eq!(secrets, secrets); - // } - // - // #[test] - // fn test_share_recovery() { - // let t = 3; - // let f = 2; - // let n = 7; - // let batch_size_per_weight: u16 = 3; - // - // let mut rng = rand::thread_rng(); - // let sks = (0..n) - // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - // .collect::>(); - // let nodes = Nodes::new( - // sks.iter() - // .enumerate() - // .map(|(id, sk)| Node { - // id: id as u16, - // pk: PublicKey::from_private_key(sk), - // weight: 1, - // }) - // .collect::>(), - // ) - // .unwrap(); - // - // let sid = b"tbls test".to_vec(); - // - // let dealer_id = 1; - // let dealer: Dealer = Dealer::new( - // nodes.clone(), - // dealer_id, - // f, - // t, - // sid.clone(), - // batch_size_per_weight, - // ) - // .unwrap(); - // - // let receivers = sks - // .into_iter() - // .enumerate() - // .map(|(id, secret_key)| { - // Receiver::new( - // nodes.clone(), - // id as u16, - // dealer_id, - // t, - // sid.clone(), - // secret_key, - // batch_size_per_weight, - // ) - // .unwrap() - // }) - // .collect::>(); - // - // let message = dealer.create_message_cheating(&mut rng).unwrap(); - // - // let mut all_shares = receivers - // .iter() - // .map(|receiver| (receiver.id, receiver.process_message(&message).unwrap())) - // .collect::>(); - // - // let complaint = assert_complaint(all_shares.remove(&receivers[0].id).unwrap()); - // let mut all_shares = all_shares - // .into_iter() - // .map(|(id, pm)| (id, assert_valid(pm))) - // .collect::>(); - // - // let responses = receivers - // .iter() - // .skip(1) - // .map(|r| { - // r.handle_complaint(&message, &complaint, all_shares.get(&r.id).unwrap()) - // .unwrap() - // }) - // .collect::>(); - // let shares = receivers[0].recover(&message, responses).unwrap(); - // all_shares.insert(receivers[0].id, shares); - // - // // Recover with the first f+1 shares, including the reconstructed - // let secrets = (0..dealer.batch_size) - 
// .map(|l| { - // let shares = all_shares - // .iter() - // .map(|(id, s)| (*id, s.my_shares.shares[0].batch[l])) - // .collect::>(); - // Poly::recover_c0( - // t, - // shares.iter().take(t as usize).map(|(id, v)| Eval { - // index: ShareIndex::try_from(id + 1).unwrap(), - // value: *v, - // }), - // ) - // .unwrap() - // }) - // .collect::>(); - // - // assert_eq!(secrets, secrets); - // } - // - // impl Dealer { - // /// 1. The Dealer samples L nonces, generates shares and broadcasts the encrypted shares. This also returns the nonces to be secret shared along with their corresponding public keys. - // pub fn create_message_cheating( - // &self, - // rng: &mut impl AllowedRng, - // ) -> FastCryptoResult { - // let polynomials = repeat_with(|| Poly::rand(self.t - 1, rng)) - // .take(self.batch_size) - // .collect_vec(); - // - // // Compute the (full) public keys for all secrets - // let full_public_keys = polynomials - // .iter() - // .map(|p| G::generator() * p.c0()) - // .collect_vec(); - // - // // "blinding" polynomial as defined in https://eprint.iacr.org/2023/536.pdf. 
- // let blinding_poly = Poly::rand(self.t - 1, rng); - // let blinding_commit = G::generator() * blinding_poly.c0(); - // - // // Encrypt all shares to the receivers - // let mut pk_and_msgs = self - // .nodes - // .iter() - // .map(|node| (node.pk.clone(), self.nodes.share_ids_of(node.id).unwrap())) - // .map(|(public_key, share_ids)| { - // ( - // public_key, - // SharesForNode { - // shares: share_ids - // .into_iter() - // .map(|index| ShareBatch { - // index, - // batch: polynomials - // .iter() - // .map(|p_l| p_l.eval(index).value) - // .collect_vec(), - // blinding_share: blinding_poly.eval(index).value, - // }) - // .collect_vec(), - // }, - // ) - // }) - // .map(|(pk, shares_for_node)| (pk, shares_for_node.to_bytes())) - // .collect_vec(); - // - // // Modify the first share of the first receiver to simulate a cheating dealer - // pk_and_msgs[0].1[7] ^= 1; - // - // let ciphertext = MultiRecipientEncryption::encrypt( - // &pk_and_msgs, - // &self.random_oracle().extend(&Encryption.to_string()), - // rng, - // ); - // - // let (shared, ciphertexts) = ciphertext.clone().into_parts(); - // let code = ErasureCoder::new( - // self.nodes.total_weight() as usize, - // (self.nodes.total_weight() - 2 * self.f) as usize, - // )?; - // let roots = ciphertexts - // .iter() - // .map(|part| { - // let shards = code.encode(part)?; - // let tree = - // fastcrypto::merkle::MerkleTree::::build_from_unserialized( - // shards.iter(), - // )?; - // Ok(tree.root()) - // }) - // .collect::>>()?; - // - // // "response" polynomials from https://eprint.iacr.org/2023/536.pdf - // let challenge = compute_challenge( - // &self.random_oracle(), - // &full_public_keys, - // &blinding_commit, - // &shared, - // &roots, - // ); - // let mut response_polynomial = blinding_poly; - // for (p_l, gamma_l) in polynomials.into_iter().zip_eq(&challenge) { - // response_polynomial += &(p_l * gamma_l); - // } - // - // Ok(Message { - // full_public_keys, - // blinding_commit, - // ciphertext, 
- // response_polynomial, - // roots, - // }) - // } - // } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index dd6ae13bdb..feb75db7e8 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -93,428 +93,435 @@ mod tests { use itertools::Itertools; use std::collections::HashMap; use std::hash::Hash; - // - // #[test] - // fn test_e2e() { - // // No complaints, all honest - // let t = 3; - // let f = 2; - // let weights = [1, 2, 2, 2]; - // let n = weights.len(); - // - // let batch_size_per_weight: u16 = 10; - // - // let mut rng = rand::thread_rng(); - // let sks = (0..n) - // .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) - // .collect::>(); - // let nodes = Nodes::new( - // sks.iter() - // .enumerate() - // .zip(weights) - // .map(|((id, sk), weight)| Node { - // id: id as u16, - // pk: PublicKey::from_private_key(sk), - // weight, - // }) - // .collect::>(), - // ) - // .unwrap(); - // - // // - // // DKG - // // - // - // // Map from each party to the outputs it has received - // let mut dkg_outputs = HashMap::>::new(); - // nodes.node_ids_iter().for_each(|id| { - // dkg_outputs.insert(id, HashMap::new()); - // }); - // - // let mut messages = Vec::new(); - // for dealer_id in nodes.node_ids_iter() { - // let sid = format!("dkg-test-session-{}", dealer_id).into_bytes(); - // let dealer: avss::Dealer = - // avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); - // let receivers = sks - // .iter() - // .enumerate() - // .map(|(id, enc_secret_key)| { - // avss::Receiver::new( - // nodes.clone(), - // id as u16, - // t, - // sid.clone(), - // None, - // enc_secret_key.clone(), - // ) - // }) - // .collect::>(); - // - // // Each dealer creates a message - // let message = dealer.create_message(&mut rng); - // messages.push(message.clone()); - // - // // Each receiver processes the message. 
In this case, we assume all are honest and there are no complaints. - // receivers.iter().for_each(|receiver| { - // let output = assert_valid(receiver.process_message(&message).unwrap()); - // dkg_outputs - // .get_mut(&receiver.id()) - // .unwrap() - // .insert(dealer_id, output); - // }); - // } - // - // // The dealers to form the certificate should have weight >= t, and are the ones whose outputs will be used to create the final shares. - // let dkg_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - // - // // Now, each party has collected their outputs from all dealers. We use the output from the dealers in dkg_cert create the final shares for signing. - // // Each party should still keep the outputs from all dealers until the end of the epoch to handle complaints. - // let merged_shares = nodes - // .iter() - // .map(|node| { - // ( - // node.id, - // avss::ReceiverOutput::complete_dkg( - // t, - // &nodes, - // restrict(dkg_outputs.get(&node.id).unwrap(), dkg_cert.into_iter()), - // ) - // .unwrap(), - // ) - // }) - // .collect::>(); - // - // // All receivers should now have the same verifying key - // let vk = get_uniform_value(merged_shares.values().map(|output| output.vk)).unwrap(); - // - // // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. - // // In practice, the parties should never do this... - // let shares = merged_shares - // .values() - // .flat_map(|output| output.my_shares.shares.clone()) - // .take(t as usize); - // let sk = Poly::recover_c0(t, shares).unwrap(); - // assert_eq!(G::generator() * sk, vk); - // - // // - // // PRESIGNING - // // - // - // // Generate a batch of nonces for each party's share - // let mut presigning_outputs = HashMap::>::new(); - // nodes.node_ids_iter().for_each(|id| { - // presigning_outputs.insert(id, Vec::new()); - // }); - // - // // Each dealer generates a batch of presigs per share they control. 
- // for dealer_id in nodes.node_ids_iter() { - // let sid = format!("presig-test-session-{}", dealer_id).into_bytes(); - // let dealer: batch_avss::Dealer = batch_avss::Dealer::new( - // nodes.clone(), - // dealer_id, - // f, - // t, - // sid.clone(), - // batch_size_per_weight, - // ) - // .unwrap(); - // let receivers = sks - // .iter() - // .enumerate() - // .map(|(id, enc_secret_key)| { - // batch_avss::Receiver::new( - // nodes.clone(), - // id as u16, - // dealer_id, - // t, - // f, - // sid.clone(), - // enc_secret_key.clone(), - // batch_size_per_weight, - // ) - // .unwrap() - // }) - // .collect::>(); - // - // // Each dealer creates a message - // let messages = dealer.create_message(&mut rng).unwrap(); - // - // let echo_messages = receivers - // .iter() - // .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) - // .collect::>>() - // .unwrap(); - // - // // Each receiver processes the message. - // // In this case, we assume all are honest and there are no complaints. 
- // receivers.iter().zip(messages).zip(&echo_messages).for_each( - // |((receiver, message), echo_message)| { - // let output = assert_valid_batch( - // receiver - // .process_echo_messages(&echo_message) - // .unwrap(), - // ); - // presigning_outputs - // .get_mut(&receiver.id) - // .unwrap() - // .push(output); - // }, - // ); - // } - // - // // Each party can process their presigs locally from the secret shared nonces - // let mut presigs = presigning_outputs - // .into_iter() - // .map(|(id, outputs)| { - // ( - // id, - // Presignatures::new(outputs, batch_size_per_weight, f as usize).unwrap(), - // ) - // }) - // .collect::>(); - // assert_eq!( - // presigs.get(&PartyId::from(1u8)).unwrap().len(), - // batch_size_per_weight as usize * (weights.iter().sum::() as usize - f as usize) - // ); - // - // // - // // SIGNING - // // - // - // let message = b"Hello, world!"; - // - // // Mock a value from the random beacon - // let beacon_value = S::rand(&mut rng); - // - // // Each party generates their partial signatures - // let partial_signatures = nodes - // .iter() - // .map(|node| { - // generate_partial_signatures( - // message, - // presigs.get_mut(&node.id).unwrap().next().unwrap(), - // &beacon_value, - // &merged_shares.get(&node.id).unwrap().my_shares, - // &vk, - // None, - // ) - // .unwrap() - // }) - // .collect_vec(); - // - // // The public parts should all be the same - // let public_presig = get_uniform_value( - // partial_signatures - // .iter() - // .map(|partial_signature| partial_signature.0), - // ) - // .unwrap(); - // - // // Aggregate partial signatures - // let signature = aggregate_signatures( - // message, - // &public_presig, - // &beacon_value, - // &partial_signatures - // .iter() - // .flat_map(|(_, s)| s.clone()) - // .collect_vec(), - // t, - // &vk, - // None, - // ) - // .unwrap(); - // - // // Check that this produced a valid signature - // SchnorrPublicKey::try_from(&vk) - // .unwrap() - // .verify(message, &signature) - // 
.unwrap(); - // - // // - // // KEY ROTATION - // // - // - // // Map from each party to the ordered list of outputs it has received. - // // Here, each party will act as dealer multiple times -- once per share they have. - // let mut dkg_outputs_after_rotation = - // HashMap::<(PartyId, ShareIndex), avss::PartialOutput>::new(); - // let mut messages = HashMap::<(PartyId, ShareIndex), avss::Message>::new(); - // - // for dealer_id in nodes.node_ids_iter() { - // for share_index in nodes.share_ids_of(dealer_id).unwrap() { - // let sid = - // format!("key-rotation-test-session-{}-{}", dealer_id, share_index).into_bytes(); - // - // // Each dealer uses their existing share as the secret to reshare - // let secret = merged_shares - // .get(&dealer_id) - // .unwrap() - // .share_for_index(share_index) - // .unwrap() - // .value; - // let dealer: avss::Dealer = - // avss::Dealer::new(Some(secret), nodes.clone(), t, sid.clone()).unwrap(); - // - // let receivers = sks - // .iter() - // .enumerate() - // .map(|(id, enc_secret_key)| { - // let commitment = merged_shares - // .get(&(id as u16)) - // .unwrap() - // .commitments - // .iter() - // .find(|c| c.index == share_index) - // .unwrap() - // .value; - // avss::Receiver::new( - // nodes.clone(), - // id as u16, - // t, - // sid.clone(), - // Some(commitment), - // enc_secret_key.clone(), - // ) - // }) - // .collect::>(); - // - // // Each dealer creates a message - // let message = dealer.create_message(&mut rng); - // messages.insert((dealer_id, share_index), message.clone()); - // - // // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. 
- // receivers.iter().for_each(|receiver| { - // let output = assert_valid(receiver.process_message(&message).unwrap()); - // dkg_outputs_after_rotation.insert((receiver.id(), share_index), output); - // }); - // } - // } - // - // // The first t dealers (counted by weight) form the certificate and are the ones whose outputs will be used to create the final shares. - // let key_rotation_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - // let share_indices_in_cert = key_rotation_cert - // .iter() - // .flat_map(|id| nodes.share_ids_of(*id).unwrap()) - // .collect_vec(); - // - // // Now, each party has collected their outputs from all dealers and can form their new shares from the ones in the certificate. - // let merged_shares_after_rotation = nodes - // .node_ids_iter() - // .map(|receiver_id| { - // let my_shares_from_cert = share_indices_in_cert - // .iter() - // .map(|&index| IndexedValue { - // index, - // value: dkg_outputs_after_rotation - // .get(&(receiver_id, index)) - // .unwrap() - // .clone(), - // }) - // .collect_vec(); - // ( - // receiver_id, - // avss::ReceiverOutput::complete_key_rotation( - // t, - // receiver_id, - // &nodes, - // &my_shares_from_cert - // .into_iter() - // .take(t as usize) - // .collect_vec(), - // ) - // .unwrap(), - // ) - // }) - // .collect::>(); - // - // // The verifying key should be the same as before - // for output in merged_shares_after_rotation.values() { - // assert_eq!(output.vk, vk); - // } - // - // // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. - // // In practice, the parties should never do this... 
- // let shares = merged_shares_after_rotation - // .values() - // .flat_map(|output| output.my_shares.shares.clone()) - // .take(t as usize); - // let sk = Poly::recover_c0(t, shares).unwrap(); - // assert_eq!(G::generator() * sk, vk); - // - // // Check commitments on the reshared secret from the first dealer - // let commitment_1 = merged_shares_after_rotation - // .get(&0) - // .unwrap() - // .commitments - // .first() - // .unwrap(); - // let secret_1 = merged_shares_after_rotation - // .get(&0) - // .unwrap() - // .share_for_index(commitment_1.index) - // .unwrap() - // .value; - // assert_eq!(G::generator() * secret_1, commitment_1.value); - // - // // - // // SIGNING (again) - // // - // - // let message_2 = b"Hello again, world!"; - // - // // Mock a value from the random beacon - // let beacon_value = S::rand(&mut rng); - // - // // Each party generates their partial signatures - // let partial_signatures = nodes - // .iter() - // .map(|node| { - // generate_partial_signatures( - // message_2, - // presigs.get_mut(&node.id).unwrap().next().unwrap(), - // &beacon_value, - // &merged_shares_after_rotation - // .get(&node.id) - // .unwrap() - // .my_shares, - // &vk, - // None, - // ) - // .unwrap() - // }) - // .collect_vec(); - // - // // The public parts should all be the same - // let public_presig = get_uniform_value( - // partial_signatures - // .iter() - // .map(|partial_signature| partial_signature.0), - // ) - // .unwrap(); - // - // // Aggregate partial signatures - // let signature_2 = aggregate_signatures( - // message_2, - // &public_presig, - // &beacon_value, - // &partial_signatures - // .iter() - // .flat_map(|(_, s)| s.clone()) - // .collect_vec(), - // t, - // &vk, - // None, - // ) - // .unwrap(); - // - // // Check that this produced a valid signature - // SchnorrPublicKey::try_from(&vk) - // .unwrap() - // .verify(message_2, &signature_2) - // .unwrap(); - // } + #[test] + fn test_e2e() { + // 4 parties with weights [1, 2, 2, 2], total 
weight 7. No complaints, all honest. + let t = 3; + let f = 2; + let weights: [u16; 4] = [1, 2, 2, 2]; + let n = weights.len(); + let batch_size_per_weight: u16 = 10; + + let mut rng = rand::thread_rng(); + let sks = (0..n) + .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + .collect::>(); + let nodes = Nodes::new( + sks.iter() + .enumerate() + .zip(weights) + .map(|((id, sk), weight)| Node { + id: id as u16, + pk: PublicKey::from_private_key(sk), + weight, + }) + .collect::>(), + ) + .unwrap(); + + // + // DKG (via avss) + // + + // Map from each party to the outputs it has received from each dealer. + let mut dkg_outputs: HashMap> = + HashMap::new(); + nodes.node_ids_iter().for_each(|id| { + dkg_outputs.insert(id, HashMap::new()); + }); + + for dealer_id in nodes.node_ids_iter() { + let sid = format!("dkg-test-session-{dealer_id}").into_bytes(); + let dealer = avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); + let receivers = sks + .iter() + .enumerate() + .map(|(id, enc_secret_key)| { + avss::Receiver::new( + nodes.clone(), + id as u16, + t, + sid.clone(), + None, + enc_secret_key.clone(), + ) + }) + .collect::>(); + + // Each dealer creates a message + let message = dealer.create_message(&mut rng); + + // Each receiver processes the message. In this case, we assume all are honest and + // there are no complaints. + receivers.iter().for_each(|r| { + let pm = r.process_message(&message).unwrap(); + let output = match pm { + avss::ProcessedMessage::Valid(po) => po, + avss::ProcessedMessage::Complaint(_) => { + panic!("expected valid avss output") + } + }; + dkg_outputs + .get_mut(&r.id()) + .unwrap() + .insert(dealer_id, output); + }); + } + + // The dealers to form the certificate should have weight >= t, and are the ones whose + // outputs will be used to create the final shares. + let dkg_cert = [PartyId::from(1u8), PartyId::from(2u8)]; + + // Now, each party has collected their outputs from all dealers. 
We use the outputs from + // the dealers in `dkg_cert` to create the final shares for signing. Each party should + // still keep the outputs from all dealers until the end of the epoch to handle complaints. + let merged_shares: HashMap = nodes + .iter() + .map(|node| { + ( + node.id, + avss::ReceiverOutput::complete_dkg( + t, + &nodes, + restrict(dkg_outputs.get(&node.id).unwrap(), dkg_cert.into_iter()), + ) + .unwrap(), + ) + }) + .collect(); + + // All receivers should now have the same verifying key. + let vk = get_uniform_value(merged_shares.values().map(|out| out.vk)).unwrap(); + + // For testing, we now recover the secret key from t shares and check that the secret + // key matches the verifying key. In practice, the parties should never do this. + let shares = merged_shares + .values() + .flat_map(|output| output.my_shares.shares.clone()) + .take(t as usize); + let sk = Poly::recover_c0(t, shares).unwrap(); + assert_eq!(G::generator() * sk, vk); + + // + // PRESIGNING (via batch_avss) + // + + // Generate a batch of nonces for each party's share. + let mut presigning_outputs: HashMap> = + HashMap::new(); + nodes.node_ids_iter().for_each(|id| { + presigning_outputs.insert(id, Vec::new()); + }); + + // Each dealer generates a batch of presigs per share they control. + for dealer_id in nodes.node_ids_iter() { + let sid = format!("presig-test-session-{dealer_id}").into_bytes(); + let dealer = batch_avss::Dealer::new( + nodes.clone(), + dealer_id, + f, + t, + sid.clone(), + batch_size_per_weight, + ) + .unwrap(); + let receivers = sks + .iter() + .enumerate() + .map(|(id, enc_secret_key)| { + batch_avss::Receiver::new( + nodes.clone(), + id as u16, + dealer_id, + f, + t, + sid.clone(), + enc_secret_key.clone(), + batch_size_per_weight, + ) + .unwrap() + }) + .collect::>(); + + // Each dealer creates a message + let messages = dealer.create_message(&mut rng).unwrap(); + + // Each receiver produces echoes addressed to every party. 
+ let echoes: Vec> = receivers + .iter() + .map(|r| r.echo_message(&messages[r.id as usize]).unwrap()) + .collect(); + + // Bundle echoes per recipient: echoes_per_recipient[i] = echoes addressed to party i. + let echoes_per_recipient: Vec> = (0..n) + .map(|i| echoes.iter().map(|em| em[i].clone()).collect()) + .collect(); + + // Each receiver processes the message. In this case, we assume all are honest and + // there are no complaints. + for ((r, echoes), msg) in + receivers.iter().zip(&echoes_per_recipient).zip(&messages) + { + let pem = r.process_echo_messages(echoes).unwrap(); + let outcome = r.verify_and_decrypt(pem, msg).unwrap(); + let output = match outcome { + batch_avss::DecryptionOutcome::Valid { output, .. } => output, + _ => panic!("expected valid presigning output"), + }; + presigning_outputs.get_mut(&r.id).unwrap().push(output); + } + } + + // Each party can process their presigs locally from the secret-shared nonces. + let mut presigs: HashMap = presigning_outputs + .into_iter() + .map(|(id, outputs)| { + ( + id, + Presignatures::new(outputs, batch_size_per_weight, f as usize).unwrap(), + ) + }) + .collect(); + assert_eq!( + presigs.get(&PartyId::from(1u8)).unwrap().len(), + batch_size_per_weight as usize + * (weights.iter().sum::() as usize - f as usize) + ); + + // + // SIGNING + // + + let message = b"Hello, world!"; + + // Mock a value from the random beacon. + let beacon_value = S::rand(&mut rng); + + // Each party generates their partial signatures. + let partial_signatures = nodes + .iter() + .map(|node| { + generate_partial_signatures( + message, + presigs.get_mut(&node.id).unwrap().next().unwrap(), + &beacon_value, + &merged_shares.get(&node.id).unwrap().my_shares, + &vk, + None, + ) + .unwrap() + }) + .collect_vec(); + + // The public parts should all be the same. + let public_presig = + get_uniform_value(partial_signatures.iter().map(|p| p.0)).unwrap(); + + // Aggregate partial signatures. 
+ let signature = aggregate_signatures( + message, + &public_presig, + &beacon_value, + &partial_signatures + .iter() + .flat_map(|(_, s)| s.clone()) + .collect_vec(), + t, + &vk, + None, + ) + .unwrap(); + + // Check that this produced a valid signature. + SchnorrPublicKey::try_from(&vk) + .unwrap() + .verify(message, &signature) + .unwrap(); + + // + // KEY ROTATION + // + + // Map from each party to the ordered list of outputs it has received. Here, each party + // will act as dealer multiple times — once per share they have. + let mut dkg_outputs_after_rotation: HashMap< + (PartyId, ShareIndex), + avss::PartialOutput, + > = HashMap::new(); + + for dealer_id in nodes.node_ids_iter() { + for share_index in nodes.share_ids_of(dealer_id).unwrap() { + let sid = + format!("key-rotation-test-session-{dealer_id}-{share_index}").into_bytes(); + + // Each dealer uses their existing share as the secret to reshare. + let secret = merged_shares + .get(&dealer_id) + .unwrap() + .share_for_index(share_index) + .unwrap() + .value; + let dealer = + avss::Dealer::new(Some(secret), nodes.clone(), t, sid.clone()).unwrap(); + + let receivers = sks + .iter() + .enumerate() + .map(|(id, enc_secret_key)| { + let commitment = merged_shares + .get(&(id as u16)) + .unwrap() + .commitment_for_index(share_index) + .unwrap() + .value; + avss::Receiver::new( + nodes.clone(), + id as u16, + t, + sid.clone(), + Some(commitment), + enc_secret_key.clone(), + ) + }) + .collect::>(); + + // Each dealer creates a message + let message = dealer.create_message(&mut rng); + + // Each receiver processes the message. In this case, we assume all are honest and + // there are no complaints. 
+ receivers.iter().for_each(|r| { + let pm = r.process_message(&message).unwrap(); + let output = match pm { + avss::ProcessedMessage::Valid(po) => po, + avss::ProcessedMessage::Complaint(_) => { + panic!("expected valid avss output") + } + }; + dkg_outputs_after_rotation.insert((r.id(), share_index), output); + }); + } + } + + // The first t dealers (counted by weight) form the certificate and are the ones whose + // outputs will be used to create the final shares. + let key_rotation_cert = [PartyId::from(1u8), PartyId::from(2u8)]; + let share_indices_in_cert: Vec = key_rotation_cert + .iter() + .flat_map(|id| nodes.share_ids_of(*id).unwrap()) + .collect_vec(); + + // Now, each party has collected their outputs from all dealers and can form their new + // shares from the ones in the certificate. + let merged_shares: HashMap = nodes + .node_ids_iter() + .map(|receiver_id| { + let my_shares_from_cert = share_indices_in_cert + .iter() + .map(|&index| IndexedValue { + index, + value: dkg_outputs_after_rotation + .get(&(receiver_id, index)) + .unwrap() + .clone(), + }) + .collect_vec(); + ( + receiver_id, + avss::ReceiverOutput::complete_key_rotation( + t, + receiver_id, + &nodes, + &my_shares_from_cert + .into_iter() + .take(t as usize) + .collect_vec(), + ) + .unwrap(), + ) + }) + .collect(); + + // The verifying key should be the same as before. + for output in merged_shares.values() { + assert_eq!(output.vk, vk); + } + + // For testing, we now recover the secret key from t shares and check that the secret key + // matches the verifying key. In practice, the parties should never do this. + let shares = merged_shares + .values() + .flat_map(|output| output.my_shares.shares.clone()) + .take(t as usize); + let sk = Poly::recover_c0(t, shares).unwrap(); + assert_eq!(G::generator() * sk, vk); + + // Check commitments on the reshared secret from the first dealer. 
+ let commitment_1 = merged_shares + .get(&0) + .unwrap() + .commitments + .first() + .unwrap(); + let secret_1 = merged_shares + .get(&0) + .unwrap() + .share_for_index(commitment_1.index) + .unwrap() + .value; + assert_eq!(G::generator() * secret_1, commitment_1.value); + + // + // SIGNING (again with rotated shares) + // + + let message_2 = b"Hello again, world!"; + + // Mock a value from the random beacon. + let beacon_value = S::rand(&mut rng); + + // Each party generates their partial signatures. + let partial_signatures = nodes + .iter() + .map(|node| { + generate_partial_signatures( + message_2, + presigs.get_mut(&node.id).unwrap().next().unwrap(), + &beacon_value, + &merged_shares.get(&node.id).unwrap().my_shares, + &vk, + None, + ) + .unwrap() + }) + .collect_vec(); + + // The public parts should all be the same. + let public_presig = + get_uniform_value(partial_signatures.iter().map(|p| p.0)).unwrap(); + + // Aggregate partial signatures. + let signature_2 = aggregate_signatures( + message_2, + &public_presig, + &beacon_value, + &partial_signatures + .iter() + .flat_map(|(_, s)| s.clone()) + .collect_vec(), + t, + &vk, + None, + ) + .unwrap(); + + // Check that this produced a valid signature. + SchnorrPublicKey::try_from(&vk) + .unwrap() + .verify(message_2, &signature_2) + .unwrap(); + } + /// Restrict a `HashMap` to a given set of keys. /// Panics if the given subset is not a subset of the maps' keys. 
From 2be05a27d259e0ebb7e0e0244efa732a695a5d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 12:09:50 +0200 Subject: [PATCH 24/91] Restore original style in test_e2e --- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 185 +++++++++---------- 1 file changed, 91 insertions(+), 94 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index feb75db7e8..6d899efd49 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -95,11 +95,12 @@ mod tests { use std::hash::Hash; #[test] fn test_e2e() { - // 4 parties with weights [1, 2, 2, 2], total weight 7. No complaints, all honest. + // No complaints, all honest let t = 3; let f = 2; - let weights: [u16; 4] = [1, 2, 2, 2]; + let weights = [1, 2, 2, 2]; let n = weights.len(); + let batch_size_per_weight: u16 = 10; let mut rng = rand::thread_rng(); @@ -120,19 +121,19 @@ mod tests { .unwrap(); // - // DKG (via avss) + // DKG // - // Map from each party to the outputs it has received from each dealer. - let mut dkg_outputs: HashMap> = - HashMap::new(); + // Map from each party to the outputs it has received + let mut dkg_outputs = HashMap::>::new(); nodes.node_ids_iter().for_each(|id| { dkg_outputs.insert(id, HashMap::new()); }); for dealer_id in nodes.node_ids_iter() { - let sid = format!("dkg-test-session-{dealer_id}").into_bytes(); - let dealer = avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); + let sid = format!("dkg-test-session-{}", dealer_id).into_bytes(); + let dealer: avss::Dealer = + avss::Dealer::new(None, nodes.clone(), t, sid.clone()).unwrap(); let receivers = sks .iter() .enumerate() @@ -151,31 +152,22 @@ mod tests { // Each dealer creates a message let message = dealer.create_message(&mut rng); - // Each receiver processes the message. In this case, we assume all are honest and - // there are no complaints. 
- receivers.iter().for_each(|r| { - let pm = r.process_message(&message).unwrap(); - let output = match pm { - avss::ProcessedMessage::Valid(po) => po, - avss::ProcessedMessage::Complaint(_) => { - panic!("expected valid avss output") - } - }; + // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. + receivers.iter().for_each(|receiver| { + let output = assert_valid(receiver.process_message(&message).unwrap()); dkg_outputs - .get_mut(&r.id()) + .get_mut(&receiver.id()) .unwrap() .insert(dealer_id, output); }); } - // The dealers to form the certificate should have weight >= t, and are the ones whose - // outputs will be used to create the final shares. + // The dealers to form the certificate should have weight >= t, and are the ones whose outputs will be used to create the final shares. let dkg_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - // Now, each party has collected their outputs from all dealers. We use the outputs from - // the dealers in `dkg_cert` to create the final shares for signing. Each party should - // still keep the outputs from all dealers until the end of the epoch to handle complaints. - let merged_shares: HashMap = nodes + // Now, each party has collected their outputs from all dealers. We use the output from the dealers in dkg_cert create the final shares for signing. + // Each party should still keep the outputs from all dealers until the end of the epoch to handle complaints. + let merged_shares = nodes .iter() .map(|node| { ( @@ -188,13 +180,13 @@ mod tests { .unwrap(), ) }) - .collect(); + .collect::>(); - // All receivers should now have the same verifying key. 
- let vk = get_uniform_value(merged_shares.values().map(|out| out.vk)).unwrap(); + // All receivers should now have the same verifying key + let vk = get_uniform_value(merged_shares.values().map(|output| output.vk)).unwrap(); - // For testing, we now recover the secret key from t shares and check that the secret - // key matches the verifying key. In practice, the parties should never do this. + // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. + // In practice, the parties should never do this... let shares = merged_shares .values() .flat_map(|output| output.my_shares.shares.clone()) @@ -203,20 +195,19 @@ mod tests { assert_eq!(G::generator() * sk, vk); // - // PRESIGNING (via batch_avss) + // PRESIGNING // - // Generate a batch of nonces for each party's share. - let mut presigning_outputs: HashMap> = - HashMap::new(); + // Generate a batch of nonces for each party's share + let mut presigning_outputs = HashMap::>::new(); nodes.node_ids_iter().for_each(|id| { presigning_outputs.insert(id, Vec::new()); }); // Each dealer generates a batch of presigs per share they control. for dealer_id in nodes.node_ids_iter() { - let sid = format!("presig-test-session-{dealer_id}").into_bytes(); - let dealer = batch_avss::Dealer::new( + let sid = format!("presig-test-session-{}", dealer_id).into_bytes(); + let dealer: batch_avss::Dealer = batch_avss::Dealer::new( nodes.clone(), dealer_id, f, @@ -257,23 +248,19 @@ mod tests { .map(|i| echoes.iter().map(|em| em[i].clone()).collect()) .collect(); - // Each receiver processes the message. In this case, we assume all are honest and - // there are no complaints. + // Each receiver processes the message. + // In this case, we assume all are honest and there are no complaints. 
for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { let pem = r.process_echo_messages(echoes).unwrap(); - let outcome = r.verify_and_decrypt(pem, msg).unwrap(); - let output = match outcome { - batch_avss::DecryptionOutcome::Valid { output, .. } => output, - _ => panic!("expected valid presigning output"), - }; + let output = assert_valid_batch(r.verify_and_decrypt(pem, msg).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } } - // Each party can process their presigs locally from the secret-shared nonces. - let mut presigs: HashMap = presigning_outputs + // Each party can process their presigs locally from the secret shared nonces + let mut presigs = presigning_outputs .into_iter() .map(|(id, outputs)| { ( @@ -281,11 +268,10 @@ mod tests { Presignatures::new(outputs, batch_size_per_weight, f as usize).unwrap(), ) }) - .collect(); + .collect::>(); assert_eq!( presigs.get(&PartyId::from(1u8)).unwrap().len(), - batch_size_per_weight as usize - * (weights.iter().sum::() as usize - f as usize) + batch_size_per_weight as usize * (weights.iter().sum::() as usize - f as usize) ); // @@ -294,10 +280,10 @@ mod tests { let message = b"Hello, world!"; - // Mock a value from the random beacon. + // Mock a value from the random beacon let beacon_value = S::rand(&mut rng); - // Each party generates their partial signatures. + // Each party generates their partial signatures let partial_signatures = nodes .iter() .map(|node| { @@ -313,11 +299,15 @@ mod tests { }) .collect_vec(); - // The public parts should all be the same. - let public_presig = - get_uniform_value(partial_signatures.iter().map(|p| p.0)).unwrap(); + // The public parts should all be the same + let public_presig = get_uniform_value( + partial_signatures + .iter() + .map(|partial_signature| partial_signature.0), + ) + .unwrap(); - // Aggregate partial signatures. 
+ // Aggregate partial signatures let signature = aggregate_signatures( message, &public_presig, @@ -332,7 +322,7 @@ mod tests { ) .unwrap(); - // Check that this produced a valid signature. + // Check that this produced a valid signature SchnorrPublicKey::try_from(&vk) .unwrap() .verify(message, &signature) @@ -342,26 +332,24 @@ mod tests { // KEY ROTATION // - // Map from each party to the ordered list of outputs it has received. Here, each party - // will act as dealer multiple times — once per share they have. - let mut dkg_outputs_after_rotation: HashMap< - (PartyId, ShareIndex), - avss::PartialOutput, - > = HashMap::new(); + // Map from each party to the ordered list of outputs it has received. + // Here, each party will act as dealer multiple times -- once per share they have. + let mut dkg_outputs_after_rotation = + HashMap::<(PartyId, ShareIndex), avss::PartialOutput>::new(); for dealer_id in nodes.node_ids_iter() { for share_index in nodes.share_ids_of(dealer_id).unwrap() { let sid = - format!("key-rotation-test-session-{dealer_id}-{share_index}").into_bytes(); + format!("key-rotation-test-session-{}-{}", dealer_id, share_index).into_bytes(); - // Each dealer uses their existing share as the secret to reshare. + // Each dealer uses their existing share as the secret to reshare let secret = merged_shares .get(&dealer_id) .unwrap() .share_for_index(share_index) .unwrap() .value; - let dealer = + let dealer: avss::Dealer = avss::Dealer::new(Some(secret), nodes.clone(), t, sid.clone()).unwrap(); let receivers = sks @@ -388,32 +376,23 @@ mod tests { // Each dealer creates a message let message = dealer.create_message(&mut rng); - // Each receiver processes the message. In this case, we assume all are honest and - // there are no complaints. 
- receivers.iter().for_each(|r| { - let pm = r.process_message(&message).unwrap(); - let output = match pm { - avss::ProcessedMessage::Valid(po) => po, - avss::ProcessedMessage::Complaint(_) => { - panic!("expected valid avss output") - } - }; - dkg_outputs_after_rotation.insert((r.id(), share_index), output); + // Each receiver processes the message. In this case, we assume all are honest and there are no complaints. + receivers.iter().for_each(|receiver| { + let output = assert_valid(receiver.process_message(&message).unwrap()); + dkg_outputs_after_rotation.insert((receiver.id(), share_index), output); }); } } - // The first t dealers (counted by weight) form the certificate and are the ones whose - // outputs will be used to create the final shares. + // The first t dealers (counted by weight) form the certificate and are the ones whose outputs will be used to create the final shares. let key_rotation_cert = [PartyId::from(1u8), PartyId::from(2u8)]; - let share_indices_in_cert: Vec = key_rotation_cert + let share_indices_in_cert = key_rotation_cert .iter() .flat_map(|id| nodes.share_ids_of(*id).unwrap()) .collect_vec(); - // Now, each party has collected their outputs from all dealers and can form their new - // shares from the ones in the certificate. - let merged_shares: HashMap = nodes + // Now, each party has collected their outputs from all dealers and can form their new shares from the ones in the certificate. + let merged_shares = nodes .node_ids_iter() .map(|receiver_id| { let my_shares_from_cert = share_indices_in_cert @@ -440,15 +419,15 @@ mod tests { .unwrap(), ) }) - .collect(); + .collect::>(); - // The verifying key should be the same as before. + // The verifying key should be the same as before for output in merged_shares.values() { assert_eq!(output.vk, vk); } - // For testing, we now recover the secret key from t shares and check that the secret key - // matches the verifying key. In practice, the parties should never do this. 
+ // For testing, we now recover the secret key from t shares and check that the secret key matches the verification key. + // In practice, the parties should never do this... let shares = merged_shares .values() .flat_map(|output| output.my_shares.shares.clone()) @@ -456,7 +435,7 @@ mod tests { let sk = Poly::recover_c0(t, shares).unwrap(); assert_eq!(G::generator() * sk, vk); - // Check commitments on the reshared secret from the first dealer. + // Check commitments on the reshared secret from the first dealer let commitment_1 = merged_shares .get(&0) .unwrap() @@ -472,15 +451,15 @@ mod tests { assert_eq!(G::generator() * secret_1, commitment_1.value); // - // SIGNING (again with rotated shares) + // SIGNING (again) // let message_2 = b"Hello again, world!"; - // Mock a value from the random beacon. + // Mock a value from the random beacon let beacon_value = S::rand(&mut rng); - // Each party generates their partial signatures. + // Each party generates their partial signatures let partial_signatures = nodes .iter() .map(|node| { @@ -496,11 +475,15 @@ mod tests { }) .collect_vec(); - // The public parts should all be the same. - let public_presig = - get_uniform_value(partial_signatures.iter().map(|p| p.0)).unwrap(); + // The public parts should all be the same + let public_presig = get_uniform_value( + partial_signatures + .iter() + .map(|partial_signature| partial_signature.0), + ) + .unwrap(); - // Aggregate partial signatures. + // Aggregate partial signatures let signature_2 = aggregate_signatures( message_2, &public_presig, @@ -515,7 +498,7 @@ mod tests { ) .unwrap(); - // Check that this produced a valid signature. 
+ // Check that this produced a valid signature SchnorrPublicKey::try_from(&vk) .unwrap() .verify(message_2, &signature_2) @@ -523,6 +506,20 @@ mod tests { } + fn assert_valid(pm: avss::ProcessedMessage) -> avss::PartialOutput { + match pm { + avss::ProcessedMessage::Valid(po) => po, + avss::ProcessedMessage::Complaint(_) => panic!("expected valid avss output"), + } + } + + fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { + match outcome { + batch_avss::DecryptionOutcome::Valid { output, .. } => output, + _ => panic!("expected valid batch_avss output"), + } + } + /// Restrict a `HashMap` to a given set of keys. /// Panics if the given subset is not a subset of the maps' keys. pub(crate) fn restrict( From 16685440cda8d44e93d16bb24cb7418e5dd73a91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 12:44:09 +0200 Subject: [PATCH 25/91] Cargo fmt + clippy fixes; rename Reveal/Blame variants - Variants in DecryptionOutcome and Response renamed to InvalidShares / InvalidDispersal (struct types stay Reveal / Blame) - Field renames in EchoMessage, ProcessedEchoMessages, Vote, Reveal, Blame, ShardContribution, AuthenticatedShards for clarity (sender, global_root, recipient_root, recipient_root_proof, shards_proof, common_message_hash) - Use SCALAR_SIZE_IN_BYTES from fastcrypto::groups::secp256k1 - Construct AuthenticatedShards directly in create_message; lift CommonMessage out of the per-receiver loop - Drop the orphan `pub` on EchoMessage.shards_proof - Bump protocol step numbers to 1..6 - Apply cargo fmt and resolve all clippy --all-targets warnings: unused imports, doc_lazy_continuation, large_enum_variant, too_many_arguments, needless_borrow, implied_bounds_in_impls - Rewrite the broken batch_avss bench to match the current API --- fastcrypto-tbls/benches/batch_avss.rs | 72 +++- fastcrypto-tbls/src/nodes.rs | 2 +- fastcrypto-tbls/src/tests/ecies_v1_tests.rs | 2 +- 
.../src/threshold_schnorr/batch_avss.rs | 347 +++++++++--------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 13 +- 5 files changed, 233 insertions(+), 203 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 79a70b5999..21a2f6bcf7 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -23,9 +23,11 @@ pub fn generate_ecies_keys( .collect() } +#[allow(clippy::too_many_arguments)] pub fn setup_receiver( id: PartyId, dealer_id: PartyId, + f: u16, threshold: u16, weight: u16, // Per node keys: &[(PartyId, ecies_v1::PrivateKey, ecies_v1::PublicKey)], @@ -43,6 +45,7 @@ pub fn setup_receiver( Nodes::new(nodes).unwrap(), id, dealer_id, + f, threshold, b"avss".to_vec(), keys.get(id as usize).unwrap().1.clone(), @@ -53,6 +56,7 @@ pub fn setup_receiver( pub fn setup_dealer( dealer_id: u16, + f: u16, threshold: u16, weight: u16, // Per node keys: &[(PartyId, ecies_v1::PrivateKey, ecies_v1::PublicKey)], @@ -69,6 +73,7 @@ pub fn setup_dealer( batch_avss::Dealer::new( Nodes::new(nodes).unwrap(), dealer_id, + f, threshold, b"avss".to_vec(), batch_size_per_weight, @@ -100,8 +105,9 @@ mod batch_avss_benches { let w = total_w / n; let total_w = w * n; let t = total_w / 3 - 1; + let f = t.saturating_sub(1); let keys = generate_ecies_keys(*n); - let d0 = setup_dealer(0, t, w, &keys, batch_size_per_weight); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); create.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), |b| b.iter(|| d0.create_message(&mut thread_rng())), @@ -117,14 +123,25 @@ mod batch_avss_benches { let w = total_w / n; let total_w = w * n; let t = total_w / 3 - 1; + let f = t.saturating_sub(1); let keys = generate_ecies_keys(*n); - let d0 = setup_dealer(0, t, w, &keys, batch_size_per_weight); - let r1 = setup_receiver(1, 0, t, w, &keys, batch_size_per_weight); - let message = d0.create_message(&mut 
thread_rng()).unwrap(); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); + let receivers: Vec = (0..*n) + .map(|id| setup_receiver(id, 0, f, t, w, &keys, batch_size_per_weight)) + .collect(); + let messages = d0.create_message(&mut thread_rng()).unwrap(); + let echoes: Vec> = receivers + .iter() + .enumerate() + .map(|(i, r)| r.echo_message(&messages[i]).unwrap()) + .collect(); + let echoes_for_party_1: Vec = + echoes.iter().map(|em| em[1].clone()).collect(); + let r1 = &receivers[1]; process.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), - |b| b.iter(|| r1.process_echo_messages(&message).unwrap()), + |b| b.iter(|| r1.process_echo_messages(&echoes_for_party_1).unwrap()), ); } } @@ -136,19 +153,43 @@ mod batch_avss_benches { let w = total_w / n; let total_w = w * n; let t = total_w / 3 - 1; + let f = t.saturating_sub(1); let keys = generate_ecies_keys(*n); let quorum = (2 * n / 3 + 1) as usize; let dealers: Vec = (0..quorum) - .map(|id| setup_dealer(id as u16, t, w, &keys, batch_size_per_weight)) + .map(|id| setup_dealer(id as u16, f, t, w, &keys, batch_size_per_weight)) .collect(); let outputs = dealers .iter() .enumerate() .map(|(dealer_id, d)| { - let message = d.create_message(&mut thread_rng()).unwrap(); - let r = - setup_receiver(1, dealer_id as u16, t, w, &keys, batch_size_per_weight); - assert_valid_batch(r.process_echo_messages(&message).unwrap()) + let messages = d.create_message(&mut thread_rng()).unwrap(); + let receivers: Vec = (0..*n) + .map(|id| { + setup_receiver( + id, + dealer_id as u16, + f, + t, + w, + &keys, + batch_size_per_weight, + ) + }) + .collect(); + let echoes: Vec> = receivers + .iter() + .enumerate() + .map(|(i, r)| r.echo_message(&messages[i]).unwrap()) + .collect(); + let echoes_for_party_1: Vec = + echoes.iter().map(|em| em[1].clone()).collect(); + let pem = receivers[1] + .process_echo_messages(&echoes_for_party_1) + .unwrap(); + assert_valid_batch( + 
receivers[1].verify_and_decrypt(pem, &messages[1]).unwrap(), + ) }) .collect_vec(); @@ -196,12 +237,9 @@ mod batch_avss_benches { criterion_main!(batch_avss_benches::batch_avss_benches); -fn assert_valid_batch( - processed_message: batch_avss::ProcessedMessage, -) -> batch_avss::ReceiverOutput { - if let batch_avss::ProcessedMessage::Valid(output) = processed_message { - output - } else { - panic!("Expected valid message"); +fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { + match outcome { + batch_avss::DecryptionOutcome::Valid { output, .. } => output, + _ => panic!("Expected valid outcome"), } } diff --git a/fastcrypto-tbls/src/nodes.rs b/fastcrypto-tbls/src/nodes.rs index 00c312325e..e7db9d0fd8 100644 --- a/fastcrypto-tbls/src/nodes.rs +++ b/fastcrypto-tbls/src/nodes.rs @@ -163,7 +163,7 @@ impl Nodes { /// Returns error if the number of items does not match the total weight. pub fn collect_to_nodes( &self, - items: impl Iterator + ExactSizeIterator, + items: impl ExactSizeIterator, ) -> FastCryptoResult>> { if items.len() != self.total_weight as usize { return Err(FastCryptoError::InvalidInput); diff --git a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs index ae31eb592c..22c9cb3e81 100644 --- a/fastcrypto-tbls/src/tests/ecies_v1_tests.rs +++ b/fastcrypto-tbls/src/tests/ecies_v1_tests.rs @@ -63,7 +63,7 @@ mod point_tests { assert!(common.verify(&ro).is_ok()); for (i, (part, (sk, _, msg))) in parts.iter().zip(keys_and_msg.iter()).enumerate() { // Using parts should work as well - assert_eq!(msg.as_bytes(), common.decrypt(&part, sk, &ro, i)); + assert_eq!(msg.as_bytes(), common.decrypt(part, sk, &ro, i)); } // test empty messages diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 40a3702414..3b94f4a9a1 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ 
b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -10,7 +10,7 @@ //! * Define a new [Dealer] with the secrets who begins by calling [Dealer::create_message]. use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; -use crate::nodes::{Node, Nodes, PartyId}; +use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::batch_avss::DecryptionOutcome::Valid; @@ -31,8 +31,7 @@ use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; use fastcrypto::merkle; use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; -use fastcrypto::twisted_elgamal::Ciphertext; -use itertools::{repeat_n, Itertools}; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::iter::repeat_with; @@ -83,68 +82,67 @@ pub struct CommonMessage { pub struct AuthenticatedShards { root: merkle::Node, shards: Vec, - proof: merkle::MerkleProof, + shards_proof: merkle::MerkleProof, } /// One sender's echo to a single recipient: their shard for the recipient's ciphertext, with /// Merkle proofs binding it to the dealer's broadcast. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EchoMessage { - party: PartyId, - r: merkle::Node, - pi_i: merkle::MerkleProof, - r_i: merkle::Node, - s_ij: Vec, - hash: Digest<32>, - pub pi_ij: merkle::MerkleProof, + sender: PartyId, + global_root: merkle::Node, + recipient_root_proof: merkle::MerkleProof, + recipient_root: merkle::Node, + shards: Vec, + common_message_hash: Digest<32>, + shards_proof: merkle::MerkleProof, } /// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. 
pub struct ProcessedEchoMessages { ciphertext: Vec, - r: merkle::Node, - r_i: merkle::Node, + global_root: merkle::Node, + recipient_root: merkle::Node, valid_echoes: Vec, } /// The result of [Receiver::verify_and_decrypt]: either valid shares plus a vote to broadcast, or /// a complaint to broadcast instead. +#[allow(clippy::large_enum_variant)] pub enum DecryptionOutcome { - Valid { - output: ReceiverOutput, - vote: Vote, - }, - Reveal(Reveal), - Blame(Blame), + Valid { output: ReceiverOutput, vote: Vote }, + InvalidShares(Reveal), + InvalidDispersal(Blame), } impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when - /// the dealer's broadcast verified, otherwise the [Reveal] or [Blame] itself. The receiver's + /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. The receiver's /// local [ReceiverOutput] (in the Valid case) is consumed and not part of the wire format. pub fn into_response(self) -> Response { match self { DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), - DecryptionOutcome::Reveal(r) => Response::Reveal(r), - DecryptionOutcome::Blame(b) => Response::Blame(b), + DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), + DecryptionOutcome::InvalidDispersal(b) => Response::InvalidDispersal(b), } } } /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's -/// broadcast or a [Reveal] / [Blame] complaint otherwise. +/// broadcast or a [InvalidShares] / [InvalidDispersal] complaint otherwise. +#[allow(clippy::large_enum_variant)] #[derive(Clone, Debug, Serialize, Deserialize)] pub enum Response { Vote(Vote), - Reveal(Reveal), - Blame(Blame), + InvalidShares(Reveal), + InvalidDispersal(Blame), } /// An endorsement of the dealer's broadcast. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Vote { - pub r: merkle::Node, - pub hash: Digest<32>, + pub global_root: merkle::Node, + pub common_message_hash: Digest<32>, } /// A complaint by a receiver who could not decrypt or verify its shares. @@ -153,7 +151,7 @@ pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. - pub hash: Digest<32>, + pub common_message_hash: Digest<32>, } /// A complaint by a receiver who decrypted valid shares but found the AVID dispersal @@ -162,16 +160,16 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, pub shards: Vec, - pub hash: Digest<32>, + pub common_message_hash: Digest<32>, } /// One sender's contribution of shards toward reconstructing the accuser's ciphertext. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShardContribution { - pub party: PartyId, + pub sender: PartyId, pub shards: Vec, - /// Proof that `shards` sits under the accuser's `r_i` at `party`'s leaf. - pub proof: merkle::MerkleProof, + /// Proof that `shards` sits under the accuser's `recipient_root` at `sender`'s leaf. + pub shards_proof: merkle::MerkleProof, } /// The output of a receiver which is a batch of shares and public keys for all nonces. 
@@ -444,7 +442,7 @@ impl Dealer { .map(MerkleTree::::build_from_unserialized) .collect::>>()?; - let messages = self + let dispersals: Vec> = self .nodes .node_ids_iter() .map(|id| { @@ -452,15 +450,19 @@ impl Dealer { .iter() .zip(&trees) .map(|(s, tree)| { - let proof = tree.get_proof(id as usize)?; - Ok((tree.root(), s[id as usize].clone(), proof)) + Ok(AuthenticatedShards { + root: tree.root(), + shards: s[id as usize].clone(), + shards_proof: tree.get_proof(id as usize)?, + }) }) .collect::>>() }) .collect::>>()?; - let roots = trees.iter().map(MerkleTree::root).collect_vec(); - let root = MerkleTree::::build_from_unserialized(roots.iter())?.root(); + let root = + MerkleTree::::build_from_unserialized(trees.iter().map(MerkleTree::root))? + .root(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -484,23 +486,18 @@ impl Dealer { .to_vec(), )?; - Ok(messages + let common = CommonMessage { + full_public_keys, + shared, + response_polynomial, + blinding_commit, + }; + + Ok(dispersals .into_iter() - .map(|m| Message { - common: CommonMessage { - full_public_keys: full_public_keys.clone(), - shared: shared.clone(), - response_polynomial: response_polynomial.clone(), - blinding_commit, - }, - dispersal: m - .iter() - .map(|m| AuthenticatedShards { - root: m.0.clone(), - shards: m.1.clone(), - proof: m.2.clone(), - }) - .collect_vec(), + .map(|dispersal| Message { + common: common.clone(), + dispersal, }) .collect_vec()) } @@ -523,6 +520,7 @@ impl Receiver { /// * `batch_size_per_weight` is the number of secrets a dealer should deal per weight it has. /// /// Returns an `InvalidInput` error if the `id` or `dealer_id` is invalid. + #[allow(clippy::too_many_arguments)] pub fn new( nodes: Nodes, id: PartyId, @@ -562,10 +560,10 @@ impl Receiver { |AuthenticatedShards { root, shards, - proof, + shards_proof, .. 
}| { - proof + shards_proof .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) .is_err() }, @@ -591,18 +589,18 @@ impl Receiver { AuthenticatedShards { root, shards, - proof, + shards_proof, .. }, )| { Ok(EchoMessage { - party: self.id, - r: r.clone(), - pi_ij: proof.clone(), - pi_i: tree.get_proof(i)?, - r_i: root.clone(), - s_ij: shards.clone(), - hash: digest, + sender: self.id, + global_root: r.clone(), + recipient_root_proof: tree.get_proof(i)?, + recipient_root: root.clone(), + shards: shards.clone(), + common_message_hash: digest, + shards_proof: shards_proof.clone(), }) }, ) @@ -610,13 +608,13 @@ impl Receiver { } /// 3. When a party has received at EchoMessages from parties with at least weight W - f, it - /// tries to process them. It first filters out invalid messages and checks if the EchoMessages - /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. - /// If the filtered set of EchoMessages does not have sufficient weight, an NotEnoughWeight error - /// is returned. + /// tries to process them. It first filters out invalid messages and checks if the EchoMessages + /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. + /// If the filtered set of EchoMessages does not have sufficient weight, an NotEnoughWeight error + /// is returned. /// - /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed - /// shards along with the r and r_i values. + /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed + /// shards along with the r and r_i values. 
pub fn process_echo_messages( &self, echo_messages: &[EchoMessage], @@ -626,18 +624,18 @@ impl Receiver { .iter() .filter(|echo_message| { echo_message - .pi_ij + .shards_proof .verify_proof_with_unserialized_leaf( - &echo_message.r_i, - &echo_message.s_ij, - echo_message.party as usize, + &echo_message.recipient_root, + &echo_message.shards, + echo_message.sender as usize, ) .is_ok() && echo_message - .pi_i + .recipient_root_proof .verify_proof_with_unserialized_leaf( - &echo_message.r, - &echo_message.r_i, + &echo_message.global_root, + &echo_message.recipient_root, self.id as usize, ) .is_ok() @@ -645,13 +643,14 @@ impl Receiver { .cloned() .collect_vec(); - let (r, r_i, _) = require_uniform_echo_metadata(&echo_messages)?; + let (global_root, recipient_root, _) = require_uniform_echo_metadata(&echo_messages)?; let required_weight = self.nodes.total_weight() - self.f; - if self - .nodes - .total_weight_of(echo_messages.iter().map(|echo_message| &echo_message.party))? - < required_weight + if self.nodes.total_weight_of( + echo_messages + .iter() + .map(|echo_message| &echo_message.sender), + )? < required_weight { return Err(NotEnoughWeight(required_weight as usize)); } @@ -659,13 +658,13 @@ impl Receiver { let ciphertext = self.reconstruct_ciphertext(self.id, |id| { echo_messages .iter() - .find(|e| e.party == id) - .map(|e| e.s_ij.clone()) + .find(|e| e.sender == id) + .map(|e| e.shards.clone()) })?; Ok(ProcessedEchoMessages { ciphertext, - r, - r_i, + global_root, + recipient_root, valid_echoes: echo_messages, }) } @@ -721,18 +720,18 @@ impl Receiver { Ok(()) } - /// 3. If the party also received a valid Message from the dealer, it can now decrypt its shares. - /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. - /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the - /// outcome, which yields a [Response::Vote] for the caller to sign. + /// 4. 
If the party also received a valid Message from the dealer, it can now decrypt its shares. + /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. + /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the + /// outcome, which yields a [Response::Vote] for the caller to sign. /// - /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid - /// Message from the dealer should request the CommonMessage part of that from the parties who voted. - /// Using this, the party can decrypt the shares and verify that the shares are valid. + /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid + /// Message from the dealer should request the CommonMessage part of that from the parties who voted. + /// Using this, the party can decrypt the shares and verify that the shares are valid. /// - /// If this function returns a [Reveal] or [Blame] outcome, the party should broadcast it - /// to the other parties — but only after at least `W - f` votes from other parties have - /// appeared on the TOB/ABC channel. + /// If this function returns a [InvalidShares] or [InvalidDispersal] outcome, the party should broadcast it + /// to the other parties — but only after at least `W - f` votes from other parties have + /// appeared on the TOB/ABC channel. pub fn verify_and_decrypt( &self, processed_echo_messages: ProcessedEchoMessages, @@ -747,8 +746,8 @@ impl Receiver { let ProcessedEchoMessages { ciphertext, - r, - r_i, + global_root, + recipient_root, valid_echoes, } = processed_echo_messages; if full_public_keys.len() != self.batch_size @@ -760,7 +759,7 @@ impl Receiver { // TODO: What should happen if these checks fail? 
// Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} let challenge = - compute_challenge_from_message(&self.random_oracle(), &r, &message.common); + compute_challenge_from_message(&self.random_oracle(), &global_root, &message.common); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -770,13 +769,15 @@ impl Receiver { } // Check r_i' == r_i from the paper - let faulty_dealer = self.check_avid_consistency(&ciphertext, &r_i).is_err(); + let faulty_dealer = self + .check_avid_consistency(&ciphertext, &recipient_root) + .is_err(); let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let decrypted_shares = shared .verify(&random_oracle_encryption) .map(|_| { - shared.decrypt( + shared.decrypt( &ciphertext, &self.enc_secret_key, &random_oracle_encryption, @@ -796,10 +797,7 @@ impl Receiver { Ok(my_shares) }); - // TODO: Revisit this dispatch — confirm each (faulty_dealer, decrypted_shares) combination - // produces the right outcome (Valid / Blame / Reveal) under both honest and Byzantine - // dealer behavior, including the (false, Err) case where the AVID layer agreed but - // decryption still failed. + // TODO: Revisit this dispatch. match (faulty_dealer, decrypted_shares) { (false, Ok(my_shares)) => Ok(Valid { output: ReceiverOutput { @@ -807,8 +805,8 @@ impl Receiver { public_keys: full_public_keys.clone(), }, vote: Vote { - r, - hash: compute_common_message_hash(&message.common), + global_root, + common_message_hash: compute_common_message_hash(&message.common), }, }), (true, Ok(_)) => { @@ -816,44 +814,44 @@ impl Receiver { // implicit — the responder reads it from its own [Message] rather than receiving // it via the complaint. 
let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let hash = any_echo.hash; + let common_message_hash = any_echo.common_message_hash; let shards = valid_echoes .into_iter() .map(|e| ShardContribution { - party: e.party, - shards: e.s_ij, - proof: e.pi_ij, + sender: e.sender, + shards: e.shards, + shards_proof: e.shards_proof, }) .collect_vec(); - Ok(DecryptionOutcome::Blame(Blame { + Ok(DecryptionOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, - hash, + common_message_hash, })) } (_, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - Ok(DecryptionOutcome::Reveal(Reveal { + Ok(DecryptionOutcome::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, - &shared, + shared, &self.enc_secret_key, &self.random_oracle(), &mut rand::thread_rng(), ), ciphertext, - hash: any_echo.hash, + common_message_hash: any_echo.common_message_hash, })) } } } - /// 4. Upon receiving a [Reveal] from another party, verify it and respond with this party's - /// own shares so the accuser can recover. The ciphertext must be authenticated as the dealer's - /// by re-encoding under the locally-known `r_i`, and decryption with the recovery package must - /// yield invalid shares. `message` is the dealer's full [Message] as this party received it; - /// the verifier looks up the accuser's per-ciphertext root locally from - /// `message.dispersal[accuser_id]` rather than trusting the complaint to carry it. + /// 5. Upon receiving a [Reveal] from another party, verify it and respond with this party's + /// own shares so the accuser can recover. The ciphertext must be authenticated as the dealer's + /// by re-encoding under the locally-known `r_i`, and decryption with the recovery package must + /// yield invalid shares. 
`message` is the dealer's full [Message] as this party received it; + /// the verifier looks up the accuser's per-ciphertext root locally from + /// `message.dispersal[accuser_id]` rather than trusting the complaint to carry it. pub fn handle_reveal( &self, message: &Message, @@ -863,20 +861,23 @@ impl Receiver { let Reveal { proof, ciphertext, - hash, + common_message_hash, } = reveal; let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - let r_i = self.dispersal_root_for(message, accuser_id)?; - let r = self.global_root(message)?; + let recipient_root = self.dispersal_root_for(message, accuser_id)?; + let global_root = self.global_root(message)?; - if hash != &compute_common_message_hash(&message.common) - || self.check_avid_consistency(ciphertext, r_i).is_err() + if common_message_hash != &compute_common_message_hash(&message.common) + || self + .check_avid_consistency(ciphertext, recipient_root) + .is_err() { return Err(InvalidProof); } - let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); + let challenge = + compute_challenge_from_message(&self.random_oracle(), &global_root, &message.common); proof.check( accuser_pk, ciphertext, @@ -897,7 +898,7 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Counterpart to [Self::handle_reveal] for [Blame]. The accuser must have collected enough + /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The accuser must have collected enough /// authenticated shards whose re-encoded ciphertext root differs from the locally-known /// `r_i`. On success, respond with this party's own shares. 
pub fn handle_blame( @@ -909,22 +910,22 @@ impl Receiver { let Blame { accuser_id, shards, - hash, + common_message_hash, } = blame; let accuser_id = *accuser_id; - let r_i = self.dispersal_root_for(message, accuser_id)?; + let recipient_root = self.dispersal_root_for(message, accuser_id)?; - if hash != &compute_common_message_hash(&message.common) { + if common_message_hash != &compute_common_message_hash(&message.common) { return Err(InvalidProof); } - if shards.iter().map(|s| s.party).unique().count() != shards.len() { + if shards.iter().map(|s| s.sender).unique().count() != shards.len() { return Err(InvalidProof); } if shards.iter().any(|s| { - s.proof - .verify_proof_with_unserialized_leaf(r_i, &s.shards, s.party as usize) + s.shards_proof + .verify_proof_with_unserialized_leaf(recipient_root, &s.shards, s.sender as usize) .is_err() }) { return Err(InvalidProof); @@ -932,7 +933,7 @@ impl Receiver { let weight_of_shards = self .nodes - .total_weight_of(shards.iter().map(|s| &s.party))?; + .total_weight_of(shards.iter().map(|s| &s.sender))?; if weight_of_shards < self.nodes.total_weight() - 2 * self.f { return Err(InvalidProof); } @@ -941,13 +942,16 @@ impl Receiver { .reconstruct_ciphertext(accuser_id, |id| { shards .iter() - .find(|s| s.party == id) + .find(|s| s.sender == id) .map(|s| s.shards.clone()) }) .map_err(|_| InvalidProof)?; // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`. - if self.check_avid_consistency(&ciphertext, r_i).is_ok() { + if self + .check_avid_consistency(&ciphertext, recipient_root) + .is_ok() + { return Err(InvalidProof); } @@ -973,8 +977,7 @@ impl Receiver { .root()) } - - /// 5. Upon receiving t valid responses to a complaint, the accuser can recover its shares. + /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. 
pub fn recover( &self, @@ -1045,8 +1048,14 @@ fn uleb128_len(x: usize) -> usize { fn require_uniform_echo_metadata( echoes: &[EchoMessage], ) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { - get_uniform_value(echoes.iter().map(|e| (e.r.clone(), e.r_i.clone(), e.hash))) - .ok_or(InvalidMessage) + get_uniform_value(echoes.iter().map(|e| { + ( + e.global_root.clone(), + e.recipient_root.clone(), + e.common_message_hash, + ) + })) + .ok_or(InvalidMessage) } /// Verify a set of shares receiver from a Dealer @@ -1091,7 +1100,7 @@ fn compute_challenge_from_message( &message.full_public_keys, &message.blinding_commit, &message.shared, - &root, + root, ) } @@ -1118,26 +1127,19 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - compute_challenge, Dealer, DecryptionOutcome, Message, ProcessedEchoMessages, Receiver, - ReceiverOutput, ShareBatch, SharesForNode, + Dealer, DecryptionOutcome, Message, Receiver, ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; - use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; + use crate::ecies_v1::PublicKey; use crate::nodes::{Node, Nodes}; use crate::polynomial::{Eval, Poly}; use crate::threshold_schnorr::bcs::BCSSerialized; - use crate::threshold_schnorr::reed_solomon::ErasureCoder; - use crate::threshold_schnorr::Extensions::Encryption; - use crate::threshold_schnorr::{EG, G}; + use crate::threshold_schnorr::EG; use crate::types::ShareIndex; use fastcrypto::error::FastCryptoResult; - use fastcrypto::groups::GroupElement; - use fastcrypto::hash::Blake2b256; - use fastcrypto::merkle; use fastcrypto::traits::AllowedRng; use itertools::Itertools; use std::collections::HashMap; - use std::iter::repeat_with; #[test] fn test_bcs_serialized_size_matches_serialization() { @@ -1145,7 +1147,7 @@ mod tests { // serialize it; the byte length must agree with `SharesForNode::bcs_serialized_size`. 
Cases // straddle the ULEB128 single-byte/two-byte boundary at 128 in both dimensions. use crate::threshold_schnorr::S; - use fastcrypto::groups::Scalar; + use fastcrypto::groups::GroupElement; let dummy_index = ShareIndex::try_from(1u16).unwrap(); let zero_scalar = S::zero(); @@ -1162,10 +1164,7 @@ mod tests { }; let actual = shares_for_node.to_bytes().len(); let formula = SharesForNode::bcs_serialized_size(weight, batch_size); - assert_eq!( - actual, formula, - "weight={weight}, batch_size={batch_size}" - ); + assert_eq!(actual, formula, "weight={weight}, batch_size={batch_size}"); } } } @@ -1235,30 +1234,33 @@ mod tests { let echoes_by_recipient = receivers .iter() .enumerate() - .map(|(i, _)| echoes_by_sender.iter().map(|em| em[i].clone()).collect_vec()) + .map(|(i, _)| { + echoes_by_sender + .iter() + .map(|em| em[i].clone()) + .collect_vec() + }) .collect_vec(); let processed_echo_messages = receivers .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) - .map(|((receiver, _message), echoes)| { - receiver.process_echo_messages(echoes).unwrap() - }) + .map(|((receiver, _message), echoes)| receiver.process_echo_messages(echoes).unwrap()) .collect_vec(); let all_shares = receivers .iter() .zip(processed_echo_messages) .zip(messages) - .map(|((receiver, pem), message)| { - match receiver.verify_and_decrypt(pem, &message) { + .map( + |((receiver, pem), message)| match receiver.verify_and_decrypt(pem, &message) { Ok(DecryptionOutcome::Valid { output, .. }) => (receiver.id, output), _ => panic!( "All receivers should be able to process the message in the happy path" ), - } - }) + }, + ) .collect::>(); let secrets = (0..dealer.batch_size) @@ -1289,7 +1291,7 @@ mod tests { fn test_share_recovery() { // Dealer is honest at the AVID layer (consistent dispersal) but flips a byte in // receiver 0's plaintext, so receiver 0's decryption succeeds but the resulting - // SharesForNode fails verification — triggering a Reveal complaint. 
The other receivers + // SharesForNode fails verification — triggering a InvalidShares complaint. The other receivers // verify the complaint and respond with their own shares; receiver 0 reconstructs. let t = 3; let f = 2; @@ -1361,19 +1363,18 @@ mod tests { let pem = r.process_echo_messages(echoes).unwrap(); ( r.id, - r.verify_and_decrypt(pem, &messages[r.id as usize]) - .unwrap(), + r.verify_and_decrypt(pem, &messages[r.id as usize]).unwrap(), ) }) .collect(); - // Receiver 0 (the targeted victim) emits a Reveal complaint. + // Receiver 0 (the targeted victim) emits a InvalidShares complaint. let victim_id = 0u16; let mut outcomes = outcomes; let reveal = match outcomes.remove(&victim_id).unwrap() { - DecryptionOutcome::Reveal(r) => r, + DecryptionOutcome::InvalidShares(r) => r, other => panic!( - "expected Reveal from victim, got {:?}", + "expected InvalidShares from victim, got {:?}", outcome_kind(&other) ), }; @@ -1427,8 +1428,8 @@ mod tests { fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { match outcome { DecryptionOutcome::Valid { .. 
} => "Valid", - DecryptionOutcome::Reveal(_) => "Reveal", - DecryptionOutcome::Blame(_) => "Blame", + DecryptionOutcome::InvalidShares(_) => "InvalidShares", + DecryptionOutcome::InvalidDispersal(_) => "InvalidDispersal", } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 6d899efd49..9616f3233e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -86,7 +86,6 @@ mod tests { use crate::threshold_schnorr::signing::{aggregate_signatures, generate_partial_signatures}; use crate::threshold_schnorr::{avss, batch_avss, EG, G, S}; use crate::types::{get_uniform_value, IndexedValue, ShareIndex}; - use fastcrypto::error::FastCryptoResult; use fastcrypto::groups::secp256k1::schnorr::SchnorrPublicKey; use fastcrypto::groups::{GroupElement, Scalar}; use fastcrypto::traits::AllowedRng; @@ -250,9 +249,7 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. 
- for ((r, echoes), msg) in - receivers.iter().zip(&echoes_per_recipient).zip(&messages) - { + for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { let pem = r.process_echo_messages(echoes).unwrap(); let output = assert_valid_batch(r.verify_and_decrypt(pem, msg).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); @@ -436,12 +433,7 @@ mod tests { assert_eq!(G::generator() * sk, vk); // Check commitments on the reshared secret from the first dealer - let commitment_1 = merged_shares - .get(&0) - .unwrap() - .commitments - .first() - .unwrap(); + let commitment_1 = merged_shares.get(&0).unwrap().commitments.first().unwrap(); let secret_1 = merged_shares .get(&0) .unwrap() @@ -505,7 +497,6 @@ mod tests { .unwrap(); } - fn assert_valid(pm: avss::ProcessedMessage) -> avss::PartialOutput { match pm { avss::ProcessedMessage::Valid(po) => po, From 48d71924c35036380da9973bafa1cca908f6b257 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 13:00:51 +0200 Subject: [PATCH 26/91] Fold shards triple into AuthenticatedShards in EchoMessage - Replace EchoMessage's (recipient_root, shards, shards_proof) trio with a single authenticated_shards: AuthenticatedShards field - Add AuthenticatedShards::verify(leaf_index) and use it in echo_message and process_echo_messages instead of inline Merkle proof checks - Move impl AuthenticatedShards / impl DecryptionOutcome after the type definitions block - Misc tidies (typo fix, rename `r` -> `global_root`, etc.) 
--- .../src/threshold_schnorr/batch_avss.rs | 111 ++++++++---------- 1 file changed, 48 insertions(+), 63 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 3b94f4a9a1..969aa0fd7d 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -91,11 +91,10 @@ pub struct AuthenticatedShards { pub struct EchoMessage { sender: PartyId, global_root: merkle::Node, + /// Proof that `authenticated_shards.root` sits under `global_root` at the recipient's leaf. recipient_root_proof: merkle::MerkleProof, - recipient_root: merkle::Node, - shards: Vec, + authenticated_shards: AuthenticatedShards, common_message_hash: Digest<32>, - shards_proof: merkle::MerkleProof, } /// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. @@ -115,19 +114,6 @@ pub enum DecryptionOutcome { InvalidDispersal(Blame), } -impl DecryptionOutcome { - /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when - /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. The receiver's - /// local [ReceiverOutput] (in the Valid case) is consumed and not part of the wire format. - pub fn into_response(self) -> Response { - match self { - DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), - DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), - DecryptionOutcome::InvalidDispersal(b) => Response::InvalidDispersal(b), - } - } -} - /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's /// broadcast or a [InvalidShares] / [InvalidDispersal] complaint otherwise. #[allow(clippy::large_enum_variant)] @@ -202,6 +188,28 @@ pub struct ShareBatch { pub blinding_share: S, } +impl AuthenticatedShards { + /// Verify that `shards` are the leaf at `leaf_index` under `root` using `shards_proof`. 
+ fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { + self.shards_proof + .verify_proof_with_unserialized_leaf(&self.root, &self.shards, leaf_index) + } +} + +impl DecryptionOutcome { + /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when + /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. + /// The receiver's local [ReceiverOutput] (in the Valid case) is consumed and not part of the + /// wire format. + pub fn into_response(self) -> Response { + match self { + DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), + DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), + DecryptionOutcome::InvalidDispersal(b) => Response::InvalidDispersal(b), + } + } +} + impl ShareBatch { /// Verify a batch of shares using the given challenge. fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { @@ -556,18 +564,11 @@ impl Receiver { /// 2. When a party receives its message, it verifies the Merkle tree path for it's shards and generates EchoMessages - one per party. pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { - if message.dispersal.iter().any( - |AuthenticatedShards { - root, - shards, - shards_proof, - .. - }| { - shards_proof - .verify_proof_with_unserialized_leaf(root, &shards, self.id as usize) - .is_err() - }, - ) { + if message + .dispersal + .iter() + .any(|auth| auth.verify(self.id as usize).is_err()) + { return Err(InvalidMessage); } @@ -575,39 +576,27 @@ impl Receiver { message .dispersal .iter() - .map(|AuthenticatedShards { root, .. }| root), + .map(|auth| &auth.root), )?; - let r = tree.root(); + let global_root = tree.root(); let digest = compute_common_message_hash(&message.common); message .dispersal .iter() .enumerate() - .map( - |( - i, - AuthenticatedShards { - root, - shards, - shards_proof, - .. 
- }, - )| { - Ok(EchoMessage { - sender: self.id, - global_root: r.clone(), - recipient_root_proof: tree.get_proof(i)?, - recipient_root: root.clone(), - shards: shards.clone(), - common_message_hash: digest, - shards_proof: shards_proof.clone(), - }) - }, - ) + .map(|(i, authenticated_shards)| { + Ok(EchoMessage { + sender: self.id, + global_root: global_root.clone(), + recipient_root_proof: tree.get_proof(i)?, + authenticated_shards: authenticated_shards.clone(), + common_message_hash: digest, + }) + }) .collect::>>() } - /// 3. When a party has received at EchoMessages from parties with at least weight W - f, it + /// 3. When a party has received EchoMessages from parties with at least weight W - f, it /// tries to process them. It first filters out invalid messages and checks if the EchoMessages /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. /// If the filtered set of EchoMessages does not have sufficient weight, an NotEnoughWeight error @@ -624,18 +613,14 @@ impl Receiver { .iter() .filter(|echo_message| { echo_message - .shards_proof - .verify_proof_with_unserialized_leaf( - &echo_message.recipient_root, - &echo_message.shards, - echo_message.sender as usize, - ) + .authenticated_shards + .verify(echo_message.sender as usize) .is_ok() && echo_message .recipient_root_proof .verify_proof_with_unserialized_leaf( &echo_message.global_root, - &echo_message.recipient_root, + &echo_message.authenticated_shards.root, self.id as usize, ) .is_ok() @@ -659,7 +644,7 @@ impl Receiver { echo_messages .iter() .find(|e| e.sender == id) - .map(|e| e.shards.clone()) + .map(|e| e.authenticated_shards.shards.clone()) })?; Ok(ProcessedEchoMessages { ciphertext, @@ -819,8 +804,8 @@ impl Receiver { .into_iter() .map(|e| ShardContribution { sender: e.sender, - shards: e.shards, - shards_proof: e.shards_proof, + shards: e.authenticated_shards.shards, + shards_proof: e.authenticated_shards.shards_proof, }) .collect_vec(); 
Ok(DecryptionOutcome::InvalidDispersal(Blame { @@ -1051,7 +1036,7 @@ fn require_uniform_echo_metadata( get_uniform_value(echoes.iter().map(|e| { ( e.global_root.clone(), - e.recipient_root.clone(), + e.authenticated_shards.root.clone(), e.common_message_hash, ) })) From eaaed44241de8a2b50e26f2b2650c6d9a7ecd421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 13:07:52 +0200 Subject: [PATCH 27/91] Hoist roots, rename echo_messages -> valid_echoes --- .../src/threshold_schnorr/batch_avss.rs | 35 +++++++++---------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 969aa0fd7d..8e673e58d2 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -450,6 +450,8 @@ impl Dealer { .map(MerkleTree::::build_from_unserialized) .collect::>>()?; + let roots: Vec = trees.iter().map(MerkleTree::root).collect(); + let dispersals: Vec> = self .nodes .node_ids_iter() @@ -457,9 +459,10 @@ impl Dealer { shards .iter() .zip(&trees) - .map(|(s, tree)| { + .zip(&roots) + .map(|((s, tree), root)| { Ok(AuthenticatedShards { - root: tree.root(), + root: root.clone(), shards: s[id as usize].clone(), shards_proof: tree.get_proof(id as usize)?, }) @@ -468,9 +471,7 @@ impl Dealer { }) .collect::>>()?; - let root = - MerkleTree::::build_from_unserialized(trees.iter().map(MerkleTree::root))? 
- .root(); + let global_root = MerkleTree::::build_from_unserialized(roots.iter())?.root(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -478,7 +479,7 @@ impl Dealer { &full_public_keys, &blinding_commit, &shared, - &root, + &global_root, ); // Get the first t evaluations for the response polynomial and use these to compute the coefficients @@ -573,10 +574,7 @@ impl Receiver { } let tree = MerkleTree::::build_from_unserialized( - message - .dispersal - .iter() - .map(|auth| &auth.root), + message.dispersal.iter().map(|auth| &auth.root), )?; let global_root = tree.root(); let digest = compute_common_message_hash(&message.common); @@ -609,7 +607,7 @@ impl Receiver { echo_messages: &[EchoMessage], ) -> FastCryptoResult { // Filter out invalid echo messages - let echo_messages = echo_messages + let valid_echoes = echo_messages .iter() .filter(|echo_message| { echo_message @@ -628,20 +626,19 @@ impl Receiver { .cloned() .collect_vec(); - let (global_root, recipient_root, _) = require_uniform_echo_metadata(&echo_messages)?; + let (global_root, recipient_root, _) = require_uniform_echo_metadata(&valid_echoes)?; let required_weight = self.nodes.total_weight() - self.f; - if self.nodes.total_weight_of( - echo_messages - .iter() - .map(|echo_message| &echo_message.sender), - )? < required_weight + if self + .nodes + .total_weight_of(valid_echoes.iter().map(|echo_message| &echo_message.sender))? 
+ < required_weight { return Err(NotEnoughWeight(required_weight as usize)); } let ciphertext = self.reconstruct_ciphertext(self.id, |id| { - echo_messages + valid_echoes .iter() .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) @@ -650,7 +647,7 @@ impl Receiver { ciphertext, global_root, recipient_root, - valid_echoes: echo_messages, + valid_echoes, }) } From cda8478fc306d0dd7e23d372d141cbaa8cd02b8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 13:24:58 +0200 Subject: [PATCH 28/91] Rename EchoMessage/ProcessedEchoMessages and field renames - EchoMessage -> Echo, ProcessedEchoMessages -> ProcessedEchos - AuthenticatedShards: root -> recipient_root, shards_proof -> proof - ShardContribution: shards_proof -> proof - Local rename digest -> common_message_hash - Add a short doc on AuthenticatedShards - Reuse assert_valid in test_happy_path --- fastcrypto-tbls/benches/batch_avss.rs | 8 +- .../src/threshold_schnorr/batch_avss.rs | 90 ++++++++++--------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +- 3 files changed, 55 insertions(+), 47 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 21a2f6bcf7..c8ae4f4277 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -130,12 +130,12 @@ mod batch_avss_benches { .map(|id| setup_receiver(id, 0, f, t, w, &keys, batch_size_per_weight)) .collect(); let messages = d0.create_message(&mut thread_rng()).unwrap(); - let echoes: Vec> = receivers + let echoes: Vec> = receivers .iter() .enumerate() .map(|(i, r)| r.echo_message(&messages[i]).unwrap()) .collect(); - let echoes_for_party_1: Vec = + let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); let r1 = &receivers[1]; @@ -177,12 +177,12 @@ mod batch_avss_benches { ) }) .collect(); - let echoes: Vec> = receivers + let echoes: Vec> = receivers .iter() .enumerate() .map(|(i, r)| 
r.echo_message(&messages[i]).unwrap()) .collect(); - let echoes_for_party_1: Vec = + let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); let pem = receivers[1] .process_echo_messages(&echoes_for_party_1) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 8e673e58d2..759e145dfc 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -78,31 +78,33 @@ pub struct CommonMessage { response_polynomial: Poly, } +/// One recipient's shards for one ciphertext, with a Merkle proof binding them to the +/// per-ciphertext root the dealer committed to. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AuthenticatedShards { - root: merkle::Node, + recipient_root: merkle::Node, shards: Vec, - shards_proof: merkle::MerkleProof, + proof: merkle::MerkleProof, } /// One sender's echo to a single recipient: their shard for the recipient's ciphertext, with /// Merkle proofs binding it to the dealer's broadcast. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EchoMessage { +pub struct Echo { sender: PartyId, global_root: merkle::Node, - /// Proof that `authenticated_shards.root` sits under `global_root` at the recipient's leaf. + /// Proof that `authenticated_shards.recipient_root` sits under `global_root` at the recipient's leaf. recipient_root_proof: merkle::MerkleProof, authenticated_shards: AuthenticatedShards, common_message_hash: Digest<32>, } /// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. 
-pub struct ProcessedEchoMessages { +pub struct ProcessedEchos { ciphertext: Vec, global_root: merkle::Node, recipient_root: merkle::Node, - valid_echoes: Vec, + valid_echoes: Vec, } /// The result of [Receiver::verify_and_decrypt]: either valid shares plus a vote to broadcast, or @@ -155,7 +157,7 @@ pub struct ShardContribution { pub sender: PartyId, pub shards: Vec, /// Proof that `shards` sits under the accuser's `recipient_root` at `sender`'s leaf. - pub shards_proof: merkle::MerkleProof, + pub proof: merkle::MerkleProof, } /// The output of a receiver which is a batch of shares and public keys for all nonces. @@ -189,10 +191,13 @@ pub struct ShareBatch { } impl AuthenticatedShards { - /// Verify that `shards` are the leaf at `leaf_index` under `root` using `shards_proof`. + /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root` using `proof`. fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { - self.shards_proof - .verify_proof_with_unserialized_leaf(&self.root, &self.shards, leaf_index) + self.proof.verify_proof_with_unserialized_leaf( + &self.recipient_root, + &self.shards, + leaf_index, + ) } } @@ -462,9 +467,9 @@ impl Dealer { .zip(&roots) .map(|((s, tree), root)| { Ok(AuthenticatedShards { - root: root.clone(), + recipient_root: root.clone(), shards: s[id as usize].clone(), - shards_proof: tree.get_proof(id as usize)?, + proof: tree.get_proof(id as usize)?, }) }) .collect::>>() @@ -563,8 +568,8 @@ impl Receiver { }) } - /// 2. When a party receives its message, it verifies the Merkle tree path for it's shards and generates EchoMessages - one per party. - pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { + /// 2. When a party receives its message, it verifies the Merkle tree path for it's shards and generates Echos - one per party. 
+ pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { if message .dispersal .iter() @@ -574,38 +579,38 @@ impl Receiver { } let tree = MerkleTree::::build_from_unserialized( - message.dispersal.iter().map(|auth| &auth.root), + message.dispersal.iter().map(|auth| &auth.recipient_root), )?; let global_root = tree.root(); - let digest = compute_common_message_hash(&message.common); + let common_message_hash = compute_common_message_hash(&message.common); message .dispersal .iter() .enumerate() .map(|(i, authenticated_shards)| { - Ok(EchoMessage { + Ok(Echo { sender: self.id, global_root: global_root.clone(), recipient_root_proof: tree.get_proof(i)?, authenticated_shards: authenticated_shards.clone(), - common_message_hash: digest, + common_message_hash, }) }) .collect::>>() } - /// 3. When a party has received EchoMessages from parties with at least weight W - f, it - /// tries to process them. It first filters out invalid messages and checks if the EchoMessages + /// 3. When a party has received Echos from parties with at least weight W - f, it + /// tries to process them. It first filters out invalid messages and checks if the Echos /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. - /// If the filtered set of EchoMessages does not have sufficient weight, an NotEnoughWeight error + /// If the filtered set of Echos does not have sufficient weight, an NotEnoughWeight error /// is returned. /// /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed /// shards along with the r and r_i values. 
pub fn process_echo_messages( &self, - echo_messages: &[EchoMessage], - ) -> FastCryptoResult { + echo_messages: &[Echo], + ) -> FastCryptoResult { // Filter out invalid echo messages let valid_echoes = echo_messages .iter() @@ -618,7 +623,7 @@ impl Receiver { .recipient_root_proof .verify_proof_with_unserialized_leaf( &echo_message.global_root, - &echo_message.authenticated_shards.root, + &echo_message.authenticated_shards.recipient_root, self.id as usize, ) .is_ok() @@ -643,7 +648,7 @@ impl Receiver { .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) })?; - Ok(ProcessedEchoMessages { + Ok(ProcessedEchos { ciphertext, global_root, recipient_root, @@ -716,7 +721,7 @@ impl Receiver { /// appeared on the TOB/ABC channel. pub fn verify_and_decrypt( &self, - processed_echo_messages: ProcessedEchoMessages, + processed_echo_messages: ProcessedEchos, message: &Message, ) -> FastCryptoResult { let CommonMessage { @@ -726,7 +731,7 @@ impl Receiver { shared, } = &message.common; - let ProcessedEchoMessages { + let ProcessedEchos { ciphertext, global_root, recipient_root, @@ -802,7 +807,7 @@ impl Receiver { .map(|e| ShardContribution { sender: e.sender, shards: e.authenticated_shards.shards, - shards_proof: e.authenticated_shards.shards_proof, + proof: e.authenticated_shards.proof, }) .collect_vec(); Ok(DecryptionOutcome::InvalidDispersal(Blame { @@ -906,7 +911,7 @@ impl Receiver { } if shards.iter().any(|s| { - s.shards_proof + s.proof .verify_proof_with_unserialized_leaf(recipient_root, &s.shards, s.sender as usize) .is_err() }) { @@ -949,12 +954,12 @@ impl Receiver { .dispersal .get(accuser_id as usize) .ok_or(InvalidProof)? - .root) + .recipient_root) } fn global_root(&self, message: &Message) -> FastCryptoResult { Ok(MerkleTree::::build_from_unserialized( - message.dispersal.iter().map(|s| &s.root), + message.dispersal.iter().map(|s| &s.recipient_root), )? 
.root()) } @@ -1028,12 +1033,12 @@ fn uleb128_len(x: usize) -> usize { /// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if /// any field is non-uniform. fn require_uniform_echo_metadata( - echoes: &[EchoMessage], + echoes: &[Echo], ) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { get_uniform_value(echoes.iter().map(|e| { ( e.global_root.clone(), - e.authenticated_shards.root.clone(), + e.authenticated_shards.recipient_root.clone(), e.common_message_hash, ) })) @@ -1235,14 +1240,10 @@ mod tests { .iter() .zip(processed_echo_messages) .zip(messages) - .map( - |((receiver, pem), message)| match receiver.verify_and_decrypt(pem, &message) { - Ok(DecryptionOutcome::Valid { output, .. }) => (receiver.id, output), - _ => panic!( - "All receivers should be able to process the message in the happy path" - ), - }, - ) + .map(|((receiver, pem), message)| { + let output = assert_valid(receiver.verify_and_decrypt(pem, &message).unwrap()); + (receiver.id, output) + }) .collect::>(); let secrets = (0..dealer.batch_size) @@ -1407,6 +1408,13 @@ mod tests { } } + fn assert_valid(outcome: DecryptionOutcome) -> ReceiverOutput { + match outcome { + DecryptionOutcome::Valid { output, .. } => output, + other => panic!("expected valid outcome, got {:?}", outcome_kind(&other)), + } + } + fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { match outcome { DecryptionOutcome::Valid { .. } => "Valid", diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 9616f3233e..993924181a 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -237,13 +237,13 @@ mod tests { let messages = dealer.create_message(&mut rng).unwrap(); // Each receiver produces echoes addressed to every party. 
- let echoes: Vec> = receivers + let echoes: Vec> = receivers .iter() .map(|r| r.echo_message(&messages[r.id as usize]).unwrap()) .collect(); // Bundle echoes per recipient: echoes_per_recipient[i] = echoes addressed to party i. - let echoes_per_recipient: Vec> = (0..n) + let echoes_per_recipient: Vec> = (0..n) .map(|i| echoes.iter().map(|em| em[i].clone()).collect()) .collect(); From 7b2daebb5ec69ecbefdad91ac3bc65743abc45e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 13:37:50 +0200 Subject: [PATCH 29/91] Add Echo::verify and recipient_tree helpers - Echo::verify wraps both Merkle proof checks (inner shards + outer recipient-root binding); used in process_echo_messages - Free function recipient_tree builds the per-recipient Merkle tree over per-node shard chunks; used by Dealer::create_message and Receiver::check_avid_consistency - global_root method becomes global_tree returning a MerkleTree; callers take .root() as needed - Drop the recipient_roots Vec; just call tree.root() inline --- .../src/threshold_schnorr/batch_avss.rs | 67 ++++++++++--------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 759e145dfc..2bbc9aadbc 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -201,6 +201,19 @@ impl AuthenticatedShards { } } +impl Echo { + /// Verify both Merkle proofs in this echo. 
+ fn verify(&self, recipient_id: PartyId) -> FastCryptoResult<()> { + self.authenticated_shards.verify(self.sender as usize)?; + self.recipient_root_proof + .verify_proof_with_unserialized_leaf( + &self.global_root, + &self.authenticated_shards.recipient_root, + recipient_id as usize, + ) + } +} + impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. @@ -452,11 +465,9 @@ impl Dealer { let trees = shards .iter() - .map(MerkleTree::::build_from_unserialized) + .map(recipient_tree) .collect::>>()?; - let roots: Vec = trees.iter().map(MerkleTree::root).collect(); - let dispersals: Vec> = self .nodes .node_ids_iter() @@ -464,10 +475,9 @@ impl Dealer { shards .iter() .zip(&trees) - .zip(&roots) - .map(|((s, tree), root)| { + .map(|(s, tree)| { Ok(AuthenticatedShards { - recipient_root: root.clone(), + recipient_root: tree.root(), shards: s[id as usize].clone(), proof: tree.get_proof(id as usize)?, }) @@ -476,7 +486,9 @@ impl Dealer { }) .collect::>>()?; - let global_root = MerkleTree::::build_from_unserialized(roots.iter())?.root(); + let global_root = + MerkleTree::::build_from_unserialized(trees.iter().map(MerkleTree::root))? 
+ .root(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -578,9 +590,7 @@ impl Receiver { return Err(InvalidMessage); } - let tree = MerkleTree::::build_from_unserialized( - message.dispersal.iter().map(|auth| &auth.recipient_root), - )?; + let tree = self.global_tree(message)?; let global_root = tree.root(); let common_message_hash = compute_common_message_hash(&message.common); message @@ -614,20 +624,7 @@ impl Receiver { // Filter out invalid echo messages let valid_echoes = echo_messages .iter() - .filter(|echo_message| { - echo_message - .authenticated_shards - .verify(echo_message.sender as usize) - .is_ok() - && echo_message - .recipient_root_proof - .verify_proof_with_unserialized_leaf( - &echo_message.global_root, - &echo_message.authenticated_shards.recipient_root, - self.id as usize, - ) - .is_ok() - }) + .filter(|echo_message| echo_message.verify(self.id).is_ok()) .cloned() .collect_vec(); @@ -698,9 +695,7 @@ impl Receiver { let new_shards = self .nodes .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; - let new_tree = MerkleTree::::build_from_unserialized(new_shards.iter())?; - - if new_tree.root() != *root { + if recipient_tree(&new_shards)?.root() != *root { return Err(InvalidMessage); } @@ -853,7 +848,7 @@ impl Receiver { let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let recipient_root = self.dispersal_root_for(message, accuser_id)?; - let global_root = self.global_root(message)?; + let global_root = self.global_tree(message)?.root(); if common_message_hash != &compute_common_message_hash(&message.common) || self @@ -957,11 +952,10 @@ impl Receiver { .recipient_root) } - fn global_root(&self, message: &Message) -> FastCryptoResult { - Ok(MerkleTree::::build_from_unserialized( + fn global_tree(&self, message: &Message) -> FastCryptoResult> { + MerkleTree::::build_from_unserialized( message.dispersal.iter().map(|s| 
&s.recipient_root), - )? - .root()) + ) } /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. @@ -981,7 +975,7 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let r = self.global_root(message)?; + let r = self.global_tree(message)?.root(); let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); let response_shares = responses .into_iter() @@ -1032,6 +1026,13 @@ fn uleb128_len(x: usize) -> usize { /// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root /// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if /// any field is non-uniform. +/// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one +/// ciphertext). The root of this tree is the per-recipient `recipient_root`. +#[allow(clippy::ptr_arg)] +fn recipient_tree(shards: &Vec>) -> FastCryptoResult> { + MerkleTree::::build_from_unserialized(shards.iter()) +} + fn require_uniform_echo_metadata( echoes: &[Echo], ) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { From a46c2db4e78850b3f1184ab6dfcad1b034b8905f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 13:40:39 +0200 Subject: [PATCH 30/91] Add ShardContribution::verify and use it in handle_blame --- .../src/threshold_schnorr/batch_avss.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 2bbc9aadbc..babaaf3074 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -214,6 +214,17 @@ impl Echo { } } +impl ShardContribution { + /// Verify that `shards` are the leaf at `sender` under `recipient_root` using `proof`. 
+ fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { + self.proof.verify_proof_with_unserialized_leaf( + recipient_root, + &self.shards, + self.sender as usize, + ) + } +} + impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. @@ -905,11 +916,7 @@ impl Receiver { return Err(InvalidProof); } - if shards.iter().any(|s| { - s.proof - .verify_proof_with_unserialized_leaf(recipient_root, &s.shards, s.sender as usize) - .is_err() - }) { + if shards.iter().any(|s| s.verify(recipient_root).is_err()) { return Err(InvalidProof); } From 6019d1062756179b0c38485b6420ed4ee108865b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 14:19:40 +0200 Subject: [PATCH 31/91] Clean up --- .../src/threshold_schnorr/batch_avss.rs | 283 +++++++++--------- fastcrypto-tbls/src/threshold_schnorr/bcs.rs | 4 +- 2 files changed, 146 insertions(+), 141 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index babaaf3074..640cb170f7 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -190,41 +190,6 @@ pub struct ShareBatch { pub blinding_share: S, } -impl AuthenticatedShards { - /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root` using `proof`. - fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { - self.proof.verify_proof_with_unserialized_leaf( - &self.recipient_root, - &self.shards, - leaf_index, - ) - } -} - -impl Echo { - /// Verify both Merkle proofs in this echo. 
- fn verify(&self, recipient_id: PartyId) -> FastCryptoResult<()> { - self.authenticated_shards.verify(self.sender as usize)?; - self.recipient_root_proof - .verify_proof_with_unserialized_leaf( - &self.global_root, - &self.authenticated_shards.recipient_root, - recipient_id as usize, - ) - } -} - -impl ShardContribution { - /// Verify that `shards` are the leaf at `sender` under `recipient_root` using `proof`. - fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { - self.proof.verify_proof_with_unserialized_leaf( - recipient_root, - &self.shards, - self.sender as usize, - ) - } -} - impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. @@ -474,10 +439,11 @@ impl Dealer { }) .collect::>>()?; - let trees = shards + let recipient_trees = shards .iter() .map(recipient_tree) .collect::>>()?; + let recipient_roots = recipient_trees.iter().map(MerkleTree::root); let dispersals: Vec> = self .nodes @@ -485,10 +451,11 @@ impl Dealer { .map(|id| { shards .iter() - .zip(&trees) - .map(|(s, tree)| { + .zip(&recipient_trees) + .zip(recipient_roots.clone()) + .map(|((s, tree), recipient_root)| { Ok(AuthenticatedShards { - recipient_root: tree.root(), + recipient_root, shards: s[id as usize].clone(), proof: tree.get_proof(id as usize)?, }) @@ -497,9 +464,7 @@ impl Dealer { }) .collect::>>()?; - let global_root = - MerkleTree::::build_from_unserialized(trees.iter().map(MerkleTree::root))? - .root(); + let global_root = global_tree(recipient_roots)?.root(); // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( @@ -591,7 +556,7 @@ impl Receiver { }) } - /// 2. When a party receives its message, it verifies the Merkle tree path for it's shards and generates Echos - one per party. + /// 2. 
When a party receives its message, it verifies the Merkle tree path for its shards and generates Echos, one per party. pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { if message .dispersal @@ -601,8 +566,8 @@ impl Receiver { return Err(InvalidMessage); } - let tree = self.global_tree(message)?; - let global_root = tree.root(); + let global_tree = global_tree_from_message(message)?; + let global_root = global_tree.root(); let common_message_hash = compute_common_message_hash(&message.common); message .dispersal @@ -612,7 +577,7 @@ impl Receiver { Ok(Echo { sender: self.id, global_root: global_root.clone(), - recipient_root_proof: tree.get_proof(i)?, + recipient_root_proof: global_tree.get_proof(i)?, authenticated_shards: authenticated_shards.clone(), common_message_hash, }) @@ -664,55 +629,6 @@ impl Receiver { }) } - /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard - /// contributions exposed via `shards_for(party_id) -> Option>`. Fails if the - /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a - /// party's contribution has a shard count that doesn't match its weight. The caller is - /// responsible for having authenticated the shards via their Merkle proofs. - fn reconstruct_ciphertext( - &self, - accuser_id: PartyId, - shards_for: impl Fn(PartyId) -> Option>, - ) -> FastCryptoResult> { - let shards: Vec> = self - .nodes - .node_ids_iter() - .map(|id| -> FastCryptoResult>> { - let weight = self.nodes.weight_of(id).expect("valid party id") as usize; - match shards_for(id) { - Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), - // Fail if a contributor's shard count doesn't match its weight. 
- Some(_) => Err(InvalidInput), - None => Ok(vec![None; weight]), - } - }) - .flatten_ok() - .collect::>>()?; - - let mut ciphertext = self.code.decode(shards)?; - // Reed-Solomon `decode` returns shard-aligned padding; trim back to the original encrypted - // blob length. - let weight = self.nodes.weight_of(accuser_id)? as usize; - ciphertext.truncate(SharesForNode::bcs_serialized_size(weight, self.batch_size)); - Ok(ciphertext) - } - - /// The check r_i' == r_i from the paper - fn check_avid_consistency( - &self, - ciphertext: &[u8], - root: &merkle::Node, - ) -> FastCryptoResult<()> { - let new_shards = self - .nodes - .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; - if recipient_tree(&new_shards)?.root() != *root { - return Err(InvalidMessage); - } - - Ok(()) - } - /// 4. If the party also received a valid Message from the dealer, it can now decrypt its shares. /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the @@ -722,8 +638,8 @@ impl Receiver { /// Message from the dealer should request the CommonMessage part of that from the parties who voted. /// Using this, the party can decrypt the shares and verify that the shares are valid. /// - /// If this function returns a [InvalidShares] or [InvalidDispersal] outcome, the party should broadcast it - /// to the other parties — but only after at least `W - f` votes from other parties have + /// If this function returns an [InvalidShares] or [InvalidDispersal] outcome, the party should broadcast it + /// to the other parties, but only after at least `W - f` votes from other parties have /// appeared on the TOB/ABC channel. 
pub fn verify_and_decrypt( &self, @@ -736,6 +652,11 @@ impl Receiver { response_polynomial, shared, } = &message.common; + if full_public_keys.len() != self.batch_size + || response_polynomial.degree() != self.t as usize - 1 + { + return Err(InvalidMessage); + } let ProcessedEchos { ciphertext, @@ -743,16 +664,14 @@ impl Receiver { recipient_root, valid_echoes, } = processed_echo_messages; - if full_public_keys.len() != self.batch_size - || response_polynomial.degree() != self.t as usize - 1 - { - return Err(InvalidMessage); - } // TODO: What should happen if these checks fail? // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = - compute_challenge_from_message(&self.random_oracle(), &global_root, &message.common); + let challenge = compute_challenge_from_common_message( + &self.random_oracle(), + &global_root, + &message.common, + ); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -777,7 +696,7 @@ impl Receiver { self.id as usize, ) }) - .and_then(|plaintext| SharesForNode::from_bytes(&plaintext)) + .and_then(SharesForNode::from_bytes) .and_then(|my_shares| { verify_shares( &my_shares, @@ -803,9 +722,7 @@ impl Receiver { }, }), (true, Ok(_)) => { - // Repackage each echo's per-shard proof as a ShardContribution. r_i stays - // implicit — the responder reads it from its own [Message] rather than receiving - // it via the complaint. 
+ // Repackage each echo's per-shard proof as a ShardContribution let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; let shards = valid_echoes @@ -859,7 +776,6 @@ impl Receiver { let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let recipient_root = self.dispersal_root_for(message, accuser_id)?; - let global_root = self.global_tree(message)?.root(); if common_message_hash != &compute_common_message_hash(&message.common) || self @@ -869,8 +785,12 @@ impl Receiver { return Err(InvalidProof); } - let challenge = - compute_challenge_from_message(&self.random_oracle(), &global_root, &message.common); + let global_root = global_tree_from_message(message)?.root(); + let challenge = compute_challenge_from_common_message( + &self.random_oracle(), + &global_root, + &message.common, + ); proof.check( accuser_pk, ciphertext, @@ -908,15 +828,10 @@ impl Receiver { let accuser_id = *accuser_id; let recipient_root = self.dispersal_root_for(message, accuser_id)?; - if common_message_hash != &compute_common_message_hash(&message.common) { - return Err(InvalidProof); - } - - if shards.iter().map(|s| s.sender).unique().count() != shards.len() { - return Err(InvalidProof); - } - - if shards.iter().any(|s| s.verify(recipient_root).is_err()) { + if common_message_hash != &compute_common_message_hash(&message.common) + || shards.iter().map(|s| s.sender).unique().count() != shards.len() + || shards.iter().any(|s| s.verify(recipient_root).is_err()) + { return Err(InvalidProof); } @@ -947,6 +862,56 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } + /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard + /// contributions exposed via `shards_for(party_id) -> Option>`. 
Fails if the + /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a + /// party's contribution has a shard count that doesn't match its weight. The caller is + /// responsible for having authenticated the shards via their Merkle proofs. + fn reconstruct_ciphertext( + &self, + accuser_id: PartyId, + shards_for: impl Fn(PartyId) -> Option>, + ) -> FastCryptoResult> { + let shards: Vec> = self + .nodes + .node_ids_iter() + .map(|id| -> FastCryptoResult>> { + let weight = self.nodes.weight_of(id).expect("valid party id") as usize; + match shards_for(id) { + Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), + // Fail if a contributor's shard count doesn't match its weight. + Some(_) => Err(InvalidInput), + None => Ok(vec![None; weight]), + } + }) + .flatten_ok() + .collect::>>()?; + + let mut ciphertext = self.code.decode(shards)?; + // Reed-Solomon `decode` returns shard-aligned padding; trim back to the original encrypted blob length. + // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. + ciphertext.truncate(SharesForNode::bcs_serialized_size( + self.nodes.weight_of(accuser_id)? as usize, + self.batch_size, + )); + Ok(ciphertext) + } + + /// The check r_i' == r_i from the paper + fn check_avid_consistency( + &self, + ciphertext: &[u8], + expected_root: &merkle::Node, + ) -> FastCryptoResult<()> { + let new_shards = self + .nodes + .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; + if recipient_tree(&new_shards)?.root() != *expected_root { + return Err(InvalidMessage); + } + Ok(()) + } + fn dispersal_root_for<'a>( &self, message: &'a Message, @@ -959,12 +924,6 @@ impl Receiver { .recipient_root) } - fn global_tree(&self, message: &Message) -> FastCryptoResult> { - MerkleTree::::build_from_unserialized( - message.dispersal.iter().map(|s| &s.recipient_root), - ) - } - /// 6. 
Upon receiving t valid responses to a complaint, the accuser can recover its shares. /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( @@ -982,8 +941,12 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let r = self.global_tree(message)?.root(); - let challenge = compute_challenge_from_message(&self.random_oracle(), &r, &message.common); + let global_root = global_tree_from_message(message)?.root(); + let challenge = compute_challenge_from_common_message( + &self.random_oracle(), + &global_root, + &message.common, + ); let response_shares = responses .into_iter() .filter_map(|response| { @@ -1019,6 +982,58 @@ impl Receiver { } } +impl AuthenticatedShards { + /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root` using `proof`. + fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { + self.proof.verify_proof_with_unserialized_leaf( + &self.recipient_root, + &self.shards, + leaf_index, + ) + } +} + +impl Echo { + /// Verify both Merkle proofs in this echo. + fn verify(&self, recipient_id: PartyId) -> FastCryptoResult<()> { + self.authenticated_shards.verify(self.sender as usize)?; + self.recipient_root_proof + .verify_proof_with_unserialized_leaf( + &self.global_root, + &self.authenticated_shards.recipient_root, + recipient_id as usize, + ) + } +} + +impl ShardContribution { + /// Verify that `shards` are the leaf at `sender` under `recipient_root` using `proof`. + fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { + self.proof.verify_proof_with_unserialized_leaf( + recipient_root, + &self.shards, + self.sender as usize, + ) + } +} + +/// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one +/// ciphertext). The root of this tree is the per-recipient `recipient_root`. 
+#[allow(clippy::ptr_arg)] +fn recipient_tree(shards: &Vec>) -> FastCryptoResult> { + MerkleTree::::build_from_unserialized(shards.iter()) +} + +fn global_tree( + recipient_roots: impl ExactSizeIterator, +) -> FastCryptoResult> { + MerkleTree::::build_from_unserialized(recipient_roots) +} + +fn global_tree_from_message(message: &Message) -> FastCryptoResult> { + global_tree(message.dispersal.iter().map(|s| s.recipient_root.clone())) +} + /// Number of bytes BCS uses to encode `x` as an unsigned LEB128 length prefix. fn uleb128_len(x: usize) -> usize { let mut len = 1; @@ -1030,16 +1045,6 @@ fn uleb128_len(x: usize) -> usize { len } -/// Pull the per-echo metadata that must agree across the entire echo set: the global Merkle root -/// `r`, the receiver's per-ciphertext root `r_i`, and the dealer's `H(val)`. Returns an error if -/// any field is non-uniform. -/// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one -/// ciphertext). The root of this tree is the per-recipient `recipient_root`. 
-#[allow(clippy::ptr_arg)] -fn recipient_tree(shards: &Vec>) -> FastCryptoResult> { - MerkleTree::::build_from_unserialized(shards.iter()) -} - fn require_uniform_echo_metadata( echoes: &[Echo], ) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { @@ -1085,7 +1090,7 @@ fn compute_challenge( .collect() } -fn compute_challenge_from_message( +fn compute_challenge_from_common_message( random_oracle: &RandomOracle, root: &merkle::Node, message: &CommonMessage, diff --git a/fastcrypto-tbls/src/threshold_schnorr/bcs.rs b/fastcrypto-tbls/src/threshold_schnorr/bcs.rs index 596755e607..558c226041 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/bcs.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/bcs.rs @@ -11,10 +11,10 @@ pub trait BCSSerialized: Serialize + for<'de> Deserialize<'de> { bcs::to_bytes(self).unwrap() } - fn from_bytes(bytes: &[u8]) -> FastCryptoResult + fn from_bytes(bytes: impl AsRef<[u8]>) -> FastCryptoResult where Self: Sized, { - bcs::from_bytes(bytes).map_err(|_| InvalidInput) + bcs::from_bytes(bytes.as_ref()).map_err(|_| InvalidInput) } } From 1b6a72f4b0b11426b97521db21eac2532bde887c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 14:32:56 +0200 Subject: [PATCH 32/91] Reorder + clean up --- .../src/threshold_schnorr/batch_avss.rs | 40 +++++++++---------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 640cb170f7..b09daf332b 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -232,25 +232,6 @@ impl ShareBatch { } impl SharesForNode { - /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given - /// batch size. 
- /// - /// Layout: - /// ```text - /// SharesForNode = Vec - /// = ULEB128(weight) + weight × ShareBatch - /// ShareBatch - /// = NonZeroU16 (= 2 bytes) + Vec + S - /// = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_SIZE_IN_BYTES - /// ``` - fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { - // TODO: A bit of a hack — this hardcodes the BCS layout of `SharesForNode`/`ShareBatch` - // and the 32-byte scalar size. Any change to those types' fields silently invalidates - // this formula; the unit test catches it but only within the tested ranges. - uleb128_len(weight) - + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_SIZE_IN_BYTES) - } - /// Get the weight of this node (number of shares it has). pub fn weight(&self) -> u16 { self.shares.len() as u16 @@ -323,6 +304,21 @@ impl SharesForNode { .collect::>>()?; Ok(Self { shares }) } + + /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given + /// batch size. + fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { + // Layout: + // SharesForNode = Vec + // = ULEB128(weight) + weight × ShareBatch + // ShareBatch + // = NonZeroU16 (= 2 bytes) + Vec + S + // = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_SIZE_IN_BYTES + + // TODO: A bit of a hack — this hardcodes the BCS layout of `SharesForNode` + uleb128_len(weight) + + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_SIZE_IN_BYTES) + } } impl BCSSerialized for SharesForNode {} @@ -367,8 +363,7 @@ impl Dealer { self.create_message_with_mutation(rng, |_| {}) } - /// Like [Self::create_message] but exposes a mutation hook over the pre-encryption - /// per-receiver plaintexts so tests can simulate a faulty dealer by corrupting one slot. + /// Like [Self::create_message] but exposes a mutation hook over the plaintexts so tests can simulate a faulty dealer by corrupting one slot. 
fn create_message_with_mutation( &self, rng: &mut impl AllowedRng, @@ -572,13 +567,14 @@ impl Receiver { message .dispersal .iter() + .cloned() .enumerate() .map(|(i, authenticated_shards)| { Ok(Echo { sender: self.id, global_root: global_root.clone(), recipient_root_proof: global_tree.get_proof(i)?, - authenticated_shards: authenticated_shards.clone(), + authenticated_shards, common_message_hash, }) }) From 4449865f11bc9c3977bbb7a14b0f53c390258dda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 14:39:25 +0200 Subject: [PATCH 33/91] Update docs + function signature --- .../src/threshold_schnorr/batch_avss.rs | 23 ++++++++++--------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index b09daf332b..c4b590a1e6 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -66,7 +66,7 @@ pub struct Receiver { /// The message broadcast by the dealer. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Message { - common: CommonMessage, + pub common: CommonMessage, dispersal: Vec, } @@ -625,13 +625,13 @@ impl Receiver { }) } - /// 4. If the party also received a valid Message from the dealer, it can now decrypt its shares. + /// 4. If the party also received a valid [Message] from the dealer, it can now decrypt its shares using the [CommonMessage] part of the message. /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the /// outcome, which yields a [Response::Vote] for the caller to sign. 
/// /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid - /// Message from the dealer should request the CommonMessage part of that from the parties who voted. + /// [Message] from the dealer should request the [CommonMessage] part of that from the parties who voted. /// Using this, the party can decrypt the shares and verify that the shares are valid. /// /// If this function returns an [InvalidShares] or [InvalidDispersal] outcome, the party should broadcast it @@ -640,14 +640,14 @@ impl Receiver { pub fn verify_and_decrypt( &self, processed_echo_messages: ProcessedEchos, - message: &Message, + common_message: &CommonMessage, ) -> FastCryptoResult { let CommonMessage { full_public_keys, blinding_commit, response_polynomial, shared, - } = &message.common; + } = &common_message; if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 { @@ -666,7 +666,7 @@ impl Receiver { let challenge = compute_challenge_from_common_message( &self.random_oracle(), &global_root, - &message.common, + &common_message, ); if G::generator() * response_polynomial.c0() != blinding_commit @@ -676,7 +676,6 @@ impl Receiver { return Err(InvalidMessage); } - // Check r_i' == r_i from the paper let faulty_dealer = self .check_avid_consistency(&ciphertext, &recipient_root) .is_err(); @@ -698,7 +697,7 @@ impl Receiver { &my_shares, &self.nodes, self.id, - &message.common, + &common_message, &challenge, self.batch_size, )?; @@ -714,7 +713,7 @@ impl Receiver { }, vote: Vote { global_root, - common_message_hash: compute_common_message_hash(&message.common), + common_message_hash: compute_common_message_hash(&common_message), }, }), (true, Ok(_)) => { @@ -1250,7 +1249,8 @@ mod tests { .zip(processed_echo_messages) .zip(messages) .map(|((receiver, pem), message)| { - let output = assert_valid(receiver.verify_and_decrypt(pem, &message).unwrap()); + let output = + assert_valid(receiver.verify_and_decrypt(pem, 
&message.common).unwrap()); (receiver.id, output) }) .collect::>(); @@ -1355,7 +1355,8 @@ mod tests { let pem = r.process_echo_messages(echoes).unwrap(); ( r.id, - r.verify_and_decrypt(pem, &messages[r.id as usize]).unwrap(), + r.verify_and_decrypt(pem, &messages[r.id as usize].common) + .unwrap(), ) }) .collect(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 993924181a..2157dfb2a9 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -251,7 +251,7 @@ mod tests { // In this case, we assume all are honest and there are no complaints. for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { let pem = r.process_echo_messages(echoes).unwrap(); - let output = assert_valid_batch(r.verify_and_decrypt(pem, msg).unwrap()); + let output = assert_valid_batch(r.verify_and_decrypt(pem, &msg.common).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } } From acf864503f340ef588b08b496be86ab6cf5e19f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 14:46:39 +0200 Subject: [PATCH 34/91] Change signature --- .../src/threshold_schnorr/batch_avss.rs | 35 ++++++++++++------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index c4b590a1e6..d7f0f846ea 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -759,8 +759,9 @@ impl Receiver { /// `message.dispersal[accuser_id]` rather than trusting the complaint to carry it. 
pub fn handle_reveal( &self, - message: &Message, reveal: &Reveal, + processed_echos: &ProcessedEchos, + common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Reveal { @@ -770,33 +771,37 @@ impl Receiver { } = reveal; let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - let recipient_root = self.dispersal_root_for(message, accuser_id)?; - if common_message_hash != &compute_common_message_hash(&message.common) + let ProcessedEchos { + global_root, + recipient_root, + .. + } = processed_echos; + + if common_message_hash != &compute_common_message_hash(&common_message) || self - .check_avid_consistency(ciphertext, recipient_root) + .check_avid_consistency(ciphertext, &recipient_root) .is_err() { return Err(InvalidProof); } - let global_root = global_tree_from_message(message)?.root(); let challenge = compute_challenge_from_common_message( &self.random_oracle(), &global_root, - &message.common, + &common_message, ); proof.check( accuser_pk, ciphertext, - &message.common.shared, + &common_message.shared, &self.random_oracle(), |shares: &SharesForNode| { verify_shares( shares, &self.nodes, accuser_id, - &message.common, + &common_message, &challenge, self.batch_size, ) @@ -811,8 +816,9 @@ impl Receiver { /// `r_i`. On success, respond with this party's own shares. pub fn handle_blame( &self, - message: &Message, blame: &Blame, + processed_echos: &ProcessedEchos, + common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Blame { @@ -821,11 +827,15 @@ impl Receiver { common_message_hash, } = blame; let accuser_id = *accuser_id; - let recipient_root = self.dispersal_root_for(message, accuser_id)?; - if common_message_hash != &compute_common_message_hash(&message.common) + let ProcessedEchos { + recipient_root, + .. 
+ } = processed_echos; + + if common_message_hash != &compute_common_message_hash(&common_message) || shards.iter().map(|s| s.sender).unique().count() != shards.len() - || shards.iter().any(|s| s.verify(recipient_root).is_err()) + || shards.iter().any(|s| s.verify(&recipient_root).is_err()) { return Err(InvalidProof); } @@ -1391,7 +1401,6 @@ mod tests { .map(|r| { r.handle_reveal( &messages[r.id as usize], - &reveal, outputs.get(&r.id).unwrap(), ) .unwrap() From 197e9667fec81f375e6eb860c0131e633fcbc162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 15:22:05 +0200 Subject: [PATCH 35/91] Drop AVID consistency check from handle_reveal --- fastcrypto-tbls/benches/batch_avss.rs | 4 +- .../src/threshold_schnorr/batch_avss.rs | 76 ++++++++----------- 2 files changed, 33 insertions(+), 47 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index c8ae4f4277..3b2bcd0296 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -188,7 +188,9 @@ mod batch_avss_benches { .process_echo_messages(&echoes_for_party_1) .unwrap(); assert_valid_batch( - receivers[1].verify_and_decrypt(pem, &messages[1]).unwrap(), + receivers[1] + .verify_and_decrypt(pem, &messages[1].common) + .unwrap(), ) }) .collect_vec(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index d7f0f846ea..a3e3b7b757 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -100,6 +100,7 @@ pub struct Echo { } /// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. 
+#[derive(Clone)] pub struct ProcessedEchos { ciphertext: Vec, global_root: merkle::Node, @@ -589,6 +590,9 @@ impl Receiver { /// /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed /// shards along with the r and r_i values. + /// + /// The party should keep its [ProcessedEchos] around in order to handle future requests + /// through [Self::handle_reveal] and [Self::handle_blame]. pub fn process_echo_messages( &self, echo_messages: &[Echo], @@ -639,7 +643,7 @@ impl Receiver { /// appeared on the TOB/ABC channel. pub fn verify_and_decrypt( &self, - processed_echo_messages: ProcessedEchos, + processed_echos: ProcessedEchos, common_message: &CommonMessage, ) -> FastCryptoResult { let CommonMessage { @@ -659,14 +663,14 @@ impl Receiver { global_root, recipient_root, valid_echoes, - } = processed_echo_messages; + } = processed_echos; // TODO: What should happen if these checks fail? // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} let challenge = compute_challenge_from_common_message( &self.random_oracle(), &global_root, - &common_message, + common_message, ); if G::generator() * response_polynomial.c0() != blinding_commit @@ -697,7 +701,7 @@ impl Receiver { &my_shares, &self.nodes, self.id, - &common_message, + common_message, &challenge, self.batch_size, )?; @@ -713,7 +717,7 @@ impl Receiver { }, vote: Vote { global_root, - common_message_hash: compute_common_message_hash(&common_message), + common_message_hash: compute_common_message_hash(common_message), }, }), (true, Ok(_)) => { @@ -752,11 +756,8 @@ impl Receiver { } /// 5. Upon receiving a [Reveal] from another party, verify it and respond with this party's - /// own shares so the accuser can recover. The ciphertext must be authenticated as the dealer's - /// by re-encoding under the locally-known `r_i`, and decryption with the recovery package must - /// yield invalid shares. 
`message` is the dealer's full [Message] as this party received it; - /// the verifier looks up the accuser's per-ciphertext root locally from - /// `message.dispersal[accuser_id]` rather than trusting the complaint to carry it. + /// own shares so the accuser can recover. Decryption with the recovery package must yield + /// invalid shares against `common_message`. pub fn handle_reveal( &self, reveal: &Reveal, @@ -772,24 +773,16 @@ impl Receiver { let accuser_id = proof.accuser_id; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - let ProcessedEchos { - global_root, - recipient_root, - .. - } = processed_echos; + let ProcessedEchos { global_root, .. } = processed_echos; - if common_message_hash != &compute_common_message_hash(&common_message) - || self - .check_avid_consistency(ciphertext, &recipient_root) - .is_err() - { + if common_message_hash != &compute_common_message_hash(common_message) { return Err(InvalidProof); } let challenge = compute_challenge_from_common_message( &self.random_oracle(), - &global_root, - &common_message, + global_root, + common_message, ); proof.check( accuser_pk, @@ -801,7 +794,7 @@ impl Receiver { shares, &self.nodes, accuser_id, - &common_message, + common_message, &challenge, self.batch_size, ) @@ -828,14 +821,11 @@ impl Receiver { } = blame; let accuser_id = *accuser_id; - let ProcessedEchos { - recipient_root, - .. - } = processed_echos; + let ProcessedEchos { recipient_root, .. 
} = processed_echos; - if common_message_hash != &compute_common_message_hash(&common_message) + if common_message_hash != &compute_common_message_hash(common_message) || shards.iter().map(|s| s.sender).unique().count() != shards.len() - || shards.iter().any(|s| s.verify(&recipient_root).is_err()) + || shards.iter().any(|s| s.verify(recipient_root).is_err()) { return Err(InvalidProof); } @@ -917,18 +907,6 @@ impl Receiver { Ok(()) } - fn dispersal_root_for<'a>( - &self, - message: &'a Message, - accuser_id: PartyId, - ) -> FastCryptoResult<&'a merkle::Node> { - Ok(&message - .dispersal - .get(accuser_id as usize) - .ok_or(InvalidProof)? - .recipient_root) - } - /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( @@ -1132,7 +1110,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - Dealer, DecryptionOutcome, Message, Receiver, ReceiverOutput, ShareBatch, SharesForNode, + Dealer, DecryptionOutcome, Message, ProcessedEchos, Receiver, ReceiverOutput, ShareBatch, + SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1247,7 +1226,7 @@ mod tests { }) .collect_vec(); - let processed_echo_messages = receivers + let processed_echos = receivers .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) @@ -1256,7 +1235,7 @@ mod tests { let all_shares = receivers .iter() - .zip(processed_echo_messages) + .zip(processed_echos) .zip(messages) .map(|((receiver, pem), message)| { let output = @@ -1357,12 +1336,15 @@ mod tests { .map(|i| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - // Process echoes + verify_and_decrypt + // Process echoes + verify_and_decrypt. Each receiver also keeps its [ProcessedEchos] in + // order to handle later complaints. 
+ let mut pems: HashMap = HashMap::new(); let outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { let pem = r.process_echo_messages(echoes).unwrap(); + pems.insert(r.id, pem.clone()); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1400,7 +1382,9 @@ mod tests { .filter(|r| r.id != victim_id) .map(|r| { r.handle_reveal( - &messages[r.id as usize], + &reveal, + pems.get(&r.id).unwrap(), + &messages[r.id as usize].common, outputs.get(&r.id).unwrap(), ) .unwrap() From 9f0a330223b1489e04b7f480fef1b99908570f56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 15:51:35 +0200 Subject: [PATCH 36/91] refactor --- .../src/threshold_schnorr/batch_avss.rs | 53 ++++++++++--------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index a3e3b7b757..7139cdc00e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -254,7 +254,19 @@ impl SharesForNode { }) } - fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { + fn verify( + &self, + nodes: &Nodes, + receiver: PartyId, + message: &CommonMessage, + challenge: &[S], + expected_batch_size: usize, + ) -> FastCryptoResult<()> { + if self.weight() != nodes.weight_of(receiver)? + || self.try_uniform_batch_size()? 
!= expected_batch_size + { + return Err(InvalidMessage); + } for shares in &self.shares { shares.verify(message, challenge)?; } @@ -697,8 +709,7 @@ impl Receiver { }) .and_then(SharesForNode::from_bytes) .and_then(|my_shares| { - verify_shares( - &my_shares, + my_shares.verify( &self.nodes, self.id, common_message, @@ -790,8 +801,7 @@ impl Receiver { &common_message.shared, &self.random_oracle(), |shares: &SharesForNode| { - verify_shares( - shares, + shares.verify( &self.nodes, accuser_id, common_message, @@ -935,7 +945,13 @@ impl Receiver { .filter_map(|response| { response .shares - .verify(&message.common, &challenge) + .verify( + &self.nodes, + response.responder_id, + &message.common, + &challenge, + self.batch_size, + ) .ok() .map(|_| response.shares) }) @@ -948,7 +964,13 @@ impl Receiver { } let my_shares = SharesForNode::recover(self, &response_shares)?; - my_shares.verify(&message.common, &challenge)?; + my_shares.verify( + &self.nodes, + self.id, + &message.common, + &challenge, + self.batch_size, + )?; Ok(ReceiverOutput { my_shares, @@ -1041,23 +1063,6 @@ fn require_uniform_echo_metadata( .ok_or(InvalidMessage) } -/// Verify a set of shares receiver from a Dealer -fn verify_shares( - shares: &SharesForNode, - nodes: &Nodes, - receiver: PartyId, - message: &CommonMessage, - challenge: &[S], - expected_batch_size: usize, -) -> FastCryptoResult<()> { - if shares.weight() != nodes.weight_of(receiver)? - || shares.try_uniform_batch_size()? 
!= expected_batch_size - { - return Err(InvalidMessage); - } - shares.verify(message, challenge) -} - fn compute_challenge( random_oracle: &RandomOracle, c: &[G], From 78dc46d08a1dd43f3642270dd33e7689e9840a48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 16:04:56 +0200 Subject: [PATCH 37/91] refactor --- .../src/threshold_schnorr/batch_avss.rs | 49 +++++++------------ .../src/threshold_schnorr/reed_solomon.rs | 27 ++++++---- 2 files changed, 35 insertions(+), 41 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7139cdc00e..7739b6a05d 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -256,15 +256,12 @@ impl SharesForNode { fn verify( &self, - nodes: &Nodes, - receiver: PartyId, message: &CommonMessage, challenge: &[S], + weight: u16, expected_batch_size: usize, ) -> FastCryptoResult<()> { - if self.weight() != nodes.weight_of(receiver)? - || self.try_uniform_batch_size()? != expected_batch_size - { + if self.weight() != weight || self.try_uniform_batch_size()? != expected_batch_size { return Err(InvalidMessage); } for shares in &self.shares { @@ -710,10 +707,9 @@ impl Receiver { .and_then(SharesForNode::from_bytes) .and_then(|my_shares| { my_shares.verify( - &self.nodes, - self.id, common_message, &challenge, + self.nodes.weight_of(self.id)?, self.batch_size, )?; Ok(my_shares) @@ -782,6 +778,7 @@ impl Receiver { common_message_hash, } = reveal; let accuser_id = proof.accuser_id; + let accuser_weight = self.nodes.weight_of(accuser_id)?; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let ProcessedEchos { global_root, .. 
} = processed_echos; @@ -801,13 +798,7 @@ impl Receiver { &common_message.shared, &self.random_oracle(), |shares: &SharesForNode| { - shares.verify( - &self.nodes, - accuser_id, - common_message, - &challenge, - self.batch_size, - ) + shares.verify(common_message, &challenge, accuser_weight, self.batch_size) }, )?; @@ -892,14 +883,12 @@ impl Receiver { .flatten_ok() .collect::>>()?; - let mut ciphertext = self.code.decode(shards)?; - // Reed-Solomon `decode` returns shard-aligned padding; trim back to the original encrypted blob length. // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. - ciphertext.truncate(SharesForNode::bcs_serialized_size( + let expected_length = SharesForNode::bcs_serialized_size( self.nodes.weight_of(accuser_id)? as usize, self.batch_size, - )); - Ok(ciphertext) + ); + self.code.decode(shards, expected_length) } /// The check r_i' == r_i from the paper @@ -943,17 +932,16 @@ impl Receiver { let response_shares = responses .into_iter() .filter_map(|response| { - response - .shares - .verify( - &self.nodes, - response.responder_id, - &message.common, - &challenge, - self.batch_size, - ) + self.nodes + .weight_of(response.responder_id) + .map(|w| (w, response.shares)) + .ok() + }) + .filter_map(|(weight, shares)| { + shares + .verify(&message.common, &challenge, weight, self.batch_size) .ok() - .map(|_| response.shares) + .map(|_| shares) }) .collect_vec(); @@ -965,10 +953,9 @@ impl Receiver { let my_shares = SharesForNode::recover(self, &response_shares)?; my_shares.verify( - &self.nodes, - self.id, &message.common, &challenge, + self.nodes.weight_of(self.id)?, self.batch_size, )?; diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index fb601ccf78..fe90ab6a5a 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -166,7 
+166,11 @@ impl ErasureCoder { } /// Note that the result may be padded with zeroes, and it is up to the caller to remove them. - pub fn decode(&self, shards: Vec>) -> FastCryptoResult> { + pub fn decode( + &self, + shards: Vec>, + expected_len: usize, + ) -> FastCryptoResult> { if shards.len() != self.0.total_shard_count() { return Err(InputTooShort(self.0.total_shard_count())); } @@ -187,11 +191,15 @@ impl ErasureCoder { return Err(TooManyErrors(0)); // This is just an erasure code, so we can't correct errors. } - let data = shards + let mut data = shards .into_iter() .take(self.0.data_shard_count()) .flatten() .collect_vec(); + if data.len() > expected_len { + return Err(InvalidInput); + } + data.truncate(expected_len); Ok(data) } } @@ -266,12 +274,8 @@ mod tests { } let coder = ErasureCoder::new(n, k).unwrap(); - let recovered = coder.decode(opt_shards).unwrap(); - let shard_size = len.div_ceil(k); - let expected_len = shard_size * k; - assert_eq!(recovered.len(), expected_len); - assert_eq!(&recovered[..len], &data); - assert!(recovered[len..].iter().all(|&b| b == 0)); + let recovered = coder.decode(opt_shards, len).unwrap(); + assert_eq!(recovered, data); } } @@ -289,7 +293,7 @@ mod tests { *shard = None; } - assert!(matches!(coder.decode(opt_shards), Err(InvalidInput))); + assert!(matches!(coder.decode(opt_shards, 123), Err(InvalidInput))); } #[test] @@ -305,6 +309,9 @@ mod tests { shards[0].0[0] ^= 1; let opt_shards = shards.into_iter().map(Some).collect_vec(); - assert!(matches!(coder.decode(opt_shards), Err(TooManyErrors(_)))); + assert!(matches!( + coder.decode(opt_shards, 200), + Err(TooManyErrors(_)) + )); } } From f5b4ce9c7b9b6b6d58a8192b34008b806f07dd05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 18:37:31 +0200 Subject: [PATCH 38/91] Fix inverted length check in RS decode Recovered data is shard-aligned, so it's always >= expected_len; the old check rejected every roundtrip whose length wasn't an exact 
multiple of the shard size. --- fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index fe90ab6a5a..c11fafb010 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -196,7 +196,7 @@ impl ErasureCoder { .take(self.0.data_shard_count()) .flatten() .collect_vec(); - if data.len() > expected_len { + if data.len() < expected_len { return Err(InvalidInput); } data.truncate(expected_len); From a0ca65e6bf73a493ab775abef91af7144481f227 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 18:38:01 +0200 Subject: [PATCH 39/91] Add State to DecryptionOutcome::Valid State carries common_message + global_root + recipient_root, so a receiver only needs (State, ReceiverOutput) to handle later Reveal / Blame requests. handle_reveal and handle_blame drop their separate ProcessedEchos / CommonMessage parameters. --- .../src/threshold_schnorr/batch_avss.rs | 68 +++++++++++++------ 1 file changed, 48 insertions(+), 20 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7739b6a05d..6e0729c84f 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -112,11 +112,26 @@ pub struct ProcessedEchos { /// a complaint to broadcast instead. #[allow(clippy::large_enum_variant)] pub enum DecryptionOutcome { - Valid { output: ReceiverOutput, vote: Vote }, + Valid { + output: ReceiverOutput, + vote: Vote, + /// State the party should retain to handle later [Reveal] / [Blame] requests via + /// [Receiver::handle_reveal] and [Receiver::handle_blame]. 
+ state: State, + }, InvalidShares(Reveal), InvalidDispersal(Blame), } +/// Context retained by a receiver after [Receiver::verify_and_decrypt] succeeds. Together with +/// the [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests. +#[derive(Clone, Debug)] +pub struct State { + pub common_message: CommonMessage, + pub global_root: merkle::Node, + pub recipient_root: merkle::Node, +} + /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's /// broadcast or a [InvalidShares] / [InvalidDispersal] complaint otherwise. #[allow(clippy::large_enum_variant)] @@ -600,8 +615,9 @@ impl Receiver { /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed /// shards along with the r and r_i values. /// - /// The party should keep its [ProcessedEchos] around in order to handle future requests - /// through [Self::handle_reveal] and [Self::handle_blame]. + /// Once [Self::verify_and_decrypt] is called, the party should keep the resulting [State] + /// around in order to handle future requests through [Self::handle_reveal] and + /// [Self::handle_blame]. 
pub fn process_echo_messages( &self, echo_messages: &[Echo], @@ -723,9 +739,14 @@ impl Receiver { public_keys: full_public_keys.clone(), }, vote: Vote { - global_root, + global_root: global_root.clone(), common_message_hash: compute_common_message_hash(common_message), }, + state: State { + common_message: common_message.clone(), + global_root, + recipient_root, + }, }), (true, Ok(_)) => { // Repackage each echo's per-shard proof as a ShardContribution @@ -768,8 +789,7 @@ impl Receiver { pub fn handle_reveal( &self, reveal: &Reveal, - processed_echos: &ProcessedEchos, - common_message: &CommonMessage, + state: &State, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Reveal { @@ -781,7 +801,11 @@ impl Receiver { let accuser_weight = self.nodes.weight_of(accuser_id)?; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - let ProcessedEchos { global_root, .. } = processed_echos; + let State { + common_message, + global_root, + .. + } = state; if common_message_hash != &compute_common_message_hash(common_message) { return Err(InvalidProof); @@ -811,8 +835,7 @@ impl Receiver { pub fn handle_blame( &self, blame: &Blame, - processed_echos: &ProcessedEchos, - common_message: &CommonMessage, + state: &State, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Blame { @@ -822,7 +845,11 @@ impl Receiver { } = blame; let accuser_id = *accuser_id; - let ProcessedEchos { recipient_root, .. } = processed_echos; + let State { + common_message, + recipient_root, + .. 
+ } = state; if common_message_hash != &compute_common_message_hash(common_message) || shards.iter().map(|s| s.sender).unique().count() != shards.len() @@ -1102,8 +1129,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - Dealer, DecryptionOutcome, Message, ProcessedEchos, Receiver, ReceiverOutput, ShareBatch, - SharesForNode, + Dealer, DecryptionOutcome, Message, Receiver, ReceiverOutput, ShareBatch, SharesForNode, + State, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1328,15 +1355,12 @@ mod tests { .map(|i| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - // Process echoes + verify_and_decrypt. Each receiver also keeps its [ProcessedEchos] in - // order to handle later complaints. - let mut pems: HashMap = HashMap::new(); + // Process echoes + verify_and_decrypt. let outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { let pem = r.process_echo_messages(echoes).unwrap(); - pems.insert(r.id, pem.clone()); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1356,11 +1380,16 @@ mod tests { ), }; - // The other receivers each get a Valid output. + // The other receivers each get a Valid output. Keep both `output` and `state` so the + // honest receivers can answer the victim's complaint. + let mut states: HashMap = HashMap::new(); let mut outputs: HashMap = outcomes .into_iter() .map(|(id, o)| match o { - DecryptionOutcome::Valid { output, .. } => (id, output), + DecryptionOutcome::Valid { output, state, .. 
} => { + states.insert(id, state); + (id, output) + } other => panic!( "expected Valid from honest receiver {id}, got {:?}", outcome_kind(&other) @@ -1375,8 +1404,7 @@ mod tests { .map(|r| { r.handle_reveal( &reveal, - pems.get(&r.id).unwrap(), - &messages[r.id as usize].common, + states.get(&r.id).unwrap(), outputs.get(&r.id).unwrap(), ) .unwrap() From dd45eee37abfaf289c949a3302d0b2749ef0cc49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 18:48:29 +0200 Subject: [PATCH 40/91] Rename echo_message -> echo, process_echo_messages -> process_echos --- fastcrypto-tbls/benches/batch_avss.rs | 10 +++---- .../src/threshold_schnorr/batch_avss.rs | 29 +++++++++---------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +-- 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 3b2bcd0296..6956aed406 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -133,7 +133,7 @@ mod batch_avss_benches { let echoes: Vec> = receivers .iter() .enumerate() - .map(|(i, r)| r.echo_message(&messages[i]).unwrap()) + .map(|(i, r)| r.echo(&messages[i]).unwrap()) .collect(); let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); @@ -141,7 +141,7 @@ mod batch_avss_benches { process.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), - |b| b.iter(|| r1.process_echo_messages(&echoes_for_party_1).unwrap()), + |b| b.iter(|| r1.process_echos(&echoes_for_party_1).unwrap()), ); } } @@ -180,13 +180,11 @@ mod batch_avss_benches { let echoes: Vec> = receivers .iter() .enumerate() - .map(|(i, r)| r.echo_message(&messages[i]).unwrap()) + .map(|(i, r)| r.echo(&messages[i]).unwrap()) .collect(); let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); - let pem = receivers[1] - .process_echo_messages(&echoes_for_party_1) - .unwrap(); + let pem = 
receivers[1].process_echos(&echoes_for_party_1).unwrap(); assert_valid_batch( receivers[1] .verify_and_decrypt(pem, &messages[1].common) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 6e0729c84f..04c8fc94b2 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -187,7 +187,7 @@ pub struct ReceiverOutput { /// If we say that node i has a weight `W_i`, we have /// `indices().len() == shares_for_secret(i).len() == weight() = W_i` /// -/// These can be created either by decrypting the shares from the dealer (see [Receiver::process_echo_messages]) or by recovering them from complaint responses. +/// These can be created either by decrypting the shares from the dealer (see [Receiver::process_echos]) or by recovering them from complaint responses. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SharesForNode { pub shares: Vec, @@ -353,8 +353,8 @@ impl Dealer { /// /// * `nodes` defines the set of receivers and their weights. /// * `dealer_id` is the id of this dealer as a node. - /// * `f` is the maximum number of Byzantine parties counted by weight. /// * `t` is the number of shares that are needed to reconstruct the full key/signature. + /// * `f` is the maximum number of Byzantine parties counted by weight. /// * `sid` is a session identifier that should be unique for each invocation, but the same for all parties. /// * `batch_size_per_weight` is the number of secrets a dealer should deal per weight it has. /// @@ -577,7 +577,7 @@ impl Receiver { } /// 2. When a party receives its message, it verifies the Merkle tree path for its shards and generates Echos, one per party. 
- pub fn echo_message(&self, message: &Message) -> FastCryptoResult> { + pub fn echo(&self, message: &Message) -> FastCryptoResult> { if message .dispersal .iter() @@ -618,14 +618,11 @@ impl Receiver { /// Once [Self::verify_and_decrypt] is called, the party should keep the resulting [State] /// around in order to handle future requests through [Self::handle_reveal] and /// [Self::handle_blame]. - pub fn process_echo_messages( - &self, - echo_messages: &[Echo], - ) -> FastCryptoResult { + pub fn process_echos(&self, echos: &[Echo]) -> FastCryptoResult { // Filter out invalid echo messages - let valid_echoes = echo_messages + let valid_echoes = echos .iter() - .filter(|echo_message| echo_message.verify(self.id).is_ok()) + .filter(|echo| echo.verify(self.id).is_ok()) .cloned() .collect_vec(); @@ -634,7 +631,7 @@ impl Receiver { let required_weight = self.nodes.total_weight() - self.f; if self .nodes - .total_weight_of(valid_echoes.iter().map(|echo_message| &echo_message.sender))? + .total_weight_of(valid_echoes.iter().map(|echo| &echo.sender))? 
< required_weight { return Err(NotEnoughWeight(required_weight as usize)); @@ -1230,7 +1227,7 @@ mod tests { let echoes_by_sender = receivers .iter() - .map(|receiver| receiver.echo_message(&messages[receiver.id as usize])) + .map(|receiver| receiver.echo(&messages[receiver.id as usize])) .collect::>>() .unwrap(); @@ -1249,7 +1246,7 @@ mod tests { .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) - .map(|((receiver, _message), echoes)| receiver.process_echo_messages(echoes).unwrap()) + .map(|((receiver, _message), echoes)| receiver.process_echos(echoes).unwrap()) .collect_vec(); let all_shares = receivers @@ -1347,12 +1344,12 @@ mod tests { let messages = dealer.create_message_cheating(&mut rng).unwrap(); // Echo phase - let echo_messages = receivers + let echos = receivers .iter() - .map(|r| r.echo_message(&messages[r.id as usize]).unwrap()) + .map(|r| r.echo(&messages[r.id as usize]).unwrap()) .collect_vec(); let echoes_per_recipient = (0..n) - .map(|i| echo_messages.iter().map(|em| em[i].clone()).collect_vec()) + .map(|i| echos.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); // Process echoes + verify_and_decrypt. @@ -1360,7 +1357,7 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { - let pem = r.process_echo_messages(echoes).unwrap(); + let pem = r.process_echos(echoes).unwrap(); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 2157dfb2a9..232d103c4e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -239,7 +239,7 @@ mod tests { // Each receiver produces echoes addressed to every party. 
let echoes: Vec> = receivers .iter() - .map(|r| r.echo_message(&messages[r.id as usize]).unwrap()) + .map(|r| r.echo(&messages[r.id as usize]).unwrap()) .collect(); // Bundle echoes per recipient: echoes_per_recipient[i] = echoes addressed to party i. @@ -250,7 +250,7 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = r.process_echo_messages(echoes).unwrap(); + let pem = r.process_echos(echoes).unwrap(); let output = assert_valid_batch(r.verify_and_decrypt(pem, &msg.common).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } From 5c1b02f1bfffed719d5c021f3f35a658a082f27b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 19:18:20 +0200 Subject: [PATCH 41/91] Bind complaints to dealer broadcast via recipient_root + proof Reveal and Blame now carry the accuser's per-ciphertext Merkle root plus a proof binding it under the dealer's global_root. handle_reveal verifies the proof and re-runs check_avid_consistency, closing a share-extraction attack where a malicious accuser could submit any ciphertext that decrypts to invalid shares and trick honest parties into responding. handle_blame verifies the same proof and uses the verified accuser's r_i to authenticate the contributed shards (the previous code checked them against the verifier's own r_i, silently rejecting genuine dispersal complaints). State drops the verifier's own recipient_root since it's no longer needed for either handler. Adds test_share_recovery_blame mirroring test_share_recovery, plus a shard-mutation hook on create_message_with_mutation so the test can corrupt the dealer's RS dispersal. 
--- .../src/threshold_schnorr/batch_avss.rs | 270 ++++++++++++++++-- .../src/threshold_schnorr/reed_solomon.rs | 2 +- 2 files changed, 245 insertions(+), 27 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 04c8fc94b2..73c3582cb6 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -129,7 +129,6 @@ pub enum DecryptionOutcome { pub struct State { pub common_message: CommonMessage, pub global_root: merkle::Node, - pub recipient_root: merkle::Node, } /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's @@ -154,6 +153,11 @@ pub struct Vote { pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, + /// The accuser's per-ciphertext Merkle root, with a proof binding it under the dealer's + /// `global_root` at the accuser's leaf. Without this, a malicious accuser could submit any + /// ciphertext that decrypts to invalid shares and trick honest parties into responding. + pub recipient_root: merkle::Node, + pub recipient_root_proof: merkle::MerkleProof, /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest<32>, } @@ -164,6 +168,11 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, pub shards: Vec, + /// The accuser's per-ciphertext Merkle root, with a proof binding it under the dealer's + /// `global_root` at the accuser's leaf. Verifiers check the contributed shards against this + /// root. + pub recipient_root: merkle::Node, + pub recipient_root_proof: merkle::MerkleProof, pub common_message_hash: Digest<32>, } @@ -385,14 +394,17 @@ impl Dealer { /// 1. The Dealer generates shares for the secrets and creates a set of messages - one per receiver. 
pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult> { - self.create_message_with_mutation(rng, |_| {}) + self.create_message_with_mutation(rng, |_| {}, |_| {}) } - /// Like [Self::create_message] but exposes a mutation hook over the plaintexts so tests can simulate a faulty dealer by corrupting one slot. + /// Like [Self::create_message] but exposes mutation hooks for tests: `mutate_plaintexts` runs + /// before encryption, and `mutate_shards` runs after RS-encoding (and before the per-recipient + /// Merkle trees are built), so tests can simulate a faulty dealer at either layer. fn create_message_with_mutation( &self, rng: &mut impl AllowedRng, - mutate: impl FnOnce(&mut [(crate::ecies_v1::PublicKey, Vec)]), + mutate_plaintexts: impl FnOnce(&mut [(crate::ecies_v1::PublicKey, Vec)]), + mutate_shards: impl FnOnce(&mut Vec>>), ) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) @@ -437,7 +449,7 @@ impl Dealer { }) .collect_vec(); - mutate(&mut pk_and_msgs); + mutate_plaintexts(&mut pk_and_msgs); let ciphertext = MultiRecipientEncryption::encrypt( &pk_and_msgs, @@ -451,7 +463,7 @@ impl Dealer { (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards )?; - let shards = ciphertexts + let mut shards: Vec>> = ciphertexts .iter() .map(|c| { let shards = code.encode(c)?; // One shard per weight @@ -459,6 +471,8 @@ impl Dealer { }) .collect::>>()?; + mutate_shards(&mut shards); + let recipient_trees = shards .iter() .map(recipient_tree) @@ -742,13 +756,12 @@ impl Receiver { state: State { common_message: common_message.clone(), global_root, - recipient_root, }, }), (true, Ok(_)) => { - // Repackage each echo's per-shard proof as a ShardContribution let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; + let recipient_root_proof = any_echo.recipient_root_proof.clone(); let shards = valid_echoes .into_iter() .map(|e| ShardContribution { 
@@ -760,11 +773,15 @@ impl Receiver { Ok(DecryptionOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, + recipient_root, + recipient_root_proof, common_message_hash, })) } (_, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + let common_message_hash = any_echo.common_message_hash; + let recipient_root_proof = any_echo.recipient_root_proof.clone(); Ok(DecryptionOutcome::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, @@ -774,15 +791,19 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - common_message_hash: any_echo.common_message_hash, + recipient_root, + recipient_root_proof, + common_message_hash, })) } } } /// 5. Upon receiving a [Reveal] from another party, verify it and respond with this party's - /// own shares so the accuser can recover. Decryption with the recovery package must yield - /// invalid shares against `common_message`. + /// own shares so the accuser can recover. The accuser's `recipient_root` must sit under + /// the dealer's `global_root` at the accuser's leaf, the ciphertext must re-encode to + /// that root (binding it to the dealer's broadcast), and decryption with the recovery + /// package must yield invalid shares against `common_message`. pub fn handle_reveal( &self, reveal: &Reveal, @@ -792,6 +813,8 @@ impl Receiver { let Reveal { proof, ciphertext, + recipient_root, + recipient_root_proof, common_message_hash, } = reveal; let accuser_id = proof.accuser_id; @@ -801,13 +824,19 @@ impl Receiver { let State { common_message, global_root, - .. 
} = state; if common_message_hash != &compute_common_message_hash(common_message) { return Err(InvalidProof); } + recipient_root_proof + .verify_proof_with_unserialized_leaf(global_root, recipient_root, accuser_id as usize) + .map_err(|_| InvalidProof)?; + + self.check_avid_consistency(ciphertext, recipient_root) + .map_err(|_| InvalidProof)?; + let challenge = compute_challenge_from_common_message( &self.random_oracle(), global_root, @@ -826,9 +855,11 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The accuser must have collected enough - /// authenticated shards whose re-encoded ciphertext root differs from the locally-known - /// `r_i`. On success, respond with this party's own shares. + /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The accuser's + /// `recipient_root` must sit under the dealer's `global_root` at the accuser's leaf, the + /// contributed shards must each be authenticated under that root, and re-encoding the + /// reconstructed ciphertext must not match it. On success, respond with this party's own + /// shares. pub fn handle_blame( &self, blame: &Blame, @@ -838,18 +869,26 @@ impl Receiver { let Blame { accuser_id, shards, + recipient_root, + recipient_root_proof, common_message_hash, } = blame; let accuser_id = *accuser_id; let State { common_message, - recipient_root, - .. 
+ global_root, } = state; - if common_message_hash != &compute_common_message_hash(common_message) - || shards.iter().map(|s| s.sender).unique().count() != shards.len() + if common_message_hash != &compute_common_message_hash(common_message) { + return Err(InvalidProof); + } + + recipient_root_proof + .verify_proof_with_unserialized_leaf(global_root, recipient_root, accuser_id as usize) + .map_err(|_| InvalidProof)?; + + if shards.iter().map(|s| s.sender).unique().count() != shards.len() || shards.iter().any(|s| s.verify(recipient_root).is_err()) { return Err(InvalidProof); @@ -871,7 +910,8 @@ impl Receiver { }) .map_err(|_| InvalidProof)?; - // The blame is valid iff re-encoding the recovered ciphertext does not match `r_i`. + // The blame is valid iff re-encoding the recovered ciphertext does not match the + // accuser's `r_i`. if self .check_avid_consistency(&ciphertext, recipient_root) .is_ok() @@ -1428,6 +1468,165 @@ mod tests { } } + #[test] + fn test_share_recovery_blame() { + // Dealer is honest at the share layer (decryption yields valid shares) but corrupts the + // last f senders' shards for receiver 0's ciphertext. Receiver 0 collects the W - f + // unaffected echoes, decodes the original ciphertext, decrypts valid shares, but + // re-encoding the recovered ciphertext yields a tree root different from the dealer's + // r_0 — triggering an InvalidDispersal complaint. 
+ let t = 3; + let f = 2; + let n = 7; + let batch_size_per_weight: u16 = 3; + + let mut rng = rand::thread_rng(); + let sks = (0..n) + .map(|_| ecies_v1::PrivateKey::::new(&mut rng)) + .collect::>(); + let nodes = Nodes::new( + sks.iter() + .enumerate() + .map(|(id, sk)| Node { + id: id as u16, + pk: PublicKey::from_private_key(sk), + weight: 1, + }) + .collect::>(), + ) + .unwrap(); + + let sid = b"tbls test".to_vec(); + let dealer_id = 1; + let dealer = Dealer::new( + nodes.clone(), + dealer_id, + f, + t, + sid.clone(), + batch_size_per_weight, + ) + .unwrap(); + + let receivers = sks + .into_iter() + .enumerate() + .map(|(id, secret_key)| { + Receiver::new( + nodes.clone(), + id as u16, + dealer_id, + f, + t, + sid.clone(), + secret_key, + batch_size_per_weight, + ) + .unwrap() + }) + .collect_vec(); + + let messages = dealer.create_message_cheating_dispersal(&mut rng).unwrap(); + let victim_id = 0u16; + + // Echo phase + let echos = receivers + .iter() + .map(|r| r.echo(&messages[r.id as usize]).unwrap()) + .collect_vec(); + + // Bundle echoes per recipient. For the victim, simulate the last f senders being silent + // (their corrupted shards would otherwise make the receiver's decode fail outright). + let echoes_per_recipient = (0..n) + .map(|i| { + let take = if i == victim_id as usize { + n - f as usize + } else { + n + }; + echos + .iter() + .take(take) + .map(|em| em[i].clone()) + .collect_vec() + }) + .collect_vec(); + + // Process echoes + verify_and_decrypt. + let outcomes: HashMap = receivers + .iter() + .zip(echoes_per_recipient.iter()) + .map(|(r, echoes)| { + let pem = r.process_echos(echoes).unwrap(); + ( + r.id, + r.verify_and_decrypt(pem, &messages[r.id as usize].common) + .unwrap(), + ) + }) + .collect(); + + // Receiver 0 emits an InvalidDispersal complaint. 
+ let mut outcomes = outcomes; + let blame = match outcomes.remove(&victim_id).unwrap() { + DecryptionOutcome::InvalidDispersal(b) => b, + other => panic!( + "expected InvalidDispersal from victim, got {:?}", + outcome_kind(&other) + ), + }; + + // The other receivers each get a Valid output. Keep both `output` and `state` so the + // honest receivers can answer the victim's complaint. + let mut states: HashMap = HashMap::new(); + let mut outputs: HashMap = outcomes + .into_iter() + .map(|(id, o)| match o { + DecryptionOutcome::Valid { output, state, .. } => { + states.insert(id, state); + (id, output) + } + other => panic!( + "expected Valid from honest receiver {id}, got {:?}", + outcome_kind(&other) + ), + }) + .collect(); + + // Each non-victim verifies the complaint and returns their shares. + let responses = receivers + .iter() + .filter(|r| r.id != victim_id) + .map(|r| { + r.handle_blame( + &blame, + states.get(&r.id).unwrap(), + outputs.get(&r.id).unwrap(), + ) + .unwrap() + }) + .collect_vec(); + + // Victim recovers via interpolation across t responses. + let recovered = receivers[victim_id as usize] + .recover(&messages[victim_id as usize], responses) + .unwrap(); + outputs.insert(victim_id, recovered); + + // Sanity: every receiver now holds verifiable shares for every secret. + for l in 0..dealer.batch_size { + let shares = receivers + .iter() + .take(t as usize) + .map(|r| Eval { + index: ShareIndex::try_from(r.id + 1).unwrap(), + value: outputs.get(&r.id).unwrap().my_shares.shares[0].batch[l], + }) + .collect_vec(); + Poly::recover_c0(t, shares.into_iter()).unwrap(); + } + } + fn assert_valid(outcome: DecryptionOutcome) -> ReceiverOutput { match outcome { DecryptionOutcome::Valid { output, .. } => output, @@ -1451,12 +1650,31 @@ mod tests { &self, rng: &mut impl AllowedRng, ) -> FastCryptoResult> { - self.create_message_with_mutation(rng, |pk_and_msgs| { - // Flip a low-order byte in receiver 0's plaintext. 
Targeting an offset deep enough - // to land inside an actual share (past BCS length prefixes) ensures the - // deserialized struct is well-formed but holds an invalid scalar field. - pk_and_msgs[0].1[7] ^= 1; - }) + self.create_message_with_mutation( + rng, + |pk_and_msgs| { + pk_and_msgs[0].1[7] ^= 1; + }, + |_| {}, + ) + } + + fn create_message_cheating_dispersal( + &self, + rng: &mut impl AllowedRng, + ) -> FastCryptoResult> { + let f = self.f as usize; + let n = self.nodes.total_weight() as usize; + self.create_message_with_mutation( + rng, + |_| {}, + |shards| { + // Flip a byte in the shards held by the last `f` senders for ciphertext 0. + for sender_shards in shards[0].iter_mut().skip(n - f) { + sender_shards[0].0[0] ^= 1; + } + }, + ) } } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index c11fafb010..b4339035ab 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -132,7 +132,7 @@ pub struct ErasureCoder(ReedSolomon); #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(transparent)] -pub struct Shard(Vec); +pub struct Shard(pub(crate) Vec); impl ErasureCoder { /// Create a new erasure encoder/decoder. From 6da8fb55ae593d674ae6e8f217a36bf7c8f24cda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 20:26:11 +0200 Subject: [PATCH 42/91] Pass State to recover instead of Message DecryptionOutcome is now { state, kind: OutcomeKind } so every outcome carries the state, and the accuser feeds their own state into recover. Removes the trust-an-arbitrary-Message footgun. 
--- fastcrypto-tbls/benches/batch_avss.rs | 4 +- .../src/threshold_schnorr/batch_avss.rs | 170 ++++++++++-------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +- 3 files changed, 97 insertions(+), 81 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 6956aed406..0084de1654 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -238,8 +238,8 @@ mod batch_avss_benches { criterion_main!(batch_avss_benches::batch_avss_benches); fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { - match outcome { - batch_avss::DecryptionOutcome::Valid { output, .. } => output, + match outcome.kind { + batch_avss::OutcomeKind::Valid { output, .. } => output, _ => panic!("Expected valid outcome"), } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 73c3582cb6..71a90c699d 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -13,7 +13,6 @@ use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; -use crate::threshold_schnorr::batch_avss::DecryptionOutcome::Valid; use crate::threshold_schnorr::bcs::BCSSerialized; use crate::threshold_schnorr::complaint; use crate::threshold_schnorr::complaint::ComplaintResponse; @@ -108,23 +107,23 @@ pub struct ProcessedEchos { valid_echoes: Vec, } -/// The result of [Receiver::verify_and_decrypt]: either valid shares plus a vote to broadcast, or -/// a complaint to broadcast instead. +/// The result of [Receiver::verify_and_decrypt]. 
Carries the per-receiver [State] (sufficient, +/// together with a [ReceiverOutput], to handle later [Reveal] / [Blame] requests and to call +/// [Receiver::recover]) plus an [OutcomeKind] describing what the receiver actually got. +pub struct DecryptionOutcome { + pub state: State, + pub kind: OutcomeKind, +} + #[allow(clippy::large_enum_variant)] -pub enum DecryptionOutcome { - Valid { - output: ReceiverOutput, - vote: Vote, - /// State the party should retain to handle later [Reveal] / [Blame] requests via - /// [Receiver::handle_reveal] and [Receiver::handle_blame]. - state: State, - }, +pub enum OutcomeKind { + Valid { output: ReceiverOutput, vote: Vote }, InvalidShares(Reveal), InvalidDispersal(Blame), } -/// Context retained by a receiver after [Receiver::verify_and_decrypt] succeeds. Together with -/// the [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests. +/// Context retained by a receiver after [Receiver::verify_and_decrypt]. Together with the +/// [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests. #[derive(Clone, Debug)] pub struct State { pub common_message: CommonMessage, @@ -218,13 +217,13 @@ pub struct ShareBatch { impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. - /// The receiver's local [ReceiverOutput] (in the Valid case) is consumed and not part of the - /// wire format. + /// The receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and + /// not part of the wire format. pub fn into_response(self) -> Response { - match self { - DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), - DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), - DecryptionOutcome::InvalidDispersal(b) => Response::InvalidDispersal(b), + match self.kind { + OutcomeKind::Valid { vote, .. 
} => Response::Vote(vote), + OutcomeKind::InvalidShares(r) => Response::InvalidShares(r), + OutcomeKind::InvalidDispersal(b) => Response::InvalidDispersal(b), } } } @@ -742,22 +741,23 @@ impl Receiver { Ok(my_shares) }); + let state = State { + common_message: common_message.clone(), + global_root: global_root.clone(), + }; + // TODO: Revisit this dispatch. - match (faulty_dealer, decrypted_shares) { - (false, Ok(my_shares)) => Ok(Valid { + let kind = match (faulty_dealer, decrypted_shares) { + (false, Ok(my_shares)) => OutcomeKind::Valid { output: ReceiverOutput { my_shares, public_keys: full_public_keys.clone(), }, vote: Vote { - global_root: global_root.clone(), - common_message_hash: compute_common_message_hash(common_message), - }, - state: State { - common_message: common_message.clone(), global_root, + common_message_hash: compute_common_message_hash(common_message), }, - }), + }, (true, Ok(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; @@ -770,19 +770,19 @@ impl Receiver { proof: e.authenticated_shards.proof, }) .collect_vec(); - Ok(DecryptionOutcome::InvalidDispersal(Blame { + OutcomeKind::InvalidDispersal(Blame { accuser_id: self.id, shards, recipient_root, recipient_root_proof, common_message_hash, - })) + }) } (_, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; let recipient_root_proof = any_echo.recipient_root_proof.clone(); - Ok(DecryptionOutcome::InvalidShares(Reveal { + OutcomeKind::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, shared, @@ -794,9 +794,10 @@ impl Receiver { recipient_root, recipient_root_proof, common_message_hash, - })) + }) } - } + }; + Ok(DecryptionOutcome { state, kind }) } /// 5. 
Upon receiving a [Reveal] from another party, verify it and respond with this party's @@ -974,7 +975,7 @@ impl Receiver { /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( &self, - message: &Message, + state: &State, responses: Vec>, ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. @@ -987,11 +988,14 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let global_root = global_tree_from_message(message)?.root(); + let State { + common_message, + global_root, + } = state; let challenge = compute_challenge_from_common_message( &self.random_oracle(), - &global_root, - &message.common, + global_root, + common_message, ); let response_shares = responses .into_iter() @@ -1003,7 +1007,7 @@ impl Receiver { }) .filter_map(|(weight, shares)| { shares - .verify(&message.common, &challenge, weight, self.batch_size) + .verify(common_message, &challenge, weight, self.batch_size) .ok() .map(|_| shares) }) @@ -1017,7 +1021,7 @@ impl Receiver { let my_shares = SharesForNode::recover(self, &response_shares)?; my_shares.verify( - &message.common, + common_message, &challenge, self.nodes.weight_of(self.id)?, self.batch_size, @@ -1025,7 +1029,7 @@ impl Receiver { Ok(ReceiverOutput { my_shares, - public_keys: message.common.full_public_keys.clone(), + public_keys: common_message.full_public_keys.clone(), }) } @@ -1166,8 +1170,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - Dealer, DecryptionOutcome, Message, Receiver, ReceiverOutput, ShareBatch, SharesForNode, - State, + Dealer, DecryptionOutcome, Message, OutcomeKind, Receiver, ReceiverOutput, ShareBatch, + SharesForNode, State, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1409,11 +1413,15 @@ mod tests { // Receiver 0 (the targeted victim) 
emits a InvalidShares complaint. let victim_id = 0u16; let mut outcomes = outcomes; - let reveal = match outcomes.remove(&victim_id).unwrap() { - DecryptionOutcome::InvalidShares(r) => r, - other => panic!( + let DecryptionOutcome { + state: victim_state, + kind: victim_kind, + } = outcomes.remove(&victim_id).unwrap(); + let reveal = match victim_kind { + OutcomeKind::InvalidShares(r) => r, + ref other => panic!( "expected InvalidShares from victim, got {:?}", - outcome_kind(&other) + outcome_kind(other) ), }; @@ -1422,15 +1430,17 @@ mod tests { let mut states: HashMap = HashMap::new(); let mut outputs: HashMap = outcomes .into_iter() - .map(|(id, o)| match o { - DecryptionOutcome::Valid { output, state, .. } => { - states.insert(id, state); - (id, output) - } - other => panic!( - "expected Valid from honest receiver {id}, got {:?}", - outcome_kind(&other) - ), + .map(|(id, o)| { + let DecryptionOutcome { state, kind } = o; + states.insert(id, state); + let output = match kind { + OutcomeKind::Valid { output, .. } => output, + ref other => panic!( + "expected Valid from honest receiver {id}, got {:?}", + outcome_kind(other) + ), + }; + (id, output) }) .collect(); @@ -1450,7 +1460,7 @@ mod tests { // Victim recovers via interpolation across t responses. let recovered = receivers[victim_id as usize] - .recover(&messages[victim_id as usize], responses) + .recover(&victim_state, responses) .unwrap(); outputs.insert(victim_id, recovered); @@ -1568,11 +1578,15 @@ mod tests { // Receiver 0 emits an InvalidDispersal complaint. 
let mut outcomes = outcomes; - let blame = match outcomes.remove(&victim_id).unwrap() { - DecryptionOutcome::InvalidDispersal(b) => b, - other => panic!( + let DecryptionOutcome { + state: victim_state, + kind: victim_kind, + } = outcomes.remove(&victim_id).unwrap(); + let blame = match victim_kind { + OutcomeKind::InvalidDispersal(b) => b, + ref other => panic!( "expected InvalidDispersal from victim, got {:?}", - outcome_kind(&other) + outcome_kind(other) ), }; @@ -1581,15 +1595,17 @@ mod tests { let mut states: HashMap = HashMap::new(); let mut outputs: HashMap = outcomes .into_iter() - .map(|(id, o)| match o { - DecryptionOutcome::Valid { output, state, .. } => { - states.insert(id, state); - (id, output) - } - other => panic!( - "expected Valid from honest receiver {id}, got {:?}", - outcome_kind(&other) - ), + .map(|(id, o)| { + let DecryptionOutcome { state, kind } = o; + states.insert(id, state); + let output = match kind { + OutcomeKind::Valid { output, .. } => output, + ref other => panic!( + "expected Valid from honest receiver {id}, got {:?}", + outcome_kind(other) + ), + }; + (id, output) }) .collect(); @@ -1609,7 +1625,7 @@ mod tests { // Victim recovers via interpolation across t responses. let recovered = receivers[victim_id as usize] - .recover(&messages[victim_id as usize], responses) + .recover(&victim_state, responses) .unwrap(); outputs.insert(victim_id, recovered); @@ -1628,17 +1644,17 @@ mod tests { } fn assert_valid(outcome: DecryptionOutcome) -> ReceiverOutput { - match outcome { - DecryptionOutcome::Valid { output, .. } => output, - other => panic!("expected valid outcome, got {:?}", outcome_kind(&other)), + match outcome.kind { + OutcomeKind::Valid { output, .. } => output, + ref other => panic!("expected valid outcome, got {:?}", outcome_kind(other)), } } - fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { - match outcome { - DecryptionOutcome::Valid { .. 
} => "Valid", - DecryptionOutcome::InvalidShares(_) => "InvalidShares", - DecryptionOutcome::InvalidDispersal(_) => "InvalidDispersal", + fn outcome_kind(kind: &OutcomeKind) -> &'static str { + match kind { + OutcomeKind::Valid { .. } => "Valid", + OutcomeKind::InvalidShares(_) => "InvalidShares", + OutcomeKind::InvalidDispersal(_) => "InvalidDispersal", } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 232d103c4e..47931e6a55 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -505,8 +505,8 @@ mod tests { } fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { - match outcome { - batch_avss::DecryptionOutcome::Valid { output, .. } => output, + match outcome.kind { + batch_avss::OutcomeKind::Valid { output, .. } => output, _ => panic!("expected valid batch_avss output"), } } From 69bc0240781ce9b351db2af9b7fe527f2881d7f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 20:41:10 +0200 Subject: [PATCH 43/91] Prefer Blame to Reveal; group AVID helpers at end of impl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dispatch in verify_and_decrypt now routes any AVID-inconsistent outcome to Blame, regardless of share-decryption result. Reveal only fires when AVID is consistent but shares are bad — the genuine shares-layer case where Blame would not verify. reconstruct_ciphertext and check_avid_consistency moved next to the other Receiver helpers. 
--- .../src/threshold_schnorr/batch_avss.rs | 101 +++++++++--------- 1 file changed, 50 insertions(+), 51 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 71a90c699d..0d0abb386e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -746,7 +746,6 @@ impl Receiver { global_root: global_root.clone(), }; - // TODO: Revisit this dispatch. let kind = match (faulty_dealer, decrypted_shares) { (false, Ok(my_shares)) => OutcomeKind::Valid { output: ReceiverOutput { @@ -758,7 +757,7 @@ impl Receiver { common_message_hash: compute_common_message_hash(common_message), }, }, - (true, Ok(_)) => { + (true, _) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; let recipient_root_proof = any_echo.recipient_root_proof.clone(); @@ -778,7 +777,7 @@ impl Receiver { common_message_hash, }) } - (_, Err(_)) => { + (false, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; let common_message_hash = any_echo.common_message_hash; let recipient_root_proof = any_echo.recipient_root_proof.clone(); @@ -923,54 +922,6 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard - /// contributions exposed via `shards_for(party_id) -> Option>`. Fails if the - /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a - /// party's contribution has a shard count that doesn't match its weight. The caller is - /// responsible for having authenticated the shards via their Merkle proofs. 
- fn reconstruct_ciphertext( - &self, - accuser_id: PartyId, - shards_for: impl Fn(PartyId) -> Option>, - ) -> FastCryptoResult> { - let shards: Vec> = self - .nodes - .node_ids_iter() - .map(|id| -> FastCryptoResult>> { - let weight = self.nodes.weight_of(id).expect("valid party id") as usize; - match shards_for(id) { - Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), - // Fail if a contributor's shard count doesn't match its weight. - Some(_) => Err(InvalidInput), - None => Ok(vec![None; weight]), - } - }) - .flatten_ok() - .collect::>>()?; - - // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. - let expected_length = SharesForNode::bcs_serialized_size( - self.nodes.weight_of(accuser_id)? as usize, - self.batch_size, - ); - self.code.decode(shards, expected_length) - } - - /// The check r_i' == r_i from the paper - fn check_avid_consistency( - &self, - ciphertext: &[u8], - expected_root: &merkle::Node, - ) -> FastCryptoResult<()> { - let new_shards = self - .nodes - .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; - if recipient_tree(&new_shards)?.root() != *expected_root { - return Err(InvalidMessage); - } - Ok(()) - } - /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( @@ -1040,6 +991,54 @@ impl Receiver { fn random_oracle(&self) -> RandomOracle { random_oracle_from_sid(&self.sid) } + + /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard + /// contributions exposed via `shards_for(party_id) -> Option>`. Fails if the + /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a + /// party's contribution has a shard count that doesn't match its weight. 
The caller is + /// responsible for having authenticated the shards via their Merkle proofs. + fn reconstruct_ciphertext( + &self, + accuser_id: PartyId, + shards_for: impl Fn(PartyId) -> Option>, + ) -> FastCryptoResult> { + let shards: Vec> = self + .nodes + .node_ids_iter() + .map(|id| -> FastCryptoResult>> { + let weight = self.nodes.weight_of(id).expect("valid party id") as usize; + match shards_for(id) { + Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), + // Fail if a contributor's shard count doesn't match its weight. + Some(_) => Err(InvalidInput), + None => Ok(vec![None; weight]), + } + }) + .flatten_ok() + .collect::>>()?; + + // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. + let expected_length = SharesForNode::bcs_serialized_size( + self.nodes.weight_of(accuser_id)? as usize, + self.batch_size, + ); + self.code.decode(shards, expected_length) + } + + /// The check r_i' == r_i from the paper + fn check_avid_consistency( + &self, + ciphertext: &[u8], + expected_root: &merkle::Node, + ) -> FastCryptoResult<()> { + let new_shards = self + .nodes + .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; + if recipient_tree(&new_shards)?.root() != *expected_root { + return Err(InvalidMessage); + } + Ok(()) + } } impl AuthenticatedShards { From 3d68529e160b8c7b07d3ac3deaeb6bbaaad7ea69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 4 May 2026 20:53:42 +0200 Subject: [PATCH 44/91] Use all_unique; mention recover in State doc --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 0d0abb386e..c5b95f26eb 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -123,7 
+123,8 @@ pub enum OutcomeKind { } /// Context retained by a receiver after [Receiver::verify_and_decrypt]. Together with the -/// [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests. +/// [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests and to call +/// [Receiver::recover]. #[derive(Clone, Debug)] pub struct State { pub common_message: CommonMessage, @@ -888,7 +889,7 @@ impl Receiver { .verify_proof_with_unserialized_leaf(global_root, recipient_root, accuser_id as usize) .map_err(|_| InvalidProof)?; - if shards.iter().map(|s| s.sender).unique().count() != shards.len() + if !shards.iter().map(|s| s.sender).all_unique() || shards.iter().any(|s| s.verify(recipient_root).is_err()) { return Err(InvalidProof); From d997e51b6c362e74a3ca0308a4b17dfb54fa35fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 09:51:11 +0200 Subject: [PATCH 45/91] Extract ComplaintHeader from Reveal/Blame Reveal and Blame each carry the same recipient_root + proof + hash binding the complaint to the dealer's broadcast; pull them into a shared ComplaintHeader with a verify method so handle_reveal and handle_blame share that step. --- .../src/threshold_schnorr/batch_avss.rs | 103 +++++++++--------- 1 file changed, 51 insertions(+), 52 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index c5b95f26eb..af52b7e6ba 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -153,13 +153,7 @@ pub struct Vote { pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, - /// The accuser's per-ciphertext Merkle root, with a proof binding it under the dealer's - /// `global_root` at the accuser's leaf. Without this, a malicious accuser could submit any - /// ciphertext that decrypts to invalid shares and trick honest parties into responding. 
- pub recipient_root: merkle::Node, - pub recipient_root_proof: merkle::MerkleProof, - /// `H(val)` from the dealer's broadcast, binding the complaint to a specific [CommonMessage]. - pub common_message_hash: Digest<32>, + pub header: ComplaintHeader, } /// A complaint by a receiver who decrypted valid shares but found the AVID dispersal @@ -168,9 +162,15 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, pub shards: Vec, - /// The accuser's per-ciphertext Merkle root, with a proof binding it under the dealer's - /// `global_root` at the accuser's leaf. Verifiers check the contributed shards against this - /// root. + pub header: ComplaintHeader, +} + +/// Fields common to [Reveal] and [Blame] that bind the complaint to the dealer's broadcast. +/// `recipient_root` is the accuser's per-ciphertext Merkle root, `recipient_root_proof` binds +/// it under `global_root` at the accuser's leaf, and `common_message_hash` is `H(val)` from the +/// dealer's broadcast. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ComplaintHeader { pub recipient_root: merkle::Node, pub recipient_root_proof: merkle::MerkleProof, pub common_message_hash: Digest<32>, @@ -229,6 +229,24 @@ impl DecryptionOutcome { } } +impl ComplaintHeader { + /// Verify the header against the verifier's [State]: `common_message_hash` matches + /// `state.common_message`, and `recipient_root` is bound under `state.global_root` at + /// `accuser_id`'s leaf. + fn verify(&self, state: &State, accuser_id: PartyId) -> FastCryptoResult<()> { + if self.common_message_hash != compute_common_message_hash(&state.common_message) { + return Err(InvalidProof); + } + self.recipient_root_proof + .verify_proof_with_unserialized_leaf( + &state.global_root, + &self.recipient_root, + accuser_id as usize, + ) + .map_err(|_| InvalidProof) + } +} + impl ShareBatch { /// Verify a batch of shares using the given challenge. 
fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { @@ -760,8 +778,11 @@ impl Receiver { }, (true, _) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let common_message_hash = any_echo.common_message_hash; - let recipient_root_proof = any_echo.recipient_root_proof.clone(); + let header = ComplaintHeader { + recipient_root, + recipient_root_proof: any_echo.recipient_root_proof.clone(), + common_message_hash: any_echo.common_message_hash, + }; let shards = valid_echoes .into_iter() .map(|e| ShardContribution { @@ -773,15 +794,16 @@ impl Receiver { OutcomeKind::InvalidDispersal(Blame { accuser_id: self.id, shards, - recipient_root, - recipient_root_proof, - common_message_hash, + header, }) } (false, Err(_)) => { let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let common_message_hash = any_echo.common_message_hash; - let recipient_root_proof = any_echo.recipient_root_proof.clone(); + let header = ComplaintHeader { + recipient_root, + recipient_root_proof: any_echo.recipient_root_proof.clone(), + common_message_hash: any_echo.common_message_hash, + }; OutcomeKind::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, @@ -791,9 +813,7 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - recipient_root, - recipient_root_proof, - common_message_hash, + header, }) } }; @@ -814,35 +834,26 @@ impl Receiver { let Reveal { proof, ciphertext, - recipient_root, - recipient_root_proof, - common_message_hash, + header, } = reveal; let accuser_id = proof.accuser_id; - let accuser_weight = self.nodes.weight_of(accuser_id)?; - let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; + + header.verify(state, accuser_id)?; + + self.check_avid_consistency(ciphertext, &header.recipient_root) + .map_err(|_| InvalidProof)?; let State { common_message, global_root, } = state; - - if common_message_hash != &compute_common_message_hash(common_message) { - return Err(InvalidProof); - } - - 
recipient_root_proof - .verify_proof_with_unserialized_leaf(global_root, recipient_root, accuser_id as usize) - .map_err(|_| InvalidProof)?; - - self.check_avid_consistency(ciphertext, recipient_root) - .map_err(|_| InvalidProof)?; - let challenge = compute_challenge_from_common_message( &self.random_oracle(), global_root, common_message, ); + let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; + let accuser_weight = self.nodes.weight_of(accuser_id)?; proof.check( accuser_pk, ciphertext, @@ -870,24 +881,12 @@ impl Receiver { let Blame { accuser_id, shards, - recipient_root, - recipient_root_proof, - common_message_hash, + header, } = blame; let accuser_id = *accuser_id; + let recipient_root = &header.recipient_root; - let State { - common_message, - global_root, - } = state; - - if common_message_hash != &compute_common_message_hash(common_message) { - return Err(InvalidProof); - } - - recipient_root_proof - .verify_proof_with_unserialized_leaf(global_root, recipient_root, accuser_id as usize) - .map_err(|_| InvalidProof)?; + header.verify(state, accuser_id)?; if !shards.iter().map(|s| s.sender).all_unique() || shards.iter().any(|s| s.verify(recipient_root).is_err()) From 38fe0dd5a89f99c904394e567ba4f895752adc06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 10:56:39 +0200 Subject: [PATCH 46/91] Clean up --- .../src/threshold_schnorr/batch_avss.rs | 66 +++++++------------ 1 file changed, 25 insertions(+), 41 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index af52b7e6ba..87870b3b03 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -719,7 +719,6 @@ impl Receiver { valid_echoes, } = processed_echos; - // TODO: What should happen if these checks fail? 
// Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} let challenge = compute_challenge_from_common_message( &self.random_oracle(), @@ -765,6 +764,12 @@ impl Receiver { global_root: global_root.clone(), }; + let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + let header = ComplaintHeader { + recipient_root, + recipient_root_proof: any_echo.recipient_root_proof.clone(), + common_message_hash: any_echo.common_message_hash, + }; let kind = match (faulty_dealer, decrypted_shares) { (false, Ok(my_shares)) => OutcomeKind::Valid { output: ReceiverOutput { @@ -773,16 +778,10 @@ impl Receiver { }, vote: Vote { global_root, - common_message_hash: compute_common_message_hash(common_message), + common_message_hash: any_echo.common_message_hash, }, }, (true, _) => { - let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let header = ComplaintHeader { - recipient_root, - recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash: any_echo.common_message_hash, - }; let shards = valid_echoes .into_iter() .map(|e| ShardContribution { @@ -797,25 +796,17 @@ impl Receiver { header, }) } - (false, Err(_)) => { - let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let header = ComplaintHeader { - recipient_root, - recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash: any_echo.common_message_hash, - }; - OutcomeKind::InvalidShares(Reveal { - proof: complaint::Complaint::create( - self.id, - shared, - &self.enc_secret_key, - &self.random_oracle(), - &mut rand::thread_rng(), - ), - ciphertext, - header, - }) - } + (false, Err(_)) => OutcomeKind::InvalidShares(Reveal { + proof: complaint::Complaint::create( + self.id, + shared, + &self.enc_secret_key, + &self.random_oracle(), + &mut rand::thread_rng(), + ), + ciphertext, + header, + }), }; Ok(DecryptionOutcome { state, kind }) } @@ -839,7 +830,6 @@ impl Receiver { let accuser_id = proof.accuser_id; header.verify(state, accuser_id)?; - 
self.check_avid_consistency(ciphertext, &header.recipient_root) .map_err(|_| InvalidProof)?; @@ -883,13 +873,13 @@ impl Receiver { shards, header, } = blame; - let accuser_id = *accuser_id; - let recipient_root = &header.recipient_root; - header.verify(state, accuser_id)?; + header.verify(state, *accuser_id)?; if !shards.iter().map(|s| s.sender).all_unique() - || shards.iter().any(|s| s.verify(recipient_root).is_err()) + || shards + .iter() + .any(|s| s.verify(&header.recipient_root).is_err()) { return Err(InvalidProof); } @@ -902,7 +892,7 @@ impl Receiver { } let ciphertext = self - .reconstruct_ciphertext(accuser_id, |id| { + .reconstruct_ciphertext(*accuser_id, |id| { shards .iter() .find(|s| s.sender == id) @@ -910,14 +900,8 @@ impl Receiver { }) .map_err(|_| InvalidProof)?; - // The blame is valid iff re-encoding the recovered ciphertext does not match the - // accuser's `r_i`. - if self - .check_avid_consistency(&ciphertext, recipient_root) - .is_ok() - { - return Err(InvalidProof); - } + self.check_avid_consistency(&ciphertext, &header.recipient_root) + .map_err(|_| InvalidProof)?; Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } From 0cfb0c1174320f2fc2f054c64563441cf250e168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 11:01:34 +0200 Subject: [PATCH 47/91] Update docs --- .../src/threshold_schnorr/batch_avss.rs | 47 ++++++++++--------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 87870b3b03..c730850d17 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -229,24 +229,6 @@ impl DecryptionOutcome { } } -impl ComplaintHeader { - /// Verify the header against the verifier's [State]: `common_message_hash` matches - /// `state.common_message`, and `recipient_root` is bound under `state.global_root` at - 
/// `accuser_id`'s leaf. - fn verify(&self, state: &State, accuser_id: PartyId) -> FastCryptoResult<()> { - if self.common_message_hash != compute_common_message_hash(&state.common_message) { - return Err(InvalidProof); - } - self.recipient_root_proof - .verify_proof_with_unserialized_leaf( - &state.global_root, - &self.recipient_root, - accuser_id as usize, - ) - .map_err(|_| InvalidProof) - } -} - impl ShareBatch { /// Verify a batch of shares using the given challenge. fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { @@ -608,7 +590,8 @@ impl Receiver { }) } - /// 2. When a party receives its message, it verifies the Merkle tree path for its shards and generates Echos, one per party. + /// 2. When a party receives its [Message], it verifies the Merkle tree path for its shards and + /// generates [Echo]s, one per party ordered by their ID. pub fn echo(&self, message: &Message) -> FastCryptoResult> { if message .dispersal @@ -638,10 +621,10 @@ impl Receiver { .collect::>>() } - /// 3. When a party has received Echos from parties with at least weight W - f, it - /// tries to process them. It first filters out invalid messages and checks if the Echos - /// have the same digest, r and r_i values. If not, an InvalidMessage error is returned. - /// If the filtered set of Echos does not have sufficient weight, an NotEnoughWeight error + /// 3. When a party has received [Echo]s from parties with at least weight W - f, it + /// tries to process them. It first filters out invalid messages and checks if the [Echo]s + /// have the same digest, r and r_i values. If not, an [InvalidMessage] error is returned. + /// If the filtered set of [Echo]s does not have sufficient weight, an [NotEnoughWeight] error /// is returned. 
/// /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed @@ -1060,6 +1043,24 @@ impl ShardContribution { } } +impl ComplaintHeader { + /// Verify the header against the verifier's [State]: `common_message_hash` matches + /// `state.common_message`, and `recipient_root` is bound under `state.global_root` at + /// `accuser_id`'s leaf. + fn verify(&self, state: &State, accuser_id: PartyId) -> FastCryptoResult<()> { + if self.common_message_hash != compute_common_message_hash(&state.common_message) { + return Err(InvalidProof); + } + self.recipient_root_proof + .verify_proof_with_unserialized_leaf( + &state.global_root, + &self.recipient_root, + accuser_id as usize, + ) + .map_err(|_| InvalidProof) + } +} + /// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one /// ciphertext). The root of this tree is the per-recipient `recipient_root`. #[allow(clippy::ptr_arg)] From 3218526137750dfb06ab404b6d68995ec7fee671 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 13:36:12 +0200 Subject: [PATCH 48/91] Take id in process_echos --- fastcrypto-tbls/benches/batch_avss.rs | 11 +- .../src/threshold_schnorr/batch_avss.rs | 319 +++++++++--------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 2 +- 3 files changed, 174 insertions(+), 158 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 0084de1654..804632cfe3 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -141,7 +141,12 @@ mod batch_avss_benches { process.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), - |b| b.iter(|| r1.process_echos(&echoes_for_party_1).unwrap()), + |b| { + b.iter(|| { + r1.decode_ciphertext_for_party(&echoes_for_party_1, r1.id) + .unwrap() + }) + }, ); } } @@ -184,7 +189,9 @@ mod batch_avss_benches { .collect(); let echoes_for_party_1: Vec = 
echoes.iter().map(|em| em[1].clone()).collect(); - let pem = receivers[1].process_echos(&echoes_for_party_1).unwrap(); + let pem = receivers[1] + .decode_ciphertext_for_party(&echoes_for_party_1, receivers[1].id) + .unwrap(); assert_valid_batch( receivers[1] .verify_and_decrypt(pem, &messages[1].common) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index c730850d17..bd64f48283 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -50,7 +50,7 @@ pub struct Dealer { /// This represents a Receiver in the AVSS who receives shares from the [Dealer]. #[allow(dead_code)] pub struct Receiver { - pub(crate) id: PartyId, + pub id: PartyId, enc_secret_key: PrivateKey, nodes: Nodes, sid: Vec, @@ -196,7 +196,7 @@ pub struct ReceiverOutput { /// If we say that node i has a weight `W_i`, we have /// `indices().len() == shares_for_secret(i).len() == weight() = W_i` /// -/// These can be created either by decrypting the shares from the dealer (see [Receiver::process_echos]) or by recovering them from complaint responses. +/// These can be created either by decrypting the shares from the dealer (see [Receiver::decode_ciphertext_for_party]) or by recovering them from complaint responses. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SharesForNode { pub shares: Vec, @@ -215,148 +215,6 @@ pub struct ShareBatch { pub blinding_share: S, } -impl DecryptionOutcome { - /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when - /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. - /// The receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and - /// not part of the wire format. - pub fn into_response(self) -> Response { - match self.kind { - OutcomeKind::Valid { vote, .. 
} => Response::Vote(vote), - OutcomeKind::InvalidShares(r) => Response::InvalidShares(r), - OutcomeKind::InvalidDispersal(b) => Response::InvalidDispersal(b), - } - } -} - -impl ShareBatch { - /// Verify a batch of shares using the given challenge. - fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { - if challenge.len() != self.batch_size() { - return Err(InvalidInput); - } - - // Verify that r' + sum_l r_l * gamma_l == p''(i) - if self - .batch - .iter() - .zip_eq(challenge) - .fold(self.blinding_share, |acc, (r_l, gamma_l)| { - acc + r_l * gamma_l - }) - != message.response_polynomial.eval(self.index).value - { - return Err(InvalidInput); - } - Ok(()) - } - - fn batch_size(&self) -> usize { - self.batch.len() - } -} - -impl SharesForNode { - /// Get the weight of this node (number of shares it has). - pub fn weight(&self) -> u16 { - self.shares.len() as u16 - } - - /// If all shares have the same batch size, return that. - /// Otherwise, return an InvalidInput error. - pub fn try_uniform_batch_size(&self) -> FastCryptoResult { - // TODO: Should we cache this? It's called twice per dealer -- once when verifying shares received from a dealer and then again during presigning. - get_uniform_value(self.shares.iter().map(ShareBatch::batch_size)).ok_or(InvalidInput) - } - - /// Get all shares this node has for the i-th secret/nonce in the batch. - /// This panics if `i` is larger than or equal to the batch size. - pub fn shares_for_secret(&self, i: usize) -> impl Iterator> + '_ { - self.shares.iter().map(move |s| Eval { - index: s.index, - value: s.batch[i], - }) - } - - fn verify( - &self, - message: &CommonMessage, - challenge: &[S], - weight: u16, - expected_batch_size: usize, - ) -> FastCryptoResult<()> { - if self.weight() != weight || self.try_uniform_batch_size()? 
!= expected_batch_size { - return Err(InvalidMessage); - } - for shares in &self.shares { - shares.verify(message, challenge)?; - } - Ok(()) - } - - /// Recover the shares for this node. - /// - /// Fails if `other_shares` is empty or if the batch sizes of all shares in `other_shares` are not equal to the expected batch size. - fn recover(receiver: &Receiver, other_shares: &[Self]) -> FastCryptoResult { - if other_shares.is_empty() { - return Err(InvalidInput); - } - - let shares = receiver - .my_indices() - .into_iter() - .map(|index| { - let batch = (0..receiver.batch_size) - .map(|i| { - let evaluations = other_shares - .iter() - .flat_map(|s| s.shares_for_secret(i)) - .collect_vec(); - Poly::recover_at(index, &evaluations).unwrap().value - }) - .collect_vec(); - - let blinding_share = Poly::recover_at( - index, - &other_shares - .iter() - .flat_map(|s| &s.shares) - .map(|share| Eval { - index: share.index, - value: share.blinding_share, - }) - .collect_vec(), - )? - .value; - - Ok(ShareBatch { - index, - batch, - blinding_share, - }) - }) - .collect::>>()?; - Ok(Self { shares }) - } - - /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given - /// batch size. - fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { - // Layout: - // SharesForNode = Vec - // = ULEB128(weight) + weight × ShareBatch - // ShareBatch - // = NonZeroU16 (= 2 bytes) + Vec + S - // = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_SIZE_IN_BYTES - - // TODO: A bit of a hack — this hardcodes the BCS layout of `SharesForNode` - uleb128_len(weight) - + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_SIZE_IN_BYTES) - } -} - -impl BCSSerialized for SharesForNode {} - impl Dealer { /// Create a new dealer. /// @@ -621,7 +479,7 @@ impl Receiver { .collect::>>() } - /// 3. When a party has received [Echo]s from parties with at least weight W - f, it + /// 3. 
When a party has received [Echo]s from parties with at least weight W - 2f, it /// tries to process them. It first filters out invalid messages and checks if the [Echo]s /// have the same digest, r and r_i values. If not, an [InvalidMessage] error is returned. /// If the filtered set of [Echo]s does not have sufficient weight, an [NotEnoughWeight] error @@ -633,17 +491,22 @@ impl Receiver { /// Once [Self::verify_and_decrypt] is called, the party should keep the resulting [State] /// around in order to handle future requests through [Self::handle_reveal] and /// [Self::handle_blame]. - pub fn process_echos(&self, echos: &[Echo]) -> FastCryptoResult { + pub fn decode_ciphertext_for_party( + &self, + echos: &[Echo], + party: PartyId, + ) -> FastCryptoResult { // Filter out invalid echo messages let valid_echoes = echos .iter() - .filter(|echo| echo.verify(self.id).is_ok()) + .filter(|echo| echo.verify(party).is_ok()) .cloned() .collect_vec(); let (global_root, recipient_root, _) = require_uniform_echo_metadata(&valid_echoes)?; - let required_weight = self.nodes.total_weight() - self.f; + // TODO: Double-check that this is ok + let required_weight = self.nodes.total_weight() - 2 * self.f; if self .nodes .total_weight_of(valid_echoes.iter().map(|echo| &echo.sender))? 
@@ -652,13 +515,13 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } - let ciphertext = self.reconstruct_ciphertext(self.id, |id| { + self.reconstruct_ciphertext(party, |id| { valid_echoes .iter() .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) - })?; - Ok(ProcessedEchos { + }) + .map(|ciphertext| ProcessedEchos { ciphertext, global_root, recipient_root, @@ -751,7 +614,7 @@ impl Receiver { let header = ComplaintHeader { recipient_root, recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash: any_echo.common_message_hash, + common_message_hash: compute_common_message_hash(common_message), }; let kind = match (faulty_dealer, decrypted_shares) { (false, Ok(my_shares)) => OutcomeKind::Valid { @@ -1008,6 +871,148 @@ impl Receiver { } } +impl DecryptionOutcome { + /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when + /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. + /// The receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and + /// not part of the wire format. + pub fn into_response(self) -> Response { + match self.kind { + OutcomeKind::Valid { vote, .. } => Response::Vote(vote), + OutcomeKind::InvalidShares(r) => Response::InvalidShares(r), + OutcomeKind::InvalidDispersal(b) => Response::InvalidDispersal(b), + } + } +} + +impl ShareBatch { + /// Verify a batch of shares using the given challenge. 
+ fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { + if challenge.len() != self.batch_size() { + return Err(InvalidInput); + } + + // Verify that r' + sum_l r_l * gamma_l == p''(i) + if self + .batch + .iter() + .zip_eq(challenge) + .fold(self.blinding_share, |acc, (r_l, gamma_l)| { + acc + r_l * gamma_l + }) + != message.response_polynomial.eval(self.index).value + { + return Err(InvalidInput); + } + Ok(()) + } + + fn batch_size(&self) -> usize { + self.batch.len() + } +} + +impl SharesForNode { + /// Get the weight of this node (number of shares it has). + pub fn weight(&self) -> u16 { + self.shares.len() as u16 + } + + /// If all shares have the same batch size, return that. + /// Otherwise, return an InvalidInput error. + pub fn try_uniform_batch_size(&self) -> FastCryptoResult { + // TODO: Should we cache this? It's called twice per dealer -- once when verifying shares received from a dealer and then again during presigning. + get_uniform_value(self.shares.iter().map(ShareBatch::batch_size)).ok_or(InvalidInput) + } + + /// Get all shares this node has for the i-th secret/nonce in the batch. + /// This panics if `i` is larger than or equal to the batch size. + pub fn shares_for_secret(&self, i: usize) -> impl Iterator> + '_ { + self.shares.iter().map(move |s| Eval { + index: s.index, + value: s.batch[i], + }) + } + + fn verify( + &self, + message: &CommonMessage, + challenge: &[S], + weight: u16, + expected_batch_size: usize, + ) -> FastCryptoResult<()> { + if self.weight() != weight || self.try_uniform_batch_size()? != expected_batch_size { + return Err(InvalidMessage); + } + for shares in &self.shares { + shares.verify(message, challenge)?; + } + Ok(()) + } + + /// Recover the shares for this node. + /// + /// Fails if `other_shares` is empty or if the batch sizes of all shares in `other_shares` are not equal to the expected batch size. 
+ fn recover(receiver: &Receiver, other_shares: &[Self]) -> FastCryptoResult { + if other_shares.is_empty() { + return Err(InvalidInput); + } + + let shares = receiver + .my_indices() + .into_iter() + .map(|index| { + let batch = (0..receiver.batch_size) + .map(|i| { + let evaluations = other_shares + .iter() + .flat_map(|s| s.shares_for_secret(i)) + .collect_vec(); + Poly::recover_at(index, &evaluations).unwrap().value + }) + .collect_vec(); + + let blinding_share = Poly::recover_at( + index, + &other_shares + .iter() + .flat_map(|s| &s.shares) + .map(|share| Eval { + index: share.index, + value: share.blinding_share, + }) + .collect_vec(), + )? + .value; + + Ok(ShareBatch { + index, + batch, + blinding_share, + }) + }) + .collect::>>()?; + Ok(Self { shares }) + } + + /// BCS-serialized length of a `SharesForNode` for a node of the given weight at the given + /// batch size. + fn bcs_serialized_size(weight: usize, batch_size: usize) -> usize { + // Layout: + // SharesForNode = Vec + // = ULEB128(weight) + weight × ShareBatch + // ShareBatch + // = NonZeroU16 (= 2 bytes) + Vec + S + // = 2 + ULEB128(batch_size) + (batch_size + 1) × SCALAR_SIZE_IN_BYTES + + // TODO: A bit of a hack — this hardcodes the BCS layout of `SharesForNode` + uleb128_len(weight) + + weight * (2 + uleb128_len(batch_size) + (batch_size + 1) * SCALAR_SIZE_IN_BYTES) + } +} + +impl BCSSerialized for SharesForNode {} + impl AuthenticatedShards { /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root` using `proof`. 
fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { @@ -1274,7 +1279,11 @@ mod tests { .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) - .map(|((receiver, _message), echoes)| receiver.process_echos(echoes).unwrap()) + .map(|((receiver, _message), echoes)| { + receiver + .decode_ciphertext_for_party(echoes, receiver.id) + .unwrap() + }) .collect_vec(); let all_shares = receivers @@ -1385,7 +1394,7 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { - let pem = r.process_echos(echoes).unwrap(); + let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1551,7 +1560,7 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { - let pem = r.process_echos(echoes).unwrap(); + let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 47931e6a55..eb80b98822 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -250,7 +250,7 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. 
for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = r.process_echos(echoes).unwrap(); + let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); let output = assert_valid_batch(r.verify_and_decrypt(pem, &msg.common).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } From 26f2d6adfcc26f6f00e22fdb7d9d0cf79177443f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 13:50:34 +0200 Subject: [PATCH 49/91] Fix inverted AVID check in handle_blame; rename ProcessedEchos -> DecodedCiphertext The AVID check in handle_blame was propagating Err on mismatch, but a valid Blame requires the re-encoded ciphertext to NOT match the accuser's r_i. Invert the check so genuine dispersal complaints verify and bogus ones are rejected. --- .../src/threshold_schnorr/batch_avss.rs | 26 ++++++++++++------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index bd64f48283..aad1795b3c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -100,7 +100,7 @@ pub struct Echo { /// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. 
#[derive(Clone)] -pub struct ProcessedEchos { +pub struct DecodedCiphertext { ciphertext: Vec, global_root: merkle::Node, recipient_root: merkle::Node, @@ -495,7 +495,7 @@ impl Receiver { &self, echos: &[Echo], party: PartyId, - ) -> FastCryptoResult { + ) -> FastCryptoResult { // Filter out invalid echo messages let valid_echoes = echos .iter() @@ -521,7 +521,7 @@ impl Receiver { .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) }) - .map(|ciphertext| ProcessedEchos { + .map(|ciphertext| DecodedCiphertext { ciphertext, global_root, recipient_root, @@ -543,7 +543,7 @@ impl Receiver { /// appeared on the TOB/ABC channel. pub fn verify_and_decrypt( &self, - processed_echos: ProcessedEchos, + decoded_ciphertext: DecodedCiphertext, common_message: &CommonMessage, ) -> FastCryptoResult { let CommonMessage { @@ -558,12 +558,12 @@ impl Receiver { return Err(InvalidMessage); } - let ProcessedEchos { + let DecodedCiphertext { ciphertext, global_root, recipient_root, valid_echoes, - } = processed_echos; + } = decoded_ciphertext; // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} let challenge = compute_challenge_from_common_message( @@ -746,8 +746,14 @@ impl Receiver { }) .map_err(|_| InvalidProof)?; - self.check_avid_consistency(&ciphertext, &header.recipient_root) - .map_err(|_| InvalidProof)?; + // The blame is valid iff re-encoding the recovered ciphertext does not match the + // accuser's `r_i`. 
+ if self + .check_avid_consistency(&ciphertext, &header.recipient_root) + .is_ok() + { + return Err(InvalidProof); + } Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } @@ -1275,7 +1281,7 @@ mod tests { }) .collect_vec(); - let processed_echos = receivers + let decoded_ciphertext = receivers .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) @@ -1288,7 +1294,7 @@ mod tests { let all_shares = receivers .iter() - .zip(processed_echos) + .zip(decoded_ciphertext) .zip(messages) .map(|((receiver, pem), message)| { let output = From 6706e265de331fc06fb95d8b266cea242a628001 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 14:28:50 +0200 Subject: [PATCH 50/91] Move AVID consistency check into decode_ciphertext_for_party The re-encoding faulty_dealer check belongs at the AVID layer, not mixed into verify_and_decrypt. decode_ciphertext_for_party now returns a DecodeOutcome of Decoded(DecodedCiphertext) or InvalidDispersal { blame, global_root }; verify_and_decrypt only sees the Decoded case and produces Valid or InvalidShares. A Blame accuser obtains global_root from the InvalidDispersal arm so they can later assemble a State for recover once they hold the CommonMessage. Also adds short doc comments on RS encode/decode. 
--- fastcrypto-tbls/benches/batch_avss.rs | 8 +- .../src/threshold_schnorr/batch_avss.rs | 172 ++++++++++-------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 5 +- .../src/threshold_schnorr/reed_solomon.rs | 6 +- 4 files changed, 116 insertions(+), 75 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 804632cfe3..68b387c7d7 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -189,9 +189,13 @@ mod batch_avss_benches { .collect(); let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); - let pem = receivers[1] + let pem = match receivers[1] .decode_ciphertext_for_party(&echoes_for_party_1, receivers[1].id) - .unwrap(); + .unwrap() + { + batch_avss::DecodeOutcome::Decoded(d) => d, + _ => panic!("expected Decoded outcome"), + }; assert_valid_batch( receivers[1] .verify_and_decrypt(pem, &messages[1].common) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index aad1795b3c..53df68867c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -107,6 +107,19 @@ pub struct DecodedCiphertext { valid_echoes: Vec, } +/// The result of [Receiver::decode_ciphertext_for_party]: either a successfully reconstructed +/// ciphertext whose AVID dispersal is consistent, or an [InvalidDispersal] [Blame] when the +/// re-encoded ciphertext disagrees with the dealer's `r_i`. The Blame variant additionally +/// surfaces the dealer's `global_root` so the accuser can later assemble a [State]. +#[allow(clippy::large_enum_variant)] +pub enum DecodeOutcome { + Decoded(DecodedCiphertext), + InvalidDispersal { + blame: Blame, + global_root: merkle::Node, + }, +} + /// The result of [Receiver::verify_and_decrypt]. 
Carries the per-receiver [State] (sufficient, /// together with a [ReceiverOutput], to handle later [Reveal] / [Blame] requests and to call /// [Receiver::recover]) plus an [OutcomeKind] describing what the receiver actually got. @@ -119,7 +132,6 @@ pub struct DecryptionOutcome { pub enum OutcomeKind { Valid { output: ReceiverOutput, vote: Vote }, InvalidShares(Reveal), - InvalidDispersal(Blame), } /// Context retained by a receiver after [Receiver::verify_and_decrypt]. Together with the @@ -495,7 +507,7 @@ impl Receiver { &self, echos: &[Echo], party: PartyId, - ) -> FastCryptoResult { + ) -> FastCryptoResult { // Filter out invalid echo messages let valid_echoes = echos .iter() @@ -503,7 +515,8 @@ impl Receiver { .cloned() .collect_vec(); - let (global_root, recipient_root, _) = require_uniform_echo_metadata(&valid_echoes)?; + let (global_root, recipient_root, common_message_hash) = + require_uniform_echo_metadata(&valid_echoes)?; // TODO: Double-check that this is ok let required_weight = self.nodes.total_weight() - 2 * self.f; @@ -515,18 +528,49 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } - self.reconstruct_ciphertext(party, |id| { + let ciphertext = self.reconstruct_ciphertext(party, |id| { valid_echoes .iter() .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) - }) - .map(|ciphertext| DecodedCiphertext { + })?; + + // If re-encoding the recovered ciphertext doesn't yield `recipient_root`, the dealer's + // dispersal is inconsistent — package the contributed shards as a [Blame]. 
+ if self + .check_avid_consistency(&ciphertext, &recipient_root) + .is_err() + { + let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + let header = ComplaintHeader { + recipient_root, + recipient_root_proof: any_echo.recipient_root_proof.clone(), + common_message_hash, + }; + let shards = valid_echoes + .into_iter() + .map(|e| ShardContribution { + sender: e.sender, + shards: e.authenticated_shards.shards, + proof: e.authenticated_shards.proof, + }) + .collect_vec(); + return Ok(DecodeOutcome::InvalidDispersal { + blame: Blame { + accuser_id: party, + shards, + header, + }, + global_root, + }); + } + + Ok(DecodeOutcome::Decoded(DecodedCiphertext { ciphertext, global_root, recipient_root, valid_echoes, - }) + })) } /// 4. If the party also received a valid [Message] from the dealer, it can now decrypt its shares using the [CommonMessage] part of the message. @@ -579,10 +623,6 @@ impl Receiver { return Err(InvalidMessage); } - let faulty_dealer = self - .check_avid_consistency(&ciphertext, &recipient_root) - .is_err(); - let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let decrypted_shares = shared .verify(&random_oracle_encryption) @@ -611,13 +651,8 @@ impl Receiver { }; let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let header = ComplaintHeader { - recipient_root, - recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash: compute_common_message_hash(common_message), - }; - let kind = match (faulty_dealer, decrypted_shares) { - (false, Ok(my_shares)) => OutcomeKind::Valid { + let kind = match decrypted_shares { + Ok(my_shares) => OutcomeKind::Valid { output: ReceiverOutput { my_shares, public_keys: full_public_keys.clone(), @@ -627,22 +662,7 @@ impl Receiver { common_message_hash: any_echo.common_message_hash, }, }, - (true, _) => { - let shards = valid_echoes - .into_iter() - .map(|e| ShardContribution { - sender: e.sender, - shards: e.authenticated_shards.shards, - proof: 
e.authenticated_shards.proof, - }) - .collect_vec(); - OutcomeKind::InvalidDispersal(Blame { - accuser_id: self.id, - shards, - header, - }) - } - (false, Err(_)) => OutcomeKind::InvalidShares(Reveal { + Err(_) => OutcomeKind::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, shared, @@ -651,7 +671,11 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - header, + header: ComplaintHeader { + recipient_root, + recipient_root_proof: any_echo.recipient_root_proof.clone(), + common_message_hash: compute_common_message_hash(common_message), + }, }), }; Ok(DecryptionOutcome { state, kind }) @@ -879,14 +903,13 @@ impl Receiver { impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when - /// the dealer's broadcast verified, otherwise the [InvalidShares] or [InvalidDispersal] itself. - /// The receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and - /// not part of the wire format. + /// the dealer's broadcast verified, otherwise the [InvalidShares] complaint itself. The + /// receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and not + /// part of the wire format. pub fn into_response(self) -> Response { match self.kind { OutcomeKind::Valid { vote, .. 
} => Response::Vote(vote), OutcomeKind::InvalidShares(r) => Response::InvalidShares(r), - OutcomeKind::InvalidDispersal(b) => Response::InvalidDispersal(b), } } } @@ -1165,8 +1188,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - Dealer, DecryptionOutcome, Message, OutcomeKind, Receiver, ReceiverOutput, ShareBatch, - SharesForNode, State, + Dealer, DecodeOutcome, DecodedCiphertext, DecryptionOutcome, Message, OutcomeKind, + Receiver, ReceiverOutput, ShareBatch, SharesForNode, State, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1286,9 +1309,11 @@ mod tests { .zip(messages.iter()) .zip(echoes_by_recipient.iter()) .map(|((receiver, _message), echoes)| { - receiver - .decode_ciphertext_for_party(echoes, receiver.id) - .unwrap() + assert_decoded( + receiver + .decode_ciphertext_for_party(echoes, receiver.id) + .unwrap(), + ) }) .collect_vec(); @@ -1395,12 +1420,13 @@ mod tests { .map(|i| echos.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); - // Process echoes + verify_and_decrypt. + // Process echoes + verify_and_decrypt. AVID is consistent for everyone in this test, so + // every decode yields a Decoded outcome. let outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { - let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); + let pem = assert_decoded(r.decode_ciphertext_for_party(echoes, r.id).unwrap()); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1561,41 +1587,37 @@ mod tests { }) .collect_vec(); - // Process echoes + verify_and_decrypt. - let outcomes: HashMap = receivers + // Decode each receiver's ciphertext. The victim hits the AVID inconsistency at the + // decode stage and gets a [DecodeOutcome::InvalidDispersal] directly; everyone else + // gets a [DecodeOutcome::Decoded] that they can pass through `verify_and_decrypt`. 
+ let mut decode_outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) - .map(|(r, echoes)| { - let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); - ( - r.id, - r.verify_and_decrypt(pem, &messages[r.id as usize].common) - .unwrap(), - ) - }) + .map(|(r, echoes)| (r.id, r.decode_ciphertext_for_party(echoes, r.id).unwrap())) .collect(); - // Receiver 0 emits an InvalidDispersal complaint. - let mut outcomes = outcomes; - let DecryptionOutcome { - state: victim_state, - kind: victim_kind, - } = outcomes.remove(&victim_id).unwrap(); - let blame = match victim_kind { - OutcomeKind::InvalidDispersal(b) => b, - ref other => panic!( - "expected InvalidDispersal from victim, got {:?}", - outcome_kind(other) + let (blame, victim_state) = match decode_outcomes.remove(&victim_id).unwrap() { + DecodeOutcome::InvalidDispersal { blame, global_root } => ( + blame, + State { + common_message: messages[victim_id as usize].common.clone(), + global_root, + }, ), + DecodeOutcome::Decoded(_) => panic!("expected InvalidDispersal from victim"), }; // The other receivers each get a Valid output. Keep both `output` and `state` so the // honest receivers can answer the victim's complaint. let mut states: HashMap = HashMap::new(); - let mut outputs: HashMap = outcomes + let mut outputs: HashMap = decode_outcomes .into_iter() - .map(|(id, o)| { - let DecryptionOutcome { state, kind } = o; + .map(|(id, decoded)| { + let pem = assert_decoded(decoded); + let outcome = receivers[id as usize] + .verify_and_decrypt(pem, &messages[id as usize].common) + .unwrap(); + let DecryptionOutcome { state, kind } = outcome; states.insert(id, state); let output = match kind { OutcomeKind::Valid { output, .. } => output, @@ -1649,11 +1671,19 @@ mod tests { } } + fn assert_decoded(outcome: DecodeOutcome) -> DecodedCiphertext { + match outcome { + DecodeOutcome::Decoded(d) => d, + DecodeOutcome::InvalidDispersal { .. 
} => { + panic!("expected Decoded outcome, got InvalidDispersal") + } + } + } + fn outcome_kind(kind: &OutcomeKind) -> &'static str { match kind { OutcomeKind::Valid { .. } => "Valid", OutcomeKind::InvalidShares(_) => "InvalidShares", - OutcomeKind::InvalidDispersal(_) => "InvalidDispersal", } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index eb80b98822..d9720834fd 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -250,7 +250,10 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = r.decode_ciphertext_for_party(echoes, r.id).unwrap(); + let pem = match r.decode_ciphertext_for_party(echoes, r.id).unwrap() { + batch_avss::DecodeOutcome::Decoded(d) => d, + _ => panic!("expected Decoded outcome"), + }; let output = assert_valid_batch(r.verify_and_decrypt(pem, &msg.common).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index b4339035ab..ebc289efab 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -152,6 +152,8 @@ impl ErasureCoder { .map(Self) } + /// Encode `data` into `n` shards of equal size, the first `k` of which hold the (zero-padded) + /// data and the remaining `n - k` parity. Any `k` shards suffice to reconstruct the data. pub fn encode(&self, data: &[u8]) -> FastCryptoResult> { // Define a shard size such that the data can be contained in `k` shards. 
let shard_size = data.len().div_ceil(self.0.data_shard_count()); @@ -165,7 +167,9 @@ impl ErasureCoder { Ok(shards.into_iter().map(Shard).collect_vec()) } - /// Note that the result may be padded with zeroes, and it is up to the caller to remove them. + /// Reconstruct the original data from `n` (possibly missing) shards, returning the first + /// `expected_len` bytes. Fails if more than `n - k` shards are missing or if the present + /// shards are inconsistent with any single codeword. pub fn decode( &self, shards: Vec>, From 95d5cb75294ad2e0fdd884849cbdb7c81e445977 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 14:53:37 +0200 Subject: [PATCH 51/91] Drop global_root and State; carry recipient_roots in CommonMessage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the global Merkle root over per-recipient roots with the explicit Vec in CommonMessage, used directly when deriving the challenge. Echoes lose global_root and recipient_root_proof; AuthenticatedShards loses recipient_root; ComplaintHeader and Vote shed their global_root fields; Reveal/Blame collapse to just the hash. decode_ciphertext_for_party now takes &CommonMessage to verify echoes against the dealer's r_i. handle_reveal/handle_blame/recover take &CommonMessage; the State wrapper is gone — DecryptionOutcome carries common_message directly. Wire size: per-echo Merkle proof for recipient_root removed (W of them); CommonMessage grows by n × 32 bytes for recipient_roots. 
--- fastcrypto-tbls/benches/batch_avss.rs | 14 +- .../src/threshold_schnorr/batch_avss.rs | 393 +++++++----------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 5 +- 3 files changed, 169 insertions(+), 243 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 68b387c7d7..dd729c4a2e 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -143,8 +143,12 @@ mod batch_avss_benches { format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), |b| { b.iter(|| { - r1.decode_ciphertext_for_party(&echoes_for_party_1, r1.id) - .unwrap() + r1.decode_ciphertext_for_party( + &echoes_for_party_1, + r1.id, + &messages[1].common, + ) + .unwrap() }) }, ); @@ -190,7 +194,11 @@ mod batch_avss_benches { let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); let pem = match receivers[1] - .decode_ciphertext_for_party(&echoes_for_party_1, receivers[1].id) + .decode_ciphertext_for_party( + &echoes_for_party_1, + receivers[1].id, + &messages[1].common, + ) .unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 53df68867c..f49091a608 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -75,56 +75,50 @@ pub struct CommonMessage { blinding_commit: G, shared: SharedComponents, response_polynomial: Poly, + /// Per-recipient Merkle roots committing to the dealer's RS shards. Used both for AVID + /// consistency checks and as input to the challenge derivation. + recipient_roots: Vec, } -/// One recipient's shards for one ciphertext, with a Merkle proof binding them to the -/// per-ciphertext root the dealer committed to. +/// One recipient's shards for one ciphertext, with a Merkle proof verifying against the +/// corresponding `recipient_root` from [CommonMessage]. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct AuthenticatedShards { - recipient_root: merkle::Node, shards: Vec, proof: merkle::MerkleProof, } -/// One sender's echo to a single recipient: their shard for the recipient's ciphertext, with -/// Merkle proofs binding it to the dealer's broadcast. +/// One sender's echo to a single recipient: their shard for the recipient's ciphertext, with a +/// proof that verifies against the recipient's [CommonMessage::recipient_roots] entry, plus a +/// hash binding the echo to a specific [CommonMessage]. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Echo { sender: PartyId, - global_root: merkle::Node, - /// Proof that `authenticated_shards.recipient_root` sits under `global_root` at the recipient's leaf. - recipient_root_proof: merkle::MerkleProof, authenticated_shards: AuthenticatedShards, common_message_hash: Digest<32>, } -/// The receiver's reconstructed ciphertext together with the metadata extracted from the echoes. +/// The receiver's reconstructed ciphertext. #[derive(Clone)] pub struct DecodedCiphertext { ciphertext: Vec, - global_root: merkle::Node, - recipient_root: merkle::Node, - valid_echoes: Vec, } /// The result of [Receiver::decode_ciphertext_for_party]: either a successfully reconstructed -/// ciphertext whose AVID dispersal is consistent, or an [InvalidDispersal] [Blame] when the -/// re-encoded ciphertext disagrees with the dealer's `r_i`. The Blame variant additionally -/// surfaces the dealer's `global_root` so the accuser can later assemble a [State]. +/// ciphertext whose AVID dispersal is consistent, or a [Blame] when the re-encoded ciphertext +/// disagrees with the dealer's `r_i`. #[allow(clippy::large_enum_variant)] pub enum DecodeOutcome { Decoded(DecodedCiphertext), - InvalidDispersal { - blame: Blame, - global_root: merkle::Node, - }, + InvalidDispersal(Blame), } -/// The result of [Receiver::verify_and_decrypt]. 
Carries the per-receiver [State] (sufficient, -/// together with a [ReceiverOutput], to handle later [Reveal] / [Blame] requests and to call -/// [Receiver::recover]) plus an [OutcomeKind] describing what the receiver actually got. +/// The result of [Receiver::verify_and_decrypt]. Carries the dealer's [CommonMessage] +/// (sufficient, together with a [ReceiverOutput], to handle later [Reveal] / [Blame] requests +/// and to call [Receiver::recover]) plus an [OutcomeKind] describing what the receiver actually +/// got. pub struct DecryptionOutcome { - pub state: State, + pub common_message: CommonMessage, pub kind: OutcomeKind, } @@ -134,15 +128,6 @@ pub enum OutcomeKind { InvalidShares(Reveal), } -/// Context retained by a receiver after [Receiver::verify_and_decrypt]. Together with the -/// [ReceiverOutput] it is sufficient to handle later [Reveal] / [Blame] requests and to call -/// [Receiver::recover]. -#[derive(Clone, Debug)] -pub struct State { - pub common_message: CommonMessage, - pub global_root: merkle::Node, -} - /// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's /// broadcast or a [InvalidShares] / [InvalidDispersal] complaint otherwise. #[allow(clippy::large_enum_variant)] @@ -156,7 +141,6 @@ pub enum Response { /// An endorsement of the dealer's broadcast. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Vote { - pub global_root: merkle::Node, pub common_message_hash: Digest<32>, } @@ -165,7 +149,8 @@ pub struct Vote { pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, - pub header: ComplaintHeader, + /// Hash binding the complaint to a specific [CommonMessage]. 
+ pub common_message_hash: Digest<32>, } /// A complaint by a receiver who decrypted valid shares but found the AVID dispersal @@ -174,17 +159,7 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, pub shards: Vec, - pub header: ComplaintHeader, -} - -/// Fields common to [Reveal] and [Blame] that bind the complaint to the dealer's broadcast. -/// `recipient_root` is the accuser's per-ciphertext Merkle root, `recipient_root_proof` binds -/// it under `global_root` at the accuser's leaf, and `common_message_hash` is `H(val)` from the -/// dealer's broadcast. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ComplaintHeader { - pub recipient_root: merkle::Node, - pub recipient_root_proof: merkle::MerkleProof, + /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest<32>, } @@ -347,7 +322,8 @@ impl Dealer { .iter() .map(recipient_tree) .collect::>>()?; - let recipient_roots = recipient_trees.iter().map(MerkleTree::root); + let recipient_roots: Vec = + recipient_trees.iter().map(MerkleTree::root).collect(); let dispersals: Vec> = self .nodes @@ -356,10 +332,8 @@ impl Dealer { shards .iter() .zip(&recipient_trees) - .zip(recipient_roots.clone()) - .map(|((s, tree), recipient_root)| { + .map(|(s, tree)| { Ok(AuthenticatedShards { - recipient_root, shards: s[id as usize].clone(), proof: tree.get_proof(id as usize)?, }) @@ -368,15 +342,13 @@ impl Dealer { }) .collect::>>()?; - let global_root = global_tree(recipient_roots)?.root(); - // "response" polynomials from https://eprint.iacr.org/2023/536.pdf let challenge = compute_challenge( &self.random_oracle(), &full_public_keys, &blinding_commit, &shared, - &global_root, + &recipient_roots, ); // Get the first t evaluations for the response polynomial and use these to compute the coefficients @@ -397,6 +369,7 @@ impl Dealer { shared, response_polynomial, blinding_commit, + recipient_roots, }; Ok(dispersals @@ -463,32 +436,29 @@ impl Receiver { /// 2. 
When a party receives its [Message], it verifies the Merkle tree path for its shards and /// generates [Echo]s, one per party ordered by their ID. pub fn echo(&self, message: &Message) -> FastCryptoResult> { + if message.dispersal.len() != message.common.recipient_roots.len() { + return Err(InvalidMessage); + } if message .dispersal .iter() - .any(|auth| auth.verify(self.id as usize).is_err()) + .zip(&message.common.recipient_roots) + .any(|(auth, root)| auth.verify(self.id as usize, root).is_err()) { return Err(InvalidMessage); } - let global_tree = global_tree_from_message(message)?; - let global_root = global_tree.root(); let common_message_hash = compute_common_message_hash(&message.common); - message + Ok(message .dispersal .iter() .cloned() - .enumerate() - .map(|(i, authenticated_shards)| { - Ok(Echo { - sender: self.id, - global_root: global_root.clone(), - recipient_root_proof: global_tree.get_proof(i)?, - authenticated_shards, - common_message_hash, - }) + .map(|authenticated_shards| Echo { + sender: self.id, + authenticated_shards, + common_message_hash, }) - .collect::>>() + .collect()) } /// 3. When a party has received [Echo]s from parties with at least weight W - 2f, it @@ -507,16 +477,25 @@ impl Receiver { &self, echos: &[Echo], party: PartyId, + common_message: &CommonMessage, ) -> FastCryptoResult { - // Filter out invalid echo messages + let recipient_root = common_message + .recipient_roots + .get(party as usize) + .ok_or(InvalidInput)?; + + // Filter out invalid echo messages: each echo's shards proof must verify against the + // dealer's `r_i` for `party`. 
let valid_echoes = echos .iter() - .filter(|echo| echo.verify(party).is_ok()) + .filter(|echo| echo.verify(recipient_root).is_ok()) .cloned() .collect_vec(); - let (global_root, recipient_root, common_message_hash) = - require_uniform_echo_metadata(&valid_echoes)?; + let common_message_hash = require_uniform_common_message_hash(&valid_echoes)?; + if common_message_hash != compute_common_message_hash(common_message) { + return Err(InvalidMessage); + } // TODO: Double-check that this is ok let required_weight = self.nodes.total_weight() - 2 * self.f; @@ -538,15 +517,9 @@ impl Receiver { // If re-encoding the recovered ciphertext doesn't yield `recipient_root`, the dealer's // dispersal is inconsistent — package the contributed shards as a [Blame]. if self - .check_avid_consistency(&ciphertext, &recipient_root) + .check_avid_consistency(&ciphertext, recipient_root) .is_err() { - let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; - let header = ComplaintHeader { - recipient_root, - recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash, - }; let shards = valid_echoes .into_iter() .map(|e| ShardContribution { @@ -555,22 +528,14 @@ impl Receiver { proof: e.authenticated_shards.proof, }) .collect_vec(); - return Ok(DecodeOutcome::InvalidDispersal { - blame: Blame { - accuser_id: party, - shards, - header, - }, - global_root, - }); + return Ok(DecodeOutcome::InvalidDispersal(Blame { + accuser_id: party, + shards, + common_message_hash, + })); } - Ok(DecodeOutcome::Decoded(DecodedCiphertext { - ciphertext, - global_root, - recipient_root, - valid_echoes, - })) + Ok(DecodeOutcome::Decoded(DecodedCiphertext { ciphertext })) } /// 4. If the party also received a valid [Message] from the dealer, it can now decrypt its shares using the [CommonMessage] part of the message. @@ -595,6 +560,7 @@ impl Receiver { blinding_commit, response_polynomial, shared, + .. 
} = &common_message; if full_public_keys.len() != self.batch_size || response_polynomial.degree() != self.t as usize - 1 @@ -602,19 +568,11 @@ impl Receiver { return Err(InvalidMessage); } - let DecodedCiphertext { - ciphertext, - global_root, - recipient_root, - valid_echoes, - } = decoded_ciphertext; + let DecodedCiphertext { ciphertext } = decoded_ciphertext; // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = compute_challenge_from_common_message( - &self.random_oracle(), - &global_root, - common_message, - ); + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); if G::generator() * response_polynomial.c0() != blinding_commit + G::multi_scalar_mul(&challenge, full_public_keys) @@ -645,12 +603,7 @@ impl Receiver { Ok(my_shares) }); - let state = State { - common_message: common_message.clone(), - global_root: global_root.clone(), - }; - - let any_echo = valid_echoes.first().ok_or(InvalidMessage)?; + let common_message_hash = compute_common_message_hash(common_message); let kind = match decrypted_shares { Ok(my_shares) => OutcomeKind::Valid { output: ReceiverOutput { @@ -658,8 +611,7 @@ impl Receiver { public_keys: full_public_keys.clone(), }, vote: Vote { - global_root, - common_message_hash: any_echo.common_message_hash, + common_message_hash, }, }, Err(_) => OutcomeKind::InvalidShares(Reveal { @@ -671,14 +623,13 @@ impl Receiver { &mut rand::thread_rng(), ), ciphertext, - header: ComplaintHeader { - recipient_root, - recipient_root_proof: any_echo.recipient_root_proof.clone(), - common_message_hash: compute_common_message_hash(common_message), - }, + common_message_hash, }), }; - Ok(DecryptionOutcome { state, kind }) + Ok(DecryptionOutcome { + common_message: common_message.clone(), + kind, + }) } /// 5. 
Upon receiving a [Reveal] from another party, verify it and respond with this party's @@ -689,29 +640,28 @@ impl Receiver { pub fn handle_reveal( &self, reveal: &Reveal, - state: &State, + common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Reveal { proof, ciphertext, - header, + common_message_hash, } = reveal; let accuser_id = proof.accuser_id; - header.verify(state, accuser_id)?; - self.check_avid_consistency(ciphertext, &header.recipient_root) + if *common_message_hash != compute_common_message_hash(common_message) { + return Err(InvalidProof); + } + let recipient_root = common_message + .recipient_roots + .get(accuser_id as usize) + .ok_or(InvalidProof)?; + self.check_avid_consistency(ciphertext, recipient_root) .map_err(|_| InvalidProof)?; - let State { - common_message, - global_root, - } = state; - let challenge = compute_challenge_from_common_message( - &self.random_oracle(), - global_root, - common_message, - ); + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let accuser_weight = self.nodes.weight_of(accuser_id)?; proof.check( @@ -727,29 +677,32 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The accuser's - /// `recipient_root` must sit under the dealer's `global_root` at the accuser's leaf, the - /// contributed shards must each be authenticated under that root, and re-encoding the - /// reconstructed ciphertext must not match it. On success, respond with this party's own - /// shares. + /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The contributed shards must + /// each be authenticated under the accuser's `r_i` (looked up in `common_message`), and + /// re-encoding the reconstructed ciphertext must not match it. On success, respond with this + /// party's own shares. 
pub fn handle_blame( &self, blame: &Blame, - state: &State, + common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Blame { accuser_id, shards, - header, + common_message_hash, } = blame; - header.verify(state, *accuser_id)?; + if *common_message_hash != compute_common_message_hash(common_message) { + return Err(InvalidProof); + } + let recipient_root = common_message + .recipient_roots + .get(*accuser_id as usize) + .ok_or(InvalidProof)?; if !shards.iter().map(|s| s.sender).all_unique() - || shards - .iter() - .any(|s| s.verify(&header.recipient_root).is_err()) + || shards.iter().any(|s| s.verify(recipient_root).is_err()) { return Err(InvalidProof); } @@ -773,7 +726,7 @@ impl Receiver { // The blame is valid iff re-encoding the recovered ciphertext does not match the // accuser's `r_i`. if self - .check_avid_consistency(&ciphertext, &header.recipient_root) + .check_avid_consistency(&ciphertext, recipient_root) .is_ok() { return Err(InvalidProof); @@ -786,7 +739,7 @@ impl Receiver { /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( &self, - state: &State, + common_message: &CommonMessage, responses: Vec>, ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. 
@@ -799,15 +752,8 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let State { - common_message, - global_root, - } = state; - let challenge = compute_challenge_from_common_message( - &self.random_oracle(), - global_root, - common_message, - ); + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); let response_shares = responses .into_iter() .filter_map(|response| { @@ -1043,26 +989,19 @@ impl SharesForNode { impl BCSSerialized for SharesForNode {} impl AuthenticatedShards { - /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root` using `proof`. - fn verify(&self, leaf_index: usize) -> FastCryptoResult<()> { - self.proof.verify_proof_with_unserialized_leaf( - &self.recipient_root, - &self.shards, - leaf_index, - ) + /// Verify that `shards` are the leaf at `leaf_index` under `recipient_root`. + fn verify(&self, leaf_index: usize, recipient_root: &merkle::Node) -> FastCryptoResult<()> { + self.proof + .verify_proof_with_unserialized_leaf(recipient_root, &self.shards, leaf_index) } } impl Echo { - /// Verify both Merkle proofs in this echo. - fn verify(&self, recipient_id: PartyId) -> FastCryptoResult<()> { - self.authenticated_shards.verify(self.sender as usize)?; - self.recipient_root_proof - .verify_proof_with_unserialized_leaf( - &self.global_root, - &self.authenticated_shards.recipient_root, - recipient_id as usize, - ) + /// Verify the shard's Merkle proof against `recipient_root` (the dealer's `r_i` for the + /// recipient this echo is addressed to) at `sender`'s leaf. 
+ fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { + self.authenticated_shards + .verify(self.sender as usize, recipient_root) } } @@ -1077,24 +1016,6 @@ impl ShardContribution { } } -impl ComplaintHeader { - /// Verify the header against the verifier's [State]: `common_message_hash` matches - /// `state.common_message`, and `recipient_root` is bound under `state.global_root` at - /// `accuser_id`'s leaf. - fn verify(&self, state: &State, accuser_id: PartyId) -> FastCryptoResult<()> { - if self.common_message_hash != compute_common_message_hash(&state.common_message) { - return Err(InvalidProof); - } - self.recipient_root_proof - .verify_proof_with_unserialized_leaf( - &state.global_root, - &self.recipient_root, - accuser_id as usize, - ) - .map_err(|_| InvalidProof) - } -} - /// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one /// ciphertext). The root of this tree is the per-recipient `recipient_root`. #[allow(clippy::ptr_arg)] @@ -1102,16 +1023,6 @@ fn recipient_tree(shards: &Vec>) -> FastCryptoResult::build_from_unserialized(shards.iter()) } -fn global_tree( - recipient_roots: impl ExactSizeIterator, -) -> FastCryptoResult> { - MerkleTree::::build_from_unserialized(recipient_roots) -} - -fn global_tree_from_message(message: &Message) -> FastCryptoResult> { - global_tree(message.dispersal.iter().map(|s| s.recipient_root.clone())) -} - /// Number of bytes BCS uses to encode `x` as an unsigned LEB128 length prefix. 
fn uleb128_len(x: usize) -> usize { let mut len = 1; @@ -1123,17 +1034,9 @@ fn uleb128_len(x: usize) -> usize { len } -fn require_uniform_echo_metadata( - echoes: &[Echo], -) -> FastCryptoResult<(merkle::Node, merkle::Node, Digest<32>)> { - get_uniform_value(echoes.iter().map(|e| { - ( - e.global_root.clone(), - e.authenticated_shards.recipient_root.clone(), - e.common_message_hash, - ) - })) - .ok_or(InvalidMessage) +/// Require that every echo's `common_message_hash` agrees and return that hash. +fn require_uniform_common_message_hash(echoes: &[Echo]) -> FastCryptoResult> { + get_uniform_value(echoes.iter().map(|e| e.common_message_hash)).ok_or(InvalidMessage) } fn compute_challenge( @@ -1141,11 +1044,12 @@ fn compute_challenge( c: &[G], c_prime: &G, shared: &SharedComponents, - root: &merkle::Node, + recipient_roots: &[merkle::Node], ) -> Vec { let random_oracle = random_oracle.extend(&Challenge.to_string()); let inner_hash = - Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, root)).unwrap()).digest; + Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, recipient_roots)).unwrap()) + .digest; (0..c.len()) .map(|l| random_oracle.evaluate_to_group_element(&(l, inner_hash.to_vec()))) .collect() @@ -1153,7 +1057,6 @@ fn compute_challenge( fn compute_challenge_from_common_message( random_oracle: &RandomOracle, - root: &merkle::Node, message: &CommonMessage, ) -> Vec { compute_challenge( @@ -1161,7 +1064,7 @@ fn compute_challenge_from_common_message( &message.full_public_keys, &message.blinding_commit, &message.shared, - root, + &message.recipient_roots, ) } @@ -1171,6 +1074,7 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { full_public_keys, blinding_commit, response_polynomial, + recipient_roots, } = message; let mut hasher = Blake2b256::new(); hasher.update( @@ -1179,6 +1083,7 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { full_public_keys, blinding_commit, response_polynomial, + 
recipient_roots, )) .unwrap(), ); @@ -1188,8 +1093,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - Dealer, DecodeOutcome, DecodedCiphertext, DecryptionOutcome, Message, OutcomeKind, - Receiver, ReceiverOutput, ShareBatch, SharesForNode, State, + CommonMessage, Dealer, DecodeOutcome, DecodedCiphertext, DecryptionOutcome, Message, + OutcomeKind, Receiver, ReceiverOutput, ShareBatch, SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1308,10 +1213,10 @@ mod tests { .iter() .zip(messages.iter()) .zip(echoes_by_recipient.iter()) - .map(|((receiver, _message), echoes)| { + .map(|((receiver, message), echoes)| { assert_decoded( receiver - .decode_ciphertext_for_party(echoes, receiver.id) + .decode_ciphertext_for_party(echoes, receiver.id, &message.common) .unwrap(), ) }) @@ -1426,7 +1331,10 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { - let pem = assert_decoded(r.decode_ciphertext_for_party(echoes, r.id).unwrap()); + let pem = assert_decoded( + r.decode_ciphertext_for_party(echoes, r.id, &messages[r.id as usize].common) + .unwrap(), + ); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1439,7 +1347,7 @@ mod tests { let victim_id = 0u16; let mut outcomes = outcomes; let DecryptionOutcome { - state: victim_state, + common_message: victim_common, kind: victim_kind, } = outcomes.remove(&victim_id).unwrap(); let reveal = match victim_kind { @@ -1450,14 +1358,17 @@ mod tests { ), }; - // The other receivers each get a Valid output. Keep both `output` and `state` so the - // honest receivers can answer the victim's complaint. - let mut states: HashMap = HashMap::new(); + // The other receivers each get a Valid output. Keep both `output` and the dealer's + // [CommonMessage] so the honest receivers can answer the victim's complaint. 
+ let mut commons: HashMap = HashMap::new(); let mut outputs: HashMap = outcomes .into_iter() .map(|(id, o)| { - let DecryptionOutcome { state, kind } = o; - states.insert(id, state); + let DecryptionOutcome { + common_message, + kind, + } = o; + commons.insert(id, common_message); let output = match kind { OutcomeKind::Valid { output, .. } => output, ref other => panic!( @@ -1476,7 +1387,7 @@ mod tests { .map(|r| { r.handle_reveal( &reveal, - states.get(&r.id).unwrap(), + commons.get(&r.id).unwrap(), outputs.get(&r.id).unwrap(), ) .unwrap() @@ -1485,7 +1396,7 @@ mod tests { // Victim recovers via interpolation across t responses. let recovered = receivers[victim_id as usize] - .recover(&victim_state, responses) + .recover(&victim_common, responses) .unwrap(); outputs.insert(victim_id, recovered); @@ -1593,23 +1504,24 @@ mod tests { let mut decode_outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) - .map(|(r, echoes)| (r.id, r.decode_ciphertext_for_party(echoes, r.id).unwrap())) + .map(|(r, echoes)| { + ( + r.id, + r.decode_ciphertext_for_party(echoes, r.id, &messages[r.id as usize].common) + .unwrap(), + ) + }) .collect(); - let (blame, victim_state) = match decode_outcomes.remove(&victim_id).unwrap() { - DecodeOutcome::InvalidDispersal { blame, global_root } => ( - blame, - State { - common_message: messages[victim_id as usize].common.clone(), - global_root, - }, - ), + let blame = match decode_outcomes.remove(&victim_id).unwrap() { + DecodeOutcome::InvalidDispersal(blame) => blame, DecodeOutcome::Decoded(_) => panic!("expected InvalidDispersal from victim"), }; + let victim_common = messages[victim_id as usize].common.clone(); - // The other receivers each get a Valid output. Keep both `output` and `state` so the - // honest receivers can answer the victim's complaint. - let mut states: HashMap = HashMap::new(); + // The other receivers each get a Valid output. 
Keep both `output` and the dealer's + // [CommonMessage] so the honest receivers can answer the victim's complaint. + let mut commons: HashMap = HashMap::new(); let mut outputs: HashMap = decode_outcomes .into_iter() .map(|(id, decoded)| { @@ -1617,8 +1529,11 @@ mod tests { let outcome = receivers[id as usize] .verify_and_decrypt(pem, &messages[id as usize].common) .unwrap(); - let DecryptionOutcome { state, kind } = outcome; - states.insert(id, state); + let DecryptionOutcome { + common_message, + kind, + } = outcome; + commons.insert(id, common_message); let output = match kind { OutcomeKind::Valid { output, .. } => output, ref other => panic!( @@ -1637,7 +1552,7 @@ mod tests { .map(|r| { r.handle_blame( &blame, - states.get(&r.id).unwrap(), + commons.get(&r.id).unwrap(), outputs.get(&r.id).unwrap(), ) .unwrap() @@ -1646,7 +1561,7 @@ mod tests { // Victim recovers via interpolation across t responses. let recovered = receivers[victim_id as usize] - .recover(&victim_state, responses) + .recover(&victim_common, responses) .unwrap(); outputs.insert(victim_id, recovered); diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index d9720834fd..3a49e777a0 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -250,7 +250,10 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. 
for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = match r.decode_ciphertext_for_party(echoes, r.id).unwrap() { + let pem = match r + .decode_ciphertext_for_party(echoes, r.id, &msg.common) + .unwrap() + { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; From 3cd4ca6871a0884c43109a3315c47dcde88f7116 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Tue, 5 May 2026 16:05:08 +0200 Subject: [PATCH 52/91] Slim Blame; drop DecodedCiphertext and outcome wrappers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - DecodeOutcome::Decoded now carries Vec directly (DecodedCiphertext was a single-field wrapper). - DecryptionOutcome collapses to a flat enum (no common_message field, no OutcomeKind wrapper). Tests pull common_message from the message they already hold. - decode_ciphertext_for_party returns InvalidDispersal on RS-decode failure too, not just on re-encode mismatch — closes the AVID liveness gap when the dealer ships shards that don't form a codeword. - Blame loses its shards/ShardContribution payload. handle_blame takes the verifier's locally observed echoes and re-runs decode for the accuser; valid iff that decode yields InvalidDispersal. 
--- fastcrypto-tbls/benches/batch_avss.rs | 4 +- .../src/threshold_schnorr/batch_avss.rs | 233 ++++++------------ fastcrypto-tbls/src/threshold_schnorr/mod.rs | 4 +- 3 files changed, 74 insertions(+), 167 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index dd729c4a2e..0f8b06ee61 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -257,8 +257,8 @@ mod batch_avss_benches { criterion_main!(batch_avss_benches::batch_avss_benches); fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { - match outcome.kind { - batch_avss::OutcomeKind::Valid { output, .. } => output, + match outcome { + batch_avss::DecryptionOutcome::Valid { output, .. } => output, _ => panic!("Expected valid outcome"), } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index f49091a608..1be17f5cd8 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -98,32 +98,18 @@ pub struct Echo { common_message_hash: Digest<32>, } -/// The receiver's reconstructed ciphertext. -#[derive(Clone)] -pub struct DecodedCiphertext { - ciphertext: Vec, -} - /// The result of [Receiver::decode_ciphertext_for_party]: either a successfully reconstructed /// ciphertext whose AVID dispersal is consistent, or a [Blame] when the re-encoded ciphertext /// disagrees with the dealer's `r_i`. #[allow(clippy::large_enum_variant)] pub enum DecodeOutcome { - Decoded(DecodedCiphertext), + Decoded(Vec), InvalidDispersal(Blame), } -/// The result of [Receiver::verify_and_decrypt]. Carries the dealer's [CommonMessage] -/// (sufficient, together with a [ReceiverOutput], to handle later [Reveal] / [Blame] requests -/// and to call [Receiver::recover]) plus an [OutcomeKind] describing what the receiver actually -/// got. 
-pub struct DecryptionOutcome { - pub common_message: CommonMessage, - pub kind: OutcomeKind, -} - +/// The result of [Receiver::verify_and_decrypt]. #[allow(clippy::large_enum_variant)] -pub enum OutcomeKind { +pub enum DecryptionOutcome { Valid { output: ReceiverOutput, vote: Vote }, InvalidShares(Reveal), } @@ -154,24 +140,15 @@ pub struct Reveal { } /// A complaint by a receiver who decrypted valid shares but found the AVID dispersal -/// inconsistent. +/// inconsistent. Carries no shards — verifiers re-run the AVID decode using their own locally +/// observed echoes addressed to `accuser_id`. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Blame { pub accuser_id: PartyId, - pub shards: Vec, /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest<32>, } -/// One sender's contribution of shards toward reconstructing the accuser's ciphertext. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ShardContribution { - pub sender: PartyId, - pub shards: Vec, - /// Proof that `shards` sits under the accuser's `recipient_root` at `sender`'s leaf. - pub proof: merkle::MerkleProof, -} - /// The output of a receiver which is a batch of shares and public keys for all nonces. #[derive(Debug, Clone)] pub struct ReceiverOutput { @@ -507,35 +484,32 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } - let ciphertext = self.reconstruct_ciphertext(party, |id| { + // Try to RS-decode the ciphertext. Two failure modes both indicate an inconsistent + // dealer dispersal: (a) decode fails outright because the echoed shards don't lie on a + // single codeword, or (b) decode succeeds but re-encoding the result yields a tree root + // different from the dealer's `r_i`. Both produce a [Blame]; verifiers re-run the same + // decode against their own locally observed echoes to confirm. 
+ let try_decode = self.reconstruct_ciphertext(party, |id| { valid_echoes .iter() .find(|e| e.sender == id) .map(|e| e.authenticated_shards.shards.clone()) - })?; + }); + let dispersal_consistent = try_decode + .as_ref() + .ok() + .is_some_and(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); - // If re-encoding the recovered ciphertext doesn't yield `recipient_root`, the dealer's - // dispersal is inconsistent — package the contributed shards as a [Blame]. - if self - .check_avid_consistency(&ciphertext, recipient_root) - .is_err() - { - let shards = valid_echoes - .into_iter() - .map(|e| ShardContribution { - sender: e.sender, - shards: e.authenticated_shards.shards, - proof: e.authenticated_shards.proof, - }) - .collect_vec(); + if !dispersal_consistent { return Ok(DecodeOutcome::InvalidDispersal(Blame { accuser_id: party, - shards, common_message_hash, })); } - Ok(DecodeOutcome::Decoded(DecodedCiphertext { ciphertext })) + Ok(DecodeOutcome::Decoded( + try_decode.expect("just verified Ok"), + )) } /// 4. If the party also received a valid [Message] from the dealer, it can now decrypt its shares using the [CommonMessage] part of the message. @@ -552,7 +526,7 @@ impl Receiver { /// appeared on the TOB/ABC channel. 
pub fn verify_and_decrypt( &self, - decoded_ciphertext: DecodedCiphertext, + ciphertext: Vec, common_message: &CommonMessage, ) -> FastCryptoResult { let CommonMessage { @@ -568,8 +542,6 @@ impl Receiver { return Err(InvalidMessage); } - let DecodedCiphertext { ciphertext } = decoded_ciphertext; - // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} let challenge = compute_challenge_from_common_message(&self.random_oracle(), common_message); @@ -604,8 +576,8 @@ impl Receiver { }); let common_message_hash = compute_common_message_hash(common_message); - let kind = match decrypted_shares { - Ok(my_shares) => OutcomeKind::Valid { + match decrypted_shares { + Ok(my_shares) => Ok(DecryptionOutcome::Valid { output: ReceiverOutput { my_shares, public_keys: full_public_keys.clone(), @@ -613,8 +585,8 @@ impl Receiver { vote: Vote { common_message_hash, }, - }, - Err(_) => OutcomeKind::InvalidShares(Reveal { + }), + Err(_) => Ok(DecryptionOutcome::InvalidShares(Reveal { proof: complaint::Complaint::create( self.id, shared, @@ -624,12 +596,8 @@ impl Receiver { ), ciphertext, common_message_hash, - }), - }; - Ok(DecryptionOutcome { - common_message: common_message.clone(), - kind, - }) + })), + } } /// 5. Upon receiving a [Reveal] from another party, verify it and respond with this party's @@ -677,62 +645,34 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. The contributed shards must - /// each be authenticated under the accuser's `r_i` (looked up in `common_message`), and - /// re-encoding the reconstructed ciphertext must not match it. On success, respond with this + /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. 
Re-runs the AVID decode for + /// the accuser; `echoes` should be the verifier's locally observed echoes (in a broadcast + /// model, every echo seen on the wire — `decode_ciphertext_for_party` filters internally to + /// the ones whose proofs verify under `r_accuser_id`). The blame is valid iff that decode + /// produces an [DecodeOutcome::InvalidDispersal] outcome. On success, respond with this /// party's own shares. pub fn handle_blame( &self, blame: &Blame, + echoes: &[Echo], common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { let Blame { accuser_id, - shards, common_message_hash, } = blame; if *common_message_hash != compute_common_message_hash(common_message) { return Err(InvalidProof); } - let recipient_root = common_message - .recipient_roots - .get(*accuser_id as usize) - .ok_or(InvalidProof)?; - if !shards.iter().map(|s| s.sender).all_unique() - || shards.iter().any(|s| s.verify(recipient_root).is_err()) - { - return Err(InvalidProof); - } - - let weight_of_shards = self - .nodes - .total_weight_of(shards.iter().map(|s| &s.sender))?; - if weight_of_shards < self.nodes.total_weight() - 2 * self.f { - return Err(InvalidProof); - } - - let ciphertext = self - .reconstruct_ciphertext(*accuser_id, |id| { - shards - .iter() - .find(|s| s.sender == id) - .map(|s| s.shards.clone()) - }) - .map_err(|_| InvalidProof)?; - - // The blame is valid iff re-encoding the recovered ciphertext does not match the - // accuser's `r_i`. - if self - .check_avid_consistency(&ciphertext, recipient_root) - .is_ok() - { - return Err(InvalidProof); + match self.decode_ciphertext_for_party(echoes, *accuser_id, common_message)? { + DecodeOutcome::InvalidDispersal(_) => { + Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) + } + DecodeOutcome::Decoded(_) => Err(InvalidProof), } - - Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. 
@@ -850,12 +790,12 @@ impl Receiver { impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] complaint itself. The - /// receiver's local [ReceiverOutput] (in the Valid case) and [State] are consumed and not - /// part of the wire format. + /// receiver's local [ReceiverOutput] (in the Valid case) is consumed and not part of the + /// wire format. pub fn into_response(self) -> Response { - match self.kind { - OutcomeKind::Valid { vote, .. } => Response::Vote(vote), - OutcomeKind::InvalidShares(r) => Response::InvalidShares(r), + match self { + DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), + DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), } } } @@ -1005,17 +945,6 @@ impl Echo { } } -impl ShardContribution { - /// Verify that `shards` are the leaf at `sender` under `recipient_root` using `proof`. - fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { - self.proof.verify_proof_with_unserialized_leaf( - recipient_root, - &self.shards, - self.sender as usize, - ) - } -} - /// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one /// ciphertext). The root of this tree is the per-recipient `recipient_root`. #[allow(clippy::ptr_arg)] @@ -1093,8 +1022,8 @@ fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { #[cfg(test)] mod tests { use super::{ - CommonMessage, Dealer, DecodeOutcome, DecodedCiphertext, DecryptionOutcome, Message, - OutcomeKind, Receiver, ReceiverOutput, ShareBatch, SharesForNode, + Dealer, DecodeOutcome, DecryptionOutcome, Message, Receiver, ReceiverOutput, ShareBatch, + SharesForNode, }; use crate::ecies_v1; use crate::ecies_v1::PublicKey; @@ -1346,37 +1275,23 @@ mod tests { // Receiver 0 (the targeted victim) emits a InvalidShares complaint. 
let victim_id = 0u16; let mut outcomes = outcomes; - let DecryptionOutcome { - common_message: victim_common, - kind: victim_kind, - } = outcomes.remove(&victim_id).unwrap(); - let reveal = match victim_kind { - OutcomeKind::InvalidShares(r) => r, + let reveal = match outcomes.remove(&victim_id).unwrap() { + DecryptionOutcome::InvalidShares(r) => r, ref other => panic!( "expected InvalidShares from victim, got {:?}", outcome_kind(other) ), }; - // The other receivers each get a Valid output. Keep both `output` and the dealer's - // [CommonMessage] so the honest receivers can answer the victim's complaint. - let mut commons: HashMap = HashMap::new(); + // The other receivers each get a Valid output. let mut outputs: HashMap = outcomes .into_iter() - .map(|(id, o)| { - let DecryptionOutcome { - common_message, - kind, - } = o; - commons.insert(id, common_message); - let output = match kind { - OutcomeKind::Valid { output, .. } => output, - ref other => panic!( - "expected Valid from honest receiver {id}, got {:?}", - outcome_kind(other) - ), - }; - (id, output) + .map(|(id, o)| match o { + DecryptionOutcome::Valid { output, .. } => (id, output), + ref other => panic!( + "expected Valid from honest receiver {id}, got {:?}", + outcome_kind(other) + ), }) .collect(); @@ -1387,7 +1302,7 @@ mod tests { .map(|r| { r.handle_reveal( &reveal, - commons.get(&r.id).unwrap(), + &messages[r.id as usize].common, outputs.get(&r.id).unwrap(), ) .unwrap() @@ -1396,7 +1311,7 @@ mod tests { // Victim recovers via interpolation across t responses. 
let recovered = receivers[victim_id as usize] - .recover(&victim_common, responses) + .recover(&messages[victim_id as usize].common, responses) .unwrap(); outputs.insert(victim_id, recovered); @@ -1517,11 +1432,7 @@ mod tests { DecodeOutcome::InvalidDispersal(blame) => blame, DecodeOutcome::Decoded(_) => panic!("expected InvalidDispersal from victim"), }; - let victim_common = messages[victim_id as usize].common.clone(); - - // The other receivers each get a Valid output. Keep both `output` and the dealer's - // [CommonMessage] so the honest receivers can answer the victim's complaint. - let mut commons: HashMap = HashMap::new(); + // The other receivers each get a Valid output. let mut outputs: HashMap = decode_outcomes .into_iter() .map(|(id, decoded)| { @@ -1529,13 +1440,8 @@ mod tests { let outcome = receivers[id as usize] .verify_and_decrypt(pem, &messages[id as usize].common) .unwrap(); - let DecryptionOutcome { - common_message, - kind, - } = outcome; - commons.insert(id, common_message); - let output = match kind { - OutcomeKind::Valid { output, .. } => output, + let output = match outcome { + DecryptionOutcome::Valid { output, .. } => output, ref other => panic!( "expected Valid from honest receiver {id}, got {:?}", outcome_kind(other) @@ -1552,7 +1458,8 @@ mod tests { .map(|r| { r.handle_blame( &blame, - commons.get(&r.id).unwrap(), + &echoes_per_recipient[victim_id as usize], + &messages[r.id as usize].common, outputs.get(&r.id).unwrap(), ) .unwrap() @@ -1561,7 +1468,7 @@ mod tests { // Victim recovers via interpolation across t responses. let recovered = receivers[victim_id as usize] - .recover(&victim_common, responses) + .recover(&messages[victim_id as usize].common, responses) .unwrap(); outputs.insert(victim_id, recovered); @@ -1580,25 +1487,25 @@ mod tests { } fn assert_valid(outcome: DecryptionOutcome) -> ReceiverOutput { - match outcome.kind { - OutcomeKind::Valid { output, .. } => output, + match outcome { + DecryptionOutcome::Valid { output, .. 
} => output, ref other => panic!("expected valid outcome, got {:?}", outcome_kind(other)), } } - fn assert_decoded(outcome: DecodeOutcome) -> DecodedCiphertext { + fn assert_decoded(outcome: DecodeOutcome) -> Vec { match outcome { - DecodeOutcome::Decoded(d) => d, + DecodeOutcome::Decoded(c) => c, DecodeOutcome::InvalidDispersal { .. } => { panic!("expected Decoded outcome, got InvalidDispersal") } } } - fn outcome_kind(kind: &OutcomeKind) -> &'static str { - match kind { - OutcomeKind::Valid { .. } => "Valid", - OutcomeKind::InvalidShares(_) => "InvalidShares", + fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { + match outcome { + DecryptionOutcome::Valid { .. } => "Valid", + DecryptionOutcome::InvalidShares(_) => "InvalidShares", } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 3a49e777a0..fe1960231f 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -511,8 +511,8 @@ mod tests { } fn assert_valid_batch(outcome: batch_avss::DecryptionOutcome) -> batch_avss::ReceiverOutput { - match outcome.kind { - batch_avss::OutcomeKind::Valid { output, .. } => output, + match outcome { + batch_avss::DecryptionOutcome::Valid { output, .. 
} => output, _ => panic!("expected valid batch_avss output"), } } From 61696fb1f0a2080533d3f77089df9660cfbb59a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 08:26:40 +0200 Subject: [PATCH 53/91] Documentation pass - Module doc: setup, happy path, complaint paths - Per-method protocol docs (create_message, echo, decode, verify_and_decrypt, handle_reveal, handle_blame, recover) - CommonMessage doc: usage, longevity, recovery-path note - Digest type alias using Blake2b256::OUTPUT_SIZE --- .../src/threshold_schnorr/batch_avss.rs | 126 ++++++++++-------- 1 file changed, 70 insertions(+), 56 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 1be17f5cd8..f886b6ede3 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -1,13 +1,39 @@ // Copyright (c) 2022, Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! Implementation of an asynchronous verifiable secret sharing (AVSS) protocol to distribute secret shares for a batch of random nonces. -//! The size of the batch is proportional to the [Dealer]'s weight. +//! Asynchronous verifiable secret sharing (AVSS) for a batch of random nonces, with batch size +//! proportional to the [Dealer]'s weight. An AVID layer based on Reed-Solomon `(W, W − 2f)` and +//! per-recipient Merkle commitments lets each receiver authenticate the dealer's broadcast even +//! if they did not receive it directly. //! -//! Before the protocol starts, the following setup is needed: -//! * Each receiver has a encryption key pair (ECIES) and these public keys are known to all parties. -//! * The public keys along with the weights of each receiver are known to all parties and defined in the [Nodes] structure. -//! * Define a new [Dealer] with the secrets who begins by calling [Dealer::create_message]. +//! # Setup +//! +//! 
* Each receiver holds an ECIES key pair from [crate::ecies_v1]; the public keys are +//! advertised through the shared [Nodes] structure together with each party's weight. +//! * One party is designated [Dealer] and constructs a [Dealer] with the same `nodes`, `f` +//! (Byzantine bound by weight), `t` (recovery threshold), session id, and +//! `batch_size_per_weight`. Every receiver constructs a [Receiver] with matching parameters. +//! +//! # Happy path +//! +//! 1. The dealer calls [Dealer::create_message] and sends each [Message] to its recipient. +//! 2. Every receiver calls [Receiver::echo] and broadcasts each resulting [Echo] to the +//! indexed recipient. +//! 3. Each receiver collects [Echo]s and runs [Receiver::decode_ciphertext_for_party] for their +//! own id, yielding a [DecodeOutcome::Decoded] ciphertext. +//! 4. They feed the ciphertext to [Receiver::verify_and_decrypt], which yields a +//! [DecryptionOutcome::Valid] containing this party's [ReceiverOutput] and a [Vote] to +//! broadcast on the TOB/ABC channel. +//! +//! # Complaint paths +//! +//! When a receiver gets [DecryptionOutcome::InvalidShares] or +//! [DecodeOutcome::InvalidDispersal] (decryption produced bad shares, or AVID dispersal is +//! inconsistent), they broadcast the [Reveal] / [Blame] complaint after at least `W − f` votes +//! have accrued. Other receivers validate via [Receiver::handle_reveal] / +//! [Receiver::handle_blame] and respond with their own shares; the accuser then calls +//! [Receiver::recover] once `≥ t` weight of valid responses has arrived to interpolate their +//! shares. 
use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Nodes, PartyId}; @@ -26,7 +52,7 @@ use fastcrypto::error::FastCryptoError::{ use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::secp256k1::SCALAR_SIZE_IN_BYTES; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; -use fastcrypto::hash::{Blake2b256, Digest, HashFunction, Sha3_512}; +use fastcrypto::hash::{Blake2b256, HashFunction, Sha3_512}; use fastcrypto::merkle; use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; @@ -35,6 +61,9 @@ use serde::{Deserialize, Serialize}; use std::fmt::Debug; use std::iter::repeat_with; +/// Blake2b digest used to bind echoes/complaints to a specific [CommonMessage]. +pub type Digest = fastcrypto::hash::Digest<{ Blake2b256::OUTPUT_SIZE }>; + /// This represents a Dealer in the AVSS. /// There is exactly one dealer who creates the shares and broadcasts the encrypted shares. #[allow(dead_code)] @@ -62,21 +91,25 @@ pub struct Receiver { code: ErasureCoder, } -/// The message broadcast by the dealer. +/// The dealer's per-recipient message: the shared [CommonMessage] plus the receiver's own +/// [AuthenticatedShards] entries (one per ciphertext, indexed by recipient id). #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Message { pub common: CommonMessage, dispersal: Vec, } +/// The shared part of the dealer's broadcast — identical for every receiver and required by +/// every later step ([Receiver::decode_ciphertext_for_party], [Receiver::verify_and_decrypt], +/// [Receiver::handle_reveal], [Receiver::handle_blame], [Receiver::recover]). Receivers should +/// keep it around for the lifetime of the session. A receiver that didn't get a [Message] from +/// the dealer should fetch the [CommonMessage] from another receiver who did. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CommonMessage { full_public_keys: Vec, blinding_commit: G, shared: SharedComponents, response_polynomial: Poly, - /// Per-recipient Merkle roots committing to the dealer's RS shards. Used both for AVID - /// consistency checks and as input to the challenge derivation. recipient_roots: Vec, } @@ -95,7 +128,7 @@ pub struct AuthenticatedShards { pub struct Echo { sender: PartyId, authenticated_shards: AuthenticatedShards, - common_message_hash: Digest<32>, + common_message_hash: Digest, } /// The result of [Receiver::decode_ciphertext_for_party]: either a successfully reconstructed @@ -127,7 +160,7 @@ pub enum Response { /// An endorsement of the dealer's broadcast. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Vote { - pub common_message_hash: Digest<32>, + pub common_message_hash: Digest, } /// A complaint by a receiver who could not decrypt or verify its shares. @@ -136,7 +169,7 @@ pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, /// Hash binding the complaint to a specific [CommonMessage]. - pub common_message_hash: Digest<32>, + pub common_message_hash: Digest, } /// A complaint by a receiver who decrypted valid shares but found the AVID dispersal @@ -146,7 +179,7 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, /// Hash binding the complaint to a specific [CommonMessage]. - pub common_message_hash: Digest<32>, + pub common_message_hash: Digest, } /// The output of a receiver which is a batch of shares and public keys for all nonces. @@ -214,7 +247,9 @@ impl Dealer { }) } - /// 1. The Dealer generates shares for the secrets and creates a set of messages - one per receiver. + /// 1. Build one [Message] per receiver. Each carries a shared [CommonMessage] (with the + /// public commitments and the per-recipient Merkle roots) and the recipient's own + /// [AuthenticatedShards] entries. Sent point-to-point to the corresponding receiver. 
pub fn create_message(&self, rng: &mut impl AllowedRng) -> FastCryptoResult> { self.create_message_with_mutation(rng, |_| {}, |_| {}) } @@ -410,8 +445,8 @@ impl Receiver { }) } - /// 2. When a party receives its [Message], it verifies the Merkle tree path for its shards and - /// generates [Echo]s, one per party ordered by their ID. + /// 2. Verify the dispersal entries against `recipient_roots` and emit one [Echo] per + /// recipient (indexed by recipient id) for the receiver to broadcast. pub fn echo(&self, message: &Message) -> FastCryptoResult> { if message.dispersal.len() != message.common.recipient_roots.len() { return Err(InvalidMessage); @@ -438,18 +473,10 @@ impl Receiver { .collect()) } - /// 3. When a party has received [Echo]s from parties with at least weight W - 2f, it - /// tries to process them. It first filters out invalid messages and checks if the [Echo]s - /// have the same digest, r and r_i values. If not, an [InvalidMessage] error is returned. - /// If the filtered set of [Echo]s does not have sufficient weight, an [NotEnoughWeight] error - /// is returned. - /// - /// If these checks succeed, the party reconstructs it's message (ciphertext) from the echoed - /// shards along with the r and r_i values. - /// - /// Once [Self::verify_and_decrypt] is called, the party should keep the resulting [State] - /// around in order to handle future requests through [Self::handle_reveal] and - /// [Self::handle_blame]. + /// 3. Reconstruct the ciphertext for `party` from received [Echo]s. Returns + /// [DecodeOutcome::Decoded] when the AVID dispersal is consistent with the dealer's + /// `r_party`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. Also called + /// by [Self::handle_blame] to validate complaints. pub fn decode_ciphertext_for_party( &self, echos: &[Echo], @@ -512,18 +539,9 @@ impl Receiver { )) } - /// 4. 
If the party also received a valid [Message] from the dealer, it can now decrypt its shares using the [CommonMessage] part of the message. - /// If this succeeds (returns a DecryptionOutcome::Valid), the party should return a signed vote to the dealer. - /// The vote payload can be obtained by calling [DecryptionOutcome::into_response] on the - /// outcome, which yields a [Response::Vote] for the caller to sign. - /// - /// When parties with weight at least W -f has submitted a vote, parties who didn't get a valid - /// [Message] from the dealer should request the [CommonMessage] part of that from the parties who voted. - /// Using this, the party can decrypt the shares and verify that the shares are valid. - /// - /// If this function returns an [InvalidShares] or [InvalidDispersal] outcome, the party should broadcast it - /// to the other parties, but only after at least `W - f` votes from other parties have - /// appeared on the TOB/ABC channel. + /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. + /// Yields [DecryptionOutcome::Valid] (with a [Vote] to broadcast) when shares verify, or + /// [DecryptionOutcome::InvalidShares] (a [Reveal]) otherwise. pub fn verify_and_decrypt( &self, ciphertext: Vec, @@ -600,11 +618,10 @@ impl Receiver { } } - /// 5. Upon receiving a [Reveal] from another party, verify it and respond with this party's - /// own shares so the accuser can recover. The accuser's `recipient_root` must sit under - /// the dealer's `global_root` at the accuser's leaf, the ciphertext must re-encode to - /// that root (binding it to the dealer's broadcast), and decryption with the recovery - /// package must yield invalid shares against `common_message`. + /// 5a. Validate a [Reveal] complaint and respond with this party's own shares so the + /// accuser can recover. 
Accepts iff the ciphertext is bound to the dealer's broadcast + /// (re-encodes to `recipient_roots[accuser_id]`) and the recovery package decrypts it + /// to invalid shares. pub fn handle_reveal( &self, reveal: &Reveal, @@ -645,12 +662,9 @@ impl Receiver { Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// Counterpart to [Self::handle_reveal] for [InvalidDispersal]. Re-runs the AVID decode for - /// the accuser; `echoes` should be the verifier's locally observed echoes (in a broadcast - /// model, every echo seen on the wire — `decode_ciphertext_for_party` filters internally to - /// the ones whose proofs verify under `r_accuser_id`). The blame is valid iff that decode - /// produces an [DecodeOutcome::InvalidDispersal] outcome. On success, respond with this - /// party's own shares. + /// 5b. Validate a [Blame] complaint and respond with this party's own shares. Accepts iff + /// re-running [Self::decode_ciphertext_for_party] for `accuser_id` against the + /// verifier's locally observed `echoes` yields [DecodeOutcome::InvalidDispersal]. pub fn handle_blame( &self, blame: &Blame, @@ -675,8 +689,8 @@ impl Receiver { } } - /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. - /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. + /// 6. After broadcasting a complaint, reconstruct this receiver's shares by Lagrange- + /// interpolating across at least `t` weight of valid [ComplaintResponse]s. pub fn recover( &self, common_message: &CommonMessage, @@ -964,7 +978,7 @@ fn uleb128_len(x: usize) -> usize { } /// Require that every echo's `common_message_hash` agrees and return that hash. 
-fn require_uniform_common_message_hash(echoes: &[Echo]) -> FastCryptoResult> { +fn require_uniform_common_message_hash(echoes: &[Echo]) -> FastCryptoResult { get_uniform_value(echoes.iter().map(|e| e.common_message_hash)).ok_or(InvalidMessage) } @@ -997,7 +1011,7 @@ fn compute_challenge_from_common_message( ) } -fn compute_common_message_hash(message: &CommonMessage) -> Digest<32> { +fn compute_common_message_hash(message: &CommonMessage) -> Digest { let CommonMessage { shared, full_public_keys, From 661784e648633d7f6fa95cccf0f961cf8b8d281c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 08:40:27 +0200 Subject: [PATCH 54/91] Update module doc --- .../src/threshold_schnorr/batch_avss.rs | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index f886b6ede3..c2a368fae4 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -1,18 +1,17 @@ // Copyright (c) 2022, Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! Asynchronous verifiable secret sharing (AVSS) for a batch of random nonces, with batch size -//! proportional to the [Dealer]'s weight. An AVID layer based on Reed-Solomon `(W, W − 2f)` and -//! per-recipient Merkle commitments lets each receiver authenticate the dealer's broadcast even -//! if they did not receive it directly. +//! Asynchronous verifiable secret sharing (AVSS) for a batch of random nonces. +//! An AVID layer based on Reed-Solomon `(W, W − 2f)` and per-recipient Merkle commitments lets each +//! receiver authenticate the dealer's broadcast even if they did not receive it directly. //! //! # Setup //! -//! * Each receiver holds an ECIES key pair from [crate::ecies_v1]; the public keys are +//! * Each receiver holds an ECIES key pair from [crate::ecies_v1]. The public keys are //! 
advertised through the shared [Nodes] structure together with each party's weight.
-//! * One party is designated [Dealer] and constructs a [Dealer] with the same `nodes`, `f`
+//! * One party is designated dealer and constructs a [Dealer] with `nodes`, `f`
 //!   (Byzantine bound by weight), `t` (recovery threshold), session id, and
-//!   `batch_size_per_weight`. Every receiver constructs a [Receiver] with matching parameters.
+//!   `batch_size_per_weight`. The receivers use a [Receiver] with matching parameters.
 //!
 //! # Happy path
 //!
@@ -25,11 +24,16 @@
 //!    [DecryptionOutcome::Valid] containing this party's [ReceiverOutput] and a [Vote] to
 //!    broadcast on the TOB/ABC channel.
 //!
+//! Receivers should keep the common part of the message, [CommonMessage], and all echos it has received
+//! for the lifetime of the protocol since these are needed to handle complaints.
+//!
 //! # Complaint paths
 //!
-//! When a receiver gets [DecryptionOutcome::InvalidShares] or
-//! [DecodeOutcome::InvalidDispersal] (decryption produced bad shares, or AVID dispersal is
-//! inconsistent), they broadcast the [Reveal] / [Blame] complaint after at least `W − f` votes
+//! If a receiver in [Receiver::decode_ciphertext_for_party] detects that AVID dispersal is
+//! inconsistent, it returns a [Blame] complaint. If a receiver in [Receiver::verify_and_decrypt]
+//! detects that its encryption is invalid or if its shares are invalid, it returns a [Reveal] complaint.
+//!
+//! They broadcast the [Reveal] / [Blame] complaint after at least `W − f` votes
 //! have accrued. Other receivers validate via [Receiver::handle_reveal] /
 //! [Receiver::handle_blame] and respond with their own shares; the accuser then calls
 //!
[Receiver::recover] once `≥ t` weight of valid responses has arrived to interpolate their From 6ac819122de6c009646062bc1f5bbe673fc621c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 09:21:22 +0200 Subject: [PATCH 55/91] Restore Blame shards; doc + ergonomics polish - Blame regains its Vec; handle_blame validates the carried shards against recipient_roots[accuser_id], runs the same decode-and-re-encode logic, and works under strict point-to-point echoes (no broadcast assumption needed). - decode_ciphertext_for_party renamed to decode_ciphertext and drops the redundant `party` argument (always self.id at every call site). - New CommonMessage::verify checks the well-formedness + polynomial commitment and returns the Fiat-Shamir challenge. Called by all five receiver entry points that take a CommonMessage. - Module + per-fn doc rewrite; Digest type alias via Blake2b256::OUTPUT_SIZE. --- fastcrypto-tbls/benches/batch_avss.rs | 14 +- .../src/threshold_schnorr/batch_avss.rs | 200 ++++++++++++------ fastcrypto-tbls/src/threshold_schnorr/mod.rs | 5 +- 3 files changed, 138 insertions(+), 81 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 0f8b06ee61..9c5e6643ee 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -143,12 +143,8 @@ mod batch_avss_benches { format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), |b| { b.iter(|| { - r1.decode_ciphertext_for_party( - &echoes_for_party_1, - r1.id, - &messages[1].common, - ) - .unwrap() + r1.decode_ciphertext(&echoes_for_party_1, &messages[1].common) + .unwrap() }) }, ); @@ -194,11 +190,7 @@ mod batch_avss_benches { let echoes_for_party_1: Vec = echoes.iter().map(|em| em[1].clone()).collect(); let pem = match receivers[1] - .decode_ciphertext_for_party( - &echoes_for_party_1, - receivers[1].id, - &messages[1].common, - ) + .decode_ciphertext(&echoes_for_party_1, 
&messages[1].common) .unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index c2a368fae4..56a25d4484 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -18,26 +18,27 @@ //! 1. The dealer calls [Dealer::create_message] and sends each [Message] to its recipient. //! 2. Every receiver calls [Receiver::echo] and broadcasts each resulting [Echo] to the //! indexed recipient. -//! 3. Each receiver collects [Echo]s and runs [Receiver::decode_ciphertext_for_party] for their +//! 3. Each receiver collects [Echo]s and runs [Receiver::decode_ciphertext] for their //! own id, yielding a [DecodeOutcome::Decoded] ciphertext. //! 4. They feed the ciphertext to [Receiver::verify_and_decrypt], which yields a //! [DecryptionOutcome::Valid] containing this party's [ReceiverOutput] and a [Vote] to //! broadcast on the TOB/ABC channel. //! -//! Receivers should keep the common part of the message, [CommonMessage], and all echos it has received -//! for the lifetime of the protocol since these are needed to handle complaints. +//! Receivers should keep the common part of the message, [CommonMessage], for the lifetime of +//! the protocol since it is needed to handle complaints. //! //! # Complaint paths //! -//! If a receiver in [Receiver::decode_ciphertext_for_party] detects that AVID dispersal is -//! inconsistent, it returns a [Blame] complaint. If a receiver in [Receiver::verify_and_decrypt] -//! detects that its encryption is invalid or if its shares are invalid, it returns a [Reveal] complaint. +//! If a receiver in [Receiver::decode_ciphertext] detects that AVID dispersal is +//! inconsistent, it returns a self-contained [Blame] carrying the collected [ShardContribution]s +//! as evidence. If a receiver in [Receiver::verify_and_decrypt] detects that decryption fails +//! 
or yields shares that don't verify, it returns a [Reveal] complaint. //! -//! They broadcast the [Reveal] / [Blame] complaint after at least `W − f` votes -//! have accrued. Other receivers validate via [Receiver::handle_reveal] / -//! [Receiver::handle_blame] and respond with their own shares; the accuser then calls -//! [Receiver::recover] once `≥ t` weight of valid responses has arrived to interpolate their -//! shares. +//! The accuser broadcasts the [Reveal] / [Blame] after at least `W − f` votes have accrued on +//! the TOB/ABC channel. Other receivers validate it via [Receiver::handle_reveal] / +//! [Receiver::handle_blame] and respond with their own shares. Once `≥ t` weight of valid +//! responses has arrived, the accuser calls [Receiver::recover] to interpolate their own +//! shares from those responses. use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; use crate::nodes::{Nodes, PartyId}; @@ -104,7 +105,7 @@ pub struct Message { } /// The shared part of the dealer's broadcast — identical for every receiver and required by -/// every later step ([Receiver::decode_ciphertext_for_party], [Receiver::verify_and_decrypt], +/// every later step ([Receiver::decode_ciphertext], [Receiver::verify_and_decrypt], /// [Receiver::handle_reveal], [Receiver::handle_blame], [Receiver::recover]). Receivers should /// keep it around for the lifetime of the session. A receiver that didn't get a [Message] from /// the dealer should fetch the [CommonMessage] from another receiver who did. @@ -135,7 +136,7 @@ pub struct Echo { common_message_hash: Digest, } -/// The result of [Receiver::decode_ciphertext_for_party]: either a successfully reconstructed +/// The result of [Receiver::decode_ciphertext]: either a successfully reconstructed /// ciphertext whose AVID dispersal is consistent, or a [Blame] when the re-encoded ciphertext /// disagrees with the dealer's `r_i`. 
#[allow(clippy::large_enum_variant)] @@ -176,16 +177,26 @@ pub struct Reveal { pub common_message_hash: Digest, } -/// A complaint by a receiver who decrypted valid shares but found the AVID dispersal -/// inconsistent. Carries no shards — verifiers re-run the AVID decode using their own locally -/// observed echoes addressed to `accuser_id`. +/// A complaint by a receiver who found the AVID dispersal inconsistent. Self-contained: +/// carries the accuser's collected shard contributions so verifiers can re-run the AVID check +/// without needing to observe echoes addressed to the accuser. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Blame { pub accuser_id: PartyId, + pub shards: Vec, /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest, } +/// One sender's contribution toward reconstructing the accuser's ciphertext: their shards plus +/// a Merkle proof binding them under `recipient_roots[accuser_id]` at `sender`'s leaf. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ShardContribution { + pub sender: PartyId, + pub shards: Vec, + pub proof: merkle::MerkleProof, +} + /// The output of a receiver which is a batch of shares and public keys for all nonces. #[derive(Debug, Clone)] pub struct ReceiverOutput { @@ -197,7 +208,7 @@ pub struct ReceiverOutput { /// If we say that node i has a weight `W_i`, we have /// `indices().len() == shares_for_secret(i).len() == weight() = W_i` /// -/// These can be created either by decrypting the shares from the dealer (see [Receiver::decode_ciphertext_for_party]) or by recovering them from complaint responses. +/// These can be created either by decrypting the shares from the dealer (see [Receiver::decode_ciphertext]) or by recovering them from complaint responses. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SharesForNode { pub shares: Vec, @@ -477,23 +488,22 @@ impl Receiver { .collect()) } - /// 3. 
Reconstruct the ciphertext for `party` from received [Echo]s. Returns + /// 3. Reconstruct this receiver's ciphertext from received [Echo]s. Returns /// [DecodeOutcome::Decoded] when the AVID dispersal is consistent with the dealer's - /// `r_party`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. Also called - /// by [Self::handle_blame] to validate complaints. - pub fn decode_ciphertext_for_party( + /// `r_{self.id}`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. + pub fn decode_ciphertext( &self, echos: &[Echo], - party: PartyId, common_message: &CommonMessage, ) -> FastCryptoResult { + common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let recipient_root = common_message .recipient_roots - .get(party as usize) + .get(self.id as usize) .ok_or(InvalidInput)?; // Filter out invalid echo messages: each echo's shards proof must verify against the - // dealer's `r_i` for `party`. + // dealer's `r_{self.id}`. let valid_echoes = echos .iter() .filter(|echo| echo.verify(recipient_root).is_ok()) @@ -518,9 +528,9 @@ impl Receiver { // Try to RS-decode the ciphertext. Two failure modes both indicate an inconsistent // dealer dispersal: (a) decode fails outright because the echoed shards don't lie on a // single codeword, or (b) decode succeeds but re-encoding the result yields a tree root - // different from the dealer's `r_i`. Both produce a [Blame]; verifiers re-run the same - // decode against their own locally observed echoes to confirm. - let try_decode = self.reconstruct_ciphertext(party, |id| { + // different from the dealer's `r_{self.id}`. Both produce a self-contained [Blame] + // carrying the collected shards as evidence. 
+ let try_decode = self.reconstruct_ciphertext(self.id, |id| { valid_echoes .iter() .find(|e| e.sender == id) @@ -532,8 +542,17 @@ impl Receiver { .is_some_and(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); if !dispersal_consistent { + let shards = valid_echoes + .into_iter() + .map(|e| ShardContribution { + sender: e.sender, + shards: e.authenticated_shards.shards, + proof: e.authenticated_shards.proof, + }) + .collect_vec(); return Ok(DecodeOutcome::InvalidDispersal(Blame { - accuser_id: party, + accuser_id: self.id, + shards, common_message_hash, })); } @@ -551,29 +570,12 @@ impl Receiver { ciphertext: Vec, common_message: &CommonMessage, ) -> FastCryptoResult { + let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let CommonMessage { full_public_keys, - blinding_commit, - response_polynomial, shared, .. } = &common_message; - if full_public_keys.len() != self.batch_size - || response_polynomial.degree() != self.t as usize - 1 - { - return Err(InvalidMessage); - } - - // Verify that g^{p''(0)} == c' * prod_l c_l^{gamma_l} - let challenge = - compute_challenge_from_common_message(&self.random_oracle(), common_message); - if G::generator() * response_polynomial.c0() - != blinding_commit - + G::multi_scalar_mul(&challenge, full_public_keys) - .expect("Inputs have constant lengths") - { - return Err(InvalidMessage); - } let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let decrypted_shares = shared @@ -632,6 +634,8 @@ impl Receiver { common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { + let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + let Reveal { proof, ciphertext, @@ -648,9 +652,6 @@ impl Receiver { .ok_or(InvalidProof)?; self.check_avid_consistency(ciphertext, recipient_root) .map_err(|_| InvalidProof)?; - - let challenge = - compute_challenge_from_common_message(&self.random_oracle(), common_message); 
let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let accuser_weight = self.nodes.weight_of(accuser_id)?; proof.check( @@ -667,34 +668,66 @@ impl Receiver { } /// 5b. Validate a [Blame] complaint and respond with this party's own shares. Accepts iff - /// re-running [Self::decode_ciphertext_for_party] for `accuser_id` against the - /// verifier's locally observed `echoes` yields [DecodeOutcome::InvalidDispersal]. + /// the carried [ShardContribution]s authenticate under + /// `common_message.recipient_roots[accuser_id]`, contribute `≥ W − 2f` weight from + /// unique senders, and either fail to RS-decode or decode to a ciphertext whose + /// re-encoding doesn't match the accuser's `r_i`. pub fn handle_blame( &self, blame: &Blame, - echoes: &[Echo], common_message: &CommonMessage, my_output: &ReceiverOutput, ) -> FastCryptoResult> { + common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + let Blame { accuser_id, + shards, common_message_hash, } = blame; if *common_message_hash != compute_common_message_hash(common_message) { return Err(InvalidProof); } + let recipient_root = common_message + .recipient_roots + .get(*accuser_id as usize) + .ok_or(InvalidProof)?; + + if !shards.iter().map(|s| s.sender).all_unique() + || shards.iter().any(|s| s.verify(recipient_root).is_err()) + { + return Err(InvalidProof); + } - match self.decode_ciphertext_for_party(echoes, *accuser_id, common_message)? { - DecodeOutcome::InvalidDispersal(_) => { - Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) - } - DecodeOutcome::Decoded(_) => Err(InvalidProof), + let weight_of_shards = self + .nodes + .total_weight_of(shards.iter().map(|s| &s.sender))?; + if weight_of_shards < self.nodes.total_weight() - 2 * self.f { + return Err(InvalidProof); } + + // The blame is valid iff the contributed shards either fail to RS-decode (they don't + // lie on a single codeword) or decode to a ciphertext whose re-encoding doesn't match + // the accuser's `r_i`. 
+ let dispersal_consistent = self + .reconstruct_ciphertext(*accuser_id, |id| { + shards + .iter() + .find(|s| s.sender == id) + .map(|s| s.shards.clone()) + }) + .ok() + .is_some_and(|ct| self.check_avid_consistency(&ct, recipient_root).is_ok()); + if dispersal_consistent { + return Err(InvalidProof); + } + + Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) } - /// 6. After broadcasting a complaint, reconstruct this receiver's shares by Lagrange- - /// interpolating across at least `t` weight of valid [ComplaintResponse]s. + /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. + /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. pub fn recover( &self, common_message: &CommonMessage, @@ -702,6 +735,8 @@ impl Receiver { ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. + let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + // Sanity check that we have enough responses (by weight) to recover the shares. let total_response_weight = self .nodes @@ -709,9 +744,6 @@ impl Receiver { if total_response_weight < self.t { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - - let challenge = - compute_challenge_from_common_message(&self.random_oracle(), common_message); let response_shares = responses .into_iter() .filter_map(|response| { @@ -805,6 +837,43 @@ impl Receiver { } } +impl CommonMessage { + /// Verify the dealer's commitments: the lengths/degree of the published values are + /// well-formed and `g^{p''(0)} = c' · ∏ c_l^{γ_l}`. Returns the Fiat-Shamir challenge `γ` + /// so the caller can reuse it for per-share verification. 
+ fn verify( + &self, + t: u16, + batch_size: usize, + random_oracle: &RandomOracle, + ) -> FastCryptoResult> { + if self.full_public_keys.len() != batch_size + || self.response_polynomial.degree() != t as usize - 1 + { + return Err(InvalidMessage); + } + let challenge = compute_challenge_from_common_message(random_oracle, self); + if G::generator() * self.response_polynomial.c0() + != self.blinding_commit + + G::multi_scalar_mul(&challenge, &self.full_public_keys) + .expect("Inputs have constant lengths") + { + return Err(InvalidMessage); + } + Ok(challenge) + } +} + +impl ShardContribution { + fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { + self.proof.verify_proof_with_unserialized_leaf( + recipient_root, + &self.shards, + self.sender as usize, + ) + } +} + impl DecryptionOutcome { /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when /// the dealer's broadcast verified, otherwise the [InvalidShares] complaint itself. The @@ -1163,7 +1232,7 @@ mod tests { .map(|((receiver, message), echoes)| { assert_decoded( receiver - .decode_ciphertext_for_party(echoes, receiver.id, &message.common) + .decode_ciphertext(echoes, &message.common) .unwrap(), ) }) @@ -1279,7 +1348,7 @@ mod tests { .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { let pem = assert_decoded( - r.decode_ciphertext_for_party(echoes, r.id, &messages[r.id as usize].common) + r.decode_ciphertext(echoes, &messages[r.id as usize].common) .unwrap(), ); ( @@ -1440,7 +1509,7 @@ mod tests { .map(|(r, echoes)| { ( r.id, - r.decode_ciphertext_for_party(echoes, r.id, &messages[r.id as usize].common) + r.decode_ciphertext(echoes, &messages[r.id as usize].common) .unwrap(), ) }) @@ -1476,7 +1545,6 @@ mod tests { .map(|r| { r.handle_blame( &blame, - &echoes_per_recipient[victim_id as usize], &messages[r.id as usize].common, outputs.get(&r.id).unwrap(), ) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs 
b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index fe1960231f..b9ed009a58 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -250,10 +250,7 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = match r - .decode_ciphertext_for_party(echoes, r.id, &msg.common) - .unwrap() - { + let pem = match r.decode_ciphertext(echoes, &msg.common).unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; From 500debfcded3cb704671a54e70841d7596828b73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 09:41:48 +0200 Subject: [PATCH 56/91] Drop unused Response enum and into_response Neither was referenced outside its own definition. --- .../src/threshold_schnorr/batch_avss.rs | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 56a25d4484..04322adcb3 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -152,16 +152,6 @@ pub enum DecryptionOutcome { InvalidShares(Reveal), } -/// The message a receiver broadcasts after `verify_and_decrypt`: a [Vote] endorsing the dealer's -/// broadcast or a [InvalidShares] / [InvalidDispersal] complaint otherwise. -#[allow(clippy::large_enum_variant)] -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum Response { - Vote(Vote), - InvalidShares(Reveal), - InvalidDispersal(Blame), -} - /// An endorsement of the dealer's broadcast. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Vote { @@ -874,19 +864,6 @@ impl ShardContribution { } } -impl DecryptionOutcome { - /// Reduce this outcome to the message the party should broadcast to others: a [Vote] when - /// the dealer's broadcast verified, otherwise the [InvalidShares] complaint itself. The - /// receiver's local [ReceiverOutput] (in the Valid case) is consumed and not part of the - /// wire format. - pub fn into_response(self) -> Response { - match self { - DecryptionOutcome::Valid { vote, .. } => Response::Vote(vote), - DecryptionOutcome::InvalidShares(r) => Response::InvalidShares(r), - } - } -} - impl ShareBatch { /// Verify a batch of shares using the given challenge. fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { From e8cf71895c7e286ecde46eefd61fed208d2264ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 09:48:14 +0200 Subject: [PATCH 57/91] cargo fmt --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 04322adcb3..7671cf1ac6 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -1207,11 +1207,7 @@ mod tests { .zip(messages.iter()) .zip(echoes_by_recipient.iter()) .map(|((receiver, message), echoes)| { - assert_decoded( - receiver - .decode_ciphertext(echoes, &message.common) - .unwrap(), - ) + assert_decoded(receiver.decode_ciphertext(echoes, &message.common).unwrap()) }) .collect_vec(); From e931d7d9dcac1c7014adaa9327e9d1f1e84cb363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 09:58:01 +0200 Subject: [PATCH 58/91] docs --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7671cf1ac6..74d20ec3bc 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -22,7 +22,7 @@ //! own id, yielding a [DecodeOutcome::Decoded] ciphertext. //! 4. They feed the ciphertext to [Receiver::verify_and_decrypt], which yields a //! [DecryptionOutcome::Valid] containing this party's [ReceiverOutput] and a [Vote] to -//! broadcast on the TOB/ABC channel. +//! broadcast on the TOB/ABC channel if both this and [Receiver::decode_ciphertext] succeeded. //! //! Receivers should keep the common part of the message, [CommonMessage], for the lifetime of //! the protocol since it is needed to handle complaints. From 0b26ee3cb4992bef16cc08a65aac2f201348a2ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 12:30:52 +0200 Subject: [PATCH 59/91] Authenticate ComplaintResponses with recovery packages ComplaintResponse now carries (responder_id, ciphertext, recovery_package) instead of plaintext shares. handle_reveal/handle_blame take the responder's own ciphertext and build a fresh ECIES recovery package; recover authenticates each response by AVID-binding the ciphertext to the dealer's broadcast and decrypting via the recovery package. Closes the L-degrees-of-freedom hole where a malicious responder could forge verify-passing-but-fake shares: forging now requires an ECIES NIZK on the dealer's actual ciphertext, which is infeasible. Tests now retain each receiver's ciphertext after decode and pass it to the complaint handlers. 
--- .../src/threshold_schnorr/batch_avss.rs | 115 ++++++++++++++---- 1 file changed, 88 insertions(+), 27 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 74d20ec3bc..f8ea5ddb3d 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -40,15 +40,14 @@ //! responses has arrived, the accuser calls [Receiver::recover] to interpolate their own //! shares from those responses. -use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, SharedComponents}; +use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, RecoveryPackage, SharedComponents}; use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; use crate::threshold_schnorr::complaint; -use crate::threshold_schnorr::complaint::ComplaintResponse; use crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; -use crate::threshold_schnorr::Extensions::{Challenge, Encryption}; +use crate::threshold_schnorr::Extensions::{Challenge, Encryption, Recovery}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types::{get_uniform_value, ShareIndex}; use fastcrypto::error::FastCryptoError::{ @@ -178,8 +177,10 @@ pub struct Blame { pub common_message_hash: Digest, } -/// One sender's contribution toward reconstructing the accuser's ciphertext: their shards plus -/// a Merkle proof binding them under `recipient_roots[accuser_id]` at `sender`'s leaf. +/// An entry in [Blame::shards]: one sender's shards for the accuser's ciphertext, with a Merkle +/// proof under `recipient_roots[accuser_id]` at `sender`'s leaf. 
A [Blame] carries enough of +/// these to attempt RS-decoding the accuser's ciphertext; the complaint is valid iff that +/// decode either fails or re-encodes to a tree root different from `recipient_roots[accuser_id]`. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ShardContribution { pub sender: PartyId, @@ -187,6 +188,17 @@ pub struct ShardContribution { pub proof: merkle::MerkleProof, } +/// A responder's reply to a [Reveal] / [Blame] complaint. Carries the responder's own dealer- +/// encrypted ciphertext together with an ECIES recovery package, so the accuser can +/// independently authenticate the responder's shares against the dealer's broadcast and +/// extract them via decryption. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ComplaintResponse { + pub responder_id: PartyId, + pub ciphertext: Vec, + pub recovery_package: RecoveryPackage, +} + /// The output of a receiver which is a batch of shares and public keys for all nonces. #[derive(Debug, Clone)] pub struct ReceiverOutput { @@ -622,13 +634,13 @@ impl Receiver { &self, reveal: &Reveal, common_message: &CommonMessage, - my_output: &ReceiverOutput, - ) -> FastCryptoResult> { + ciphertext: &[u8], + ) -> FastCryptoResult { let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let Reveal { proof, - ciphertext, + ciphertext: reveal_ciphertext, common_message_hash, } = reveal; let accuser_id = proof.accuser_id; @@ -640,13 +652,13 @@ impl Receiver { .recipient_roots .get(accuser_id as usize) .ok_or(InvalidProof)?; - self.check_avid_consistency(ciphertext, recipient_root) + self.check_avid_consistency(reveal_ciphertext, recipient_root) .map_err(|_| InvalidProof)?; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; let accuser_weight = self.nodes.weight_of(accuser_id)?; proof.check( accuser_pk, - ciphertext, + reveal_ciphertext, &common_message.shared, &self.random_oracle(), |shares: &SharesForNode| { @@ -654,7 +666,7 @@ impl Receiver { }, 
)?; - Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) + Ok(self.build_complaint_response(common_message, ciphertext)) } /// 5b. Validate a [Blame] complaint and respond with this party's own shares. Accepts iff @@ -666,8 +678,8 @@ impl Receiver { &self, blame: &Blame, common_message: &CommonMessage, - my_output: &ReceiverOutput, - ) -> FastCryptoResult> { + ciphertext: &[u8], + ) -> FastCryptoResult { common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let Blame { @@ -713,7 +725,27 @@ impl Receiver { return Err(InvalidProof); } - Ok(ComplaintResponse::new(self.id, my_output.my_shares.clone())) + Ok(self.build_complaint_response(common_message, ciphertext)) + } + + /// Build a [ComplaintResponse] for an answered [Reveal] / [Blame]: package this party's own + /// dealer-encrypted ciphertext together with an ECIES recovery package, so the accuser can + /// decrypt and authenticate the responder's shares. + fn build_complaint_response( + &self, + common_message: &CommonMessage, + ciphertext: &[u8], + ) -> ComplaintResponse { + let recovery_package = common_message.shared.create_recovery_package( + &self.enc_secret_key, + &self.random_oracle().extend(&Recovery(self.id).to_string()), + &mut rand::thread_rng(), + ); + ComplaintResponse { + responder_id: self.id, + ciphertext: ciphertext.to_vec(), + recovery_package, + } } /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. @@ -721,7 +753,7 @@ impl Receiver { pub fn recover( &self, common_message: &CommonMessage, - responses: Vec>, + responses: Vec, ) -> FastCryptoResult { // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. @@ -734,19 +766,44 @@ impl Receiver { if total_response_weight < self.t { return Err(FastCryptoError::InputTooShort(self.t as usize)); } + // Each response carries the responder's own dealer-encrypted ciphertext plus an ECIES + // recovery package. 
Authenticate the ciphertext under the dealer's broadcast, decrypt + // it via the recovery package, then sanity-check the shares against the response + // polynomial. Any failure drops the response. + let encryption_ro = self.random_oracle().extend(&Encryption.to_string()); let response_shares = responses .into_iter() .filter_map(|response| { - self.nodes - .weight_of(response.responder_id) - .map(|w| (w, response.shares)) - .ok() - }) - .filter_map(|(weight, shares)| { + let responder_pk = self + .nodes + .node_id_to_node(response.responder_id) + .ok()? + .pk + .clone(); + let weight = self.nodes.weight_of(response.responder_id).ok()?; + let recipient_root = common_message + .recipient_roots + .get(response.responder_id as usize)?; + self.check_avid_consistency(&response.ciphertext, recipient_root) + .ok()?; + let plaintext = common_message + .shared + .decrypt_with_recovery_package( + &response.ciphertext, + &response.recovery_package, + &self + .random_oracle() + .extend(&Recovery(response.responder_id).to_string()), + &encryption_ro, + &responder_pk, + response.responder_id as usize, + ) + .ok()?; + let shares = SharesForNode::from_bytes(&plaintext).ok()?; shares .verify(common_message, &challenge, weight, self.batch_size) - .ok() - .map(|_| shares) + .ok()?; + Some(shares) }) .collect_vec(); @@ -1316,6 +1373,7 @@ mod tests { // Process echoes + verify_and_decrypt. AVID is consistent for everyone in this test, so // every decode yields a Decoded outcome. + let mut ciphertexts: HashMap> = HashMap::new(); let outcomes: HashMap = receivers .iter() .zip(echoes_per_recipient.iter()) @@ -1324,6 +1382,7 @@ mod tests { r.decode_ciphertext(echoes, &messages[r.id as usize].common) .unwrap(), ); + ciphertexts.insert(r.id, pem.clone()); ( r.id, r.verify_and_decrypt(pem, &messages[r.id as usize].common) @@ -1355,7 +1414,7 @@ mod tests { }) .collect(); - // Each non-victim verifies the complaint and returns their shares. 
+ // Each non-victim verifies the complaint and returns their own ciphertext + recovery package. let responses = receivers .iter() .filter(|r| r.id != victim_id) @@ -1363,7 +1422,7 @@ mod tests { r.handle_reveal( &reveal, &messages[r.id as usize].common, - outputs.get(&r.id).unwrap(), + ciphertexts.get(&r.id).unwrap(), ) .unwrap() }) @@ -1493,10 +1552,12 @@ mod tests { DecodeOutcome::Decoded(_) => panic!("expected InvalidDispersal from victim"), }; // The other receivers each get a Valid output. + let mut ciphertexts: HashMap> = HashMap::new(); let mut outputs: HashMap = decode_outcomes .into_iter() .map(|(id, decoded)| { let pem = assert_decoded(decoded); + ciphertexts.insert(id, pem.clone()); let outcome = receivers[id as usize] .verify_and_decrypt(pem, &messages[id as usize].common) .unwrap(); @@ -1511,7 +1572,7 @@ mod tests { }) .collect(); - // Each non-victim verifies the complaint and returns their shares. + // Each non-victim verifies the complaint and returns their own ciphertext + recovery package. let responses = receivers .iter() .filter(|r| r.id != victim_id) @@ -1519,7 +1580,7 @@ mod tests { r.handle_blame( &blame, &messages[r.id as usize].common, - outputs.get(&r.id).unwrap(), + ciphertexts.get(&r.id).unwrap(), ) .unwrap() }) From de047f4eeec297148194cc61e1f890e095d56693 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 12:42:57 +0200 Subject: [PATCH 60/91] Drop ShardContribution; Blame.shards is BTreeMap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace `Vec` with `BTreeMap` — sender uniqueness is now enforced by the map keys, lookup in reconstruct_ciphertext is O(log n), serialization is canonical (BCS sorted keys), and the wrapper struct goes away. 
--- .../src/threshold_schnorr/batch_avss.rs | 81 +++++++------------ 1 file changed, 28 insertions(+), 53 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index f8ea5ddb3d..d453d21520 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -30,8 +30,9 @@ //! # Complaint paths //! //! If a receiver in [Receiver::decode_ciphertext] detects that AVID dispersal is -//! inconsistent, it returns a self-contained [Blame] carrying the collected [ShardContribution]s -//! as evidence. If a receiver in [Receiver::verify_and_decrypt] detects that decryption fails +//! inconsistent, it returns a self-contained [Blame] carrying the collected per-sender +//! [AuthenticatedShards] as evidence. If a receiver in [Receiver::verify_and_decrypt] detects +//! that decryption fails //! or yields shares that don't verify, it returns a [Reveal] complaint. //! //! The accuser broadcasts the [Reveal] / [Blame] after at least `W − f` votes have accrued on @@ -62,6 +63,7 @@ use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; use itertools::Itertools; use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use std::fmt::Debug; use std::iter::repeat_with; @@ -166,28 +168,18 @@ pub struct Reveal { pub common_message_hash: Digest, } -/// A complaint by a receiver who found the AVID dispersal inconsistent. Self-contained: -/// carries the accuser's collected shard contributions so verifiers can re-run the AVID check -/// without needing to observe echoes addressed to the accuser. +/// A complaint by a receiver who found the AVID dispersal inconsistent. Self-contained: carries +/// the accuser's collected per-sender [AuthenticatedShards] so verifiers can re-run the AVID +/// check without needing to observe echoes addressed to the accuser. 
The map keys are sender +/// ids, which both deduplicates contributions and gives O(log n) lookup during reconstruction. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Blame { pub accuser_id: PartyId, - pub shards: Vec, + pub shards: BTreeMap, /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest, } -/// An entry in [Blame::shards]: one sender's shards for the accuser's ciphertext, with a Merkle -/// proof under `recipient_roots[accuser_id]` at `sender`'s leaf. A [Blame] carries enough of -/// these to attempt RS-decoding the accuser's ciphertext; the complaint is valid iff that -/// decode either fails or re-encodes to a tree root different from `recipient_roots[accuser_id]`. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ShardContribution { - pub sender: PartyId, - pub shards: Vec, - pub proof: merkle::MerkleProof, -} - /// A responder's reply to a [Reveal] / [Blame] complaint. Carries the responder's own dealer- /// encrypted ciphertext together with an ECIES recovery package, so the accuser can /// independently authenticate the responder's shares against the dealer's broadcast and @@ -544,14 +536,10 @@ impl Receiver { .is_some_and(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); if !dispersal_consistent { - let shards = valid_echoes + let shards: BTreeMap = valid_echoes .into_iter() - .map(|e| ShardContribution { - sender: e.sender, - shards: e.authenticated_shards.shards, - proof: e.authenticated_shards.proof, - }) - .collect_vec(); + .map(|e| (e.sender, e.authenticated_shards)) + .collect(); return Ok(DecodeOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, @@ -634,7 +622,7 @@ impl Receiver { &self, reveal: &Reveal, common_message: &CommonMessage, - ciphertext: &[u8], + ciphertext: Vec, ) -> FastCryptoResult { let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; @@ -670,15 +658,16 @@ impl Receiver { } /// 5b. 
Validate a [Blame] complaint and respond with this party's own shares. Accepts iff - /// the carried [ShardContribution]s authenticate under - /// `common_message.recipient_roots[accuser_id]`, contribute `≥ W − 2f` weight from - /// unique senders, and either fail to RS-decode or decode to a ciphertext whose - /// re-encoding doesn't match the accuser's `r_i`. + /// each entry in `blame.shards` authenticates under + /// `common_message.recipient_roots[accuser_id]` at its sender's leaf, the senders + /// contribute `≥ W − 2f` weight, and the resulting set of shards either fails to + /// RS-decode or decodes to a ciphertext whose re-encoding doesn't match the accuser's + /// `r_i`. pub fn handle_blame( &self, blame: &Blame, common_message: &CommonMessage, - ciphertext: &[u8], + ciphertext: Vec, ) -> FastCryptoResult { common_message.verify(self.t, self.batch_size, &self.random_oracle())?; @@ -696,15 +685,14 @@ impl Receiver { .get(*accuser_id as usize) .ok_or(InvalidProof)?; - if !shards.iter().map(|s| s.sender).all_unique() - || shards.iter().any(|s| s.verify(recipient_root).is_err()) + if shards + .iter() + .any(|(sender, auth)| auth.verify(*sender as usize, recipient_root).is_err()) { return Err(InvalidProof); } - let weight_of_shards = self - .nodes - .total_weight_of(shards.iter().map(|s| &s.sender))?; + let weight_of_shards = self.nodes.total_weight_of(shards.keys())?; if weight_of_shards < self.nodes.total_weight() - 2 * self.f { return Err(InvalidProof); } @@ -714,10 +702,7 @@ impl Receiver { // the accuser's `r_i`. 
let dispersal_consistent = self .reconstruct_ciphertext(*accuser_id, |id| { - shards - .iter() - .find(|s| s.sender == id) - .map(|s| s.shards.clone()) + shards.get(&id).map(|auth| auth.shards.clone()) }) .ok() .is_some_and(|ct| self.check_avid_consistency(&ct, recipient_root).is_ok()); @@ -734,7 +719,7 @@ impl Receiver { fn build_complaint_response( &self, common_message: &CommonMessage, - ciphertext: &[u8], + ciphertext: Vec, ) -> ComplaintResponse { let recovery_package = common_message.shared.create_recovery_package( &self.enc_secret_key, @@ -743,7 +728,7 @@ impl Receiver { ); ComplaintResponse { responder_id: self.id, - ciphertext: ciphertext.to_vec(), + ciphertext, recovery_package, } } @@ -911,16 +896,6 @@ impl CommonMessage { } } -impl ShardContribution { - fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { - self.proof.verify_proof_with_unserialized_leaf( - recipient_root, - &self.shards, - self.sender as usize, - ) - } -} - impl ShareBatch { /// Verify a batch of shares using the given challenge. 
fn verify(&self, message: &CommonMessage, challenge: &[S]) -> FastCryptoResult<()> { @@ -1422,7 +1397,7 @@ mod tests { r.handle_reveal( &reveal, &messages[r.id as usize].common, - ciphertexts.get(&r.id).unwrap(), + ciphertexts.get(&r.id).unwrap().clone(), ) .unwrap() }) @@ -1580,7 +1555,7 @@ mod tests { r.handle_blame( &blame, &messages[r.id as usize].common, - ciphertexts.get(&r.id).unwrap(), + ciphertexts.get(&r.id).unwrap().clone(), ) .unwrap() }) From 682ce75a509b62f0b04aa8646b52d9d1efc0688f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 13:35:38 +0200 Subject: [PATCH 61/91] Simplify decode_ciphertext's dispersal-consistency branch --- .../src/threshold_schnorr/batch_avss.rs | 41 +++++++------------ 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index d453d21520..73d97670f4 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -519,37 +519,26 @@ impl Receiver { return Err(NotEnoughWeight(required_weight as usize)); } - // Try to RS-decode the ciphertext. Two failure modes both indicate an inconsistent - // dealer dispersal: (a) decode fails outright because the echoed shards don't lie on a - // single codeword, or (b) decode succeeds but re-encoding the result yields a tree root - // different from the dealer's `r_{self.id}`. Both produce a self-contained [Blame] - // carrying the collected shards as evidence. - let try_decode = self.reconstruct_ciphertext(self.id, |id| { - valid_echoes - .iter() - .find(|e| e.sender == id) - .map(|e| e.authenticated_shards.shards.clone()) - }); - let dispersal_consistent = try_decode - .as_ref() + // Try to RS-decode the ciphertext and re-encode it. The dispersal is consistent iff + // both succeed and the re-encoded root matches `r_{self.id}`. 
Otherwise the dealer's + // dispersal is inconsistent — package the collected shards into a self-contained [Blame]. + let shards: BTreeMap = valid_echoes + .into_iter() + .map(|e| (e.sender, e.authenticated_shards)) + .collect(); + let decoded = self + .reconstruct_ciphertext(self.id, |id| shards.get(&id).map(|a| a.shards.clone())) .ok() - .is_some_and(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); + .filter(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); - if !dispersal_consistent { - let shards: BTreeMap = valid_echoes - .into_iter() - .map(|e| (e.sender, e.authenticated_shards)) - .collect(); - return Ok(DecodeOutcome::InvalidDispersal(Blame { + Ok(match decoded { + Some(ct) => DecodeOutcome::Decoded(ct), + None => DecodeOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, common_message_hash, - })); - } - - Ok(DecodeOutcome::Decoded( - try_decode.expect("just verified Ok"), - )) + }), + }) } /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. 
From 3ed995cf0ef43627b7e2b0fa7318fc8efef039c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 14:14:51 +0200 Subject: [PATCH 62/91] Clean up --- .../src/threshold_schnorr/batch_avss.rs | 41 +++++++++---------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 73d97670f4..a213f3b757 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -557,29 +557,26 @@ impl Receiver { } = &common_message; let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - let decrypted_shares = shared + shared .verify(&random_oracle_encryption) - .map(|_| { - shared.decrypt( - &ciphertext, - &self.enc_secret_key, - &random_oracle_encryption, - self.id as usize, - ) - }) - .and_then(SharesForNode::from_bytes) - .and_then(|my_shares| { - my_shares.verify( - common_message, - &challenge, - self.nodes.weight_of(self.id)?, - self.batch_size, - )?; - Ok(my_shares) - }); + .map_err(|_| InvalidMessage)?; let common_message_hash = compute_common_message_hash(common_message); - match decrypted_shares { + let plaintext = shared.decrypt( + &ciphertext, + &self.enc_secret_key, + &random_oracle_encryption, + self.id as usize, + ); + match SharesForNode::from_bytes(plaintext).and_then(|my_shares| { + my_shares.verify( + common_message, + &challenge, + self.nodes.weight_of(self.id)?, + self.batch_size, + )?; + Ok(my_shares) + }) { Ok(my_shares) => Ok(DecryptionOutcome::Valid { output: ReceiverOutput { my_shares, @@ -744,7 +741,7 @@ impl Receiver { // recovery package. Authenticate the ciphertext under the dealer's broadcast, decrypt // it via the recovery package, then sanity-check the shares against the response // polynomial. Any failure drops the response. 
- let encryption_ro = self.random_oracle().extend(&Encryption.to_string()); + let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let response_shares = responses .into_iter() .filter_map(|response| { @@ -768,7 +765,7 @@ impl Receiver { &self .random_oracle() .extend(&Recovery(response.responder_id).to_string()), - &encryption_ro, + &random_oracle_encryption, &responder_pk, response.responder_id as usize, ) From c652a462d0e43770e11d498cc972e5a728a3d446 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 14:25:21 +0200 Subject: [PATCH 63/91] Tighten CommonMessage API and AVID helpers - CommonMessage gains hash() and recipient_root(id) accessors. Replaces free fns and repeated `recipient_roots.get(id as usize).ok_or(...)`. - reconstruct_ciphertext now takes &BTreeMap directly, dropping the closure indirection. - recover drops the upfront total_response_weight check (the filter_map already handles invalid responder ids and the post-filter weight check enforces the quorum). - Inline require_uniform_common_message_hash at its only call site. 
--- .../src/threshold_schnorr/batch_avss.rs | 118 +++++++----------- 1 file changed, 48 insertions(+), 70 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index a213f3b757..2b010c5bf8 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -469,7 +469,7 @@ impl Receiver { return Err(InvalidMessage); } - let common_message_hash = compute_common_message_hash(&message.common); + let common_message_hash = message.common.hash(); Ok(message .dispersal .iter() @@ -491,10 +491,7 @@ impl Receiver { common_message: &CommonMessage, ) -> FastCryptoResult { common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - let recipient_root = common_message - .recipient_roots - .get(self.id as usize) - .ok_or(InvalidInput)?; + let recipient_root = common_message.recipient_root(self.id)?; // Filter out invalid echo messages: each echo's shards proof must verify against the // dealer's `r_{self.id}`. 
@@ -504,8 +501,10 @@ impl Receiver { .cloned() .collect_vec(); - let common_message_hash = require_uniform_common_message_hash(&valid_echoes)?; - if common_message_hash != compute_common_message_hash(common_message) { + let common_message_hash = + get_uniform_value(valid_echoes.iter().map(|e| e.common_message_hash)) + .ok_or(InvalidMessage)?; + if common_message_hash != common_message.hash() { return Err(InvalidMessage); } @@ -527,7 +526,7 @@ impl Receiver { .map(|e| (e.sender, e.authenticated_shards)) .collect(); let decoded = self - .reconstruct_ciphertext(self.id, |id| shards.get(&id).map(|a| a.shards.clone())) + .reconstruct_ciphertext(self.id, &shards) .ok() .filter(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); @@ -561,7 +560,7 @@ impl Receiver { .verify(&random_oracle_encryption) .map_err(|_| InvalidMessage)?; - let common_message_hash = compute_common_message_hash(common_message); + let common_message_hash = common_message.hash(); let plaintext = shared.decrypt( &ciphertext, &self.enc_secret_key, @@ -619,13 +618,10 @@ impl Receiver { } = reveal; let accuser_id = proof.accuser_id; - if *common_message_hash != compute_common_message_hash(common_message) { + if *common_message_hash != common_message.hash() { return Err(InvalidProof); } - let recipient_root = common_message - .recipient_roots - .get(accuser_id as usize) - .ok_or(InvalidProof)?; + let recipient_root = common_message.recipient_root(accuser_id)?; self.check_avid_consistency(reveal_ciphertext, recipient_root) .map_err(|_| InvalidProof)?; let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; @@ -663,13 +659,10 @@ impl Receiver { common_message_hash, } = blame; - if *common_message_hash != compute_common_message_hash(common_message) { + if *common_message_hash != common_message.hash() { return Err(InvalidProof); } - let recipient_root = common_message - .recipient_roots - .get(*accuser_id as usize) - .ok_or(InvalidProof)?; + let recipient_root = 
common_message.recipient_root(*accuser_id)?; if shards .iter() @@ -687,9 +680,7 @@ impl Receiver { // lie on a single codeword) or decode to a ciphertext whose re-encoding doesn't match // the accuser's `r_i`. let dispersal_consistent = self - .reconstruct_ciphertext(*accuser_id, |id| { - shards.get(&id).map(|auth| auth.shards.clone()) - }) + .reconstruct_ciphertext(*accuser_id, shards) .ok() .is_some_and(|ct| self.check_avid_consistency(&ct, recipient_root).is_ok()); if dispersal_consistent { @@ -726,17 +717,8 @@ impl Receiver { common_message: &CommonMessage, responses: Vec, ) -> FastCryptoResult { - // TODO: This fails if one of the responses has an invalid responder_id. We could probably just ignore those instead. - let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - // Sanity check that we have enough responses (by weight) to recover the shares. - let total_response_weight = self - .nodes - .total_weight_of(responses.iter().map(|response| &response.responder_id))?; - if total_response_weight < self.t { - return Err(FastCryptoError::InputTooShort(self.t as usize)); - } // Each response carries the responder's own dealer-encrypted ciphertext plus an ECIES // recovery package. Authenticate the ciphertext under the dealer's broadcast, decrypt // it via the recovery package, then sanity-check the shares against the response @@ -752,9 +734,7 @@ impl Receiver { .pk .clone(); let weight = self.nodes.weight_of(response.responder_id).ok()?; - let recipient_root = common_message - .recipient_roots - .get(response.responder_id as usize)?; + let recipient_root = common_message.recipient_root(response.responder_id).ok()?; self.check_avid_consistency(&response.ciphertext, recipient_root) .ok()?; let plaintext = common_message @@ -807,22 +787,24 @@ impl Receiver { } /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard - /// contributions exposed via `shards_for(party_id) -> Option>`. 
Fails if the - /// contributing weight is below `W - 2f` (too few contributions to reconstruct), or if a - /// party's contribution has a shard count that doesn't match its weight. The caller is - /// responsible for having authenticated the shards via their Merkle proofs. + /// contributions, keyed by sender id. Fails if the contributing weight is below `W - 2f` + /// (too few contributions to reconstruct), or if a party's contribution has a shard count + /// that doesn't match its weight. The caller is responsible for having authenticated the + /// shards via their Merkle proofs. fn reconstruct_ciphertext( &self, accuser_id: PartyId, - shards_for: impl Fn(PartyId) -> Option>, + shards: &BTreeMap, ) -> FastCryptoResult> { - let shards: Vec> = self + let opt_shards: Vec> = self .nodes .node_ids_iter() .map(|id| -> FastCryptoResult>> { let weight = self.nodes.weight_of(id).expect("valid party id") as usize; - match shards_for(id) { - Some(ss) if ss.len() == weight => Ok(ss.into_iter().map(Some).collect()), + match shards.get(&id) { + Some(auth) if auth.shards.len() == weight => { + Ok(auth.shards.iter().cloned().map(Some).collect()) + } // Fail if a contributor's shard count doesn't match its weight. Some(_) => Err(InvalidInput), None => Ok(vec![None; weight]), @@ -836,7 +818,7 @@ impl Receiver { self.nodes.weight_of(accuser_id)? as usize, self.batch_size, ); - self.code.decode(shards, expected_length) + self.code.decode(opt_shards, expected_length) } /// The check r_i' == r_i from the paper @@ -880,6 +862,29 @@ impl CommonMessage { } Ok(challenge) } + + /// Blake2b hash of the BCS-serialized [CommonMessage]. Used to bind echoes and complaints + /// to a specific dealer broadcast. 
+ fn hash(&self) -> Digest { + let mut hasher = Blake2b256::new(); + hasher.update( + bcs::to_bytes(&( + &self.shared, + &self.full_public_keys, + &self.blinding_commit, + &self.response_polynomial, + &self.recipient_roots, + )) + .unwrap(), + ); + hasher.finalize() + } + + /// The dealer's per-recipient Merkle root for `id`. Returns [InvalidProof] if `id` is + /// out of range. + fn recipient_root(&self, id: PartyId) -> FastCryptoResult<&merkle::Node> { + self.recipient_roots.get(id as usize).ok_or(InvalidProof) + } } impl ShareBatch { @@ -1045,11 +1050,6 @@ fn uleb128_len(x: usize) -> usize { len } -/// Require that every echo's `common_message_hash` agrees and return that hash. -fn require_uniform_common_message_hash(echoes: &[Echo]) -> FastCryptoResult { - get_uniform_value(echoes.iter().map(|e| e.common_message_hash)).ok_or(InvalidMessage) -} - fn compute_challenge( random_oracle: &RandomOracle, c: &[G], @@ -1079,28 +1079,6 @@ fn compute_challenge_from_common_message( ) } -fn compute_common_message_hash(message: &CommonMessage) -> Digest { - let CommonMessage { - shared, - full_public_keys, - blinding_commit, - response_polynomial, - recipient_roots, - } = message; - let mut hasher = Blake2b256::new(); - hasher.update( - bcs::to_bytes(&( - shared, - full_public_keys, - blinding_commit, - response_polynomial, - recipient_roots, - )) - .unwrap(), - ); - hasher.finalize() -} - #[cfg(test)] mod tests { use super::{ From 3f62ec348d6c83f6b04c863a2adb9a4c45fcb28f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 14:46:16 +0200 Subject: [PATCH 64/91] Rewrite batch_avss module doc --- .../src/threshold_schnorr/batch_avss.rs | 87 +++++++++++++------ 1 file changed, 59 insertions(+), 28 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 2b010c5bf8..3538e168ed 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ 
b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -2,44 +2,75 @@ // SPDX-License-Identifier: Apache-2.0 //! Asynchronous verifiable secret sharing (AVSS) for a batch of random nonces. -//! An AVID layer based on Reed-Solomon `(W, W − 2f)` and per-recipient Merkle commitments lets each -//! receiver authenticate the dealer's broadcast even if they did not receive it directly. //! -//! # Setup +//! # What it does //! -//! * Each receiver holds an ECIES key pair from [crate::ecies_v1]. The public keys are -//! advertised through the shared [Nodes] structure together with each party's weight. -//! * One party is designated dealer and constructs a [Dealer] with `nodes`, `f` -//! (Byzantine bound by weight), `t` (recovery threshold), session id, and -//! `batch_size_per_weight`. The receivers uses a [Receiver] with matching parameters. +//! A single dealer commits to a batch of `L` random nonces `r_1, …, r_L` and distributes +//! shares to `n` weighted receivers forming a `t`-of-`W` threshold (with `W = Σ_j w_j` total +//! weight, `f` the Byzantine bound by weight, and `L = w_dealer · BATCH_SIZE`). Every honest +//! receiver `j` ends up with `p_l(i_{j,1}), …, p_l(i_{j,w_j})` for every secret `r_l`, where +//! `p_l` is a degree-`(t−1)` polynomial with `p_l(0) = r_l`. Any `≥ t` valid shares reconstruct +//! `r_l`. +//! +//! # Two layers +//! +//! The dealer's broadcast (the [CommonMessage]) carries the public commitments +//! `c_l = g^{r_l}`, the blinding commitment `c' = g^{r'}`, the *response polynomial* `p''(X)`, +//! and the per-recipient Merkle roots `r_1, …, r_n`. +//! +//! **AVID layer.** The dealer encrypts each receiver's shares under multi-recipient ECIES, +//! RS-encodes the per-recipient ciphertexts under a `(W, W−2f)` code, and Merkle-commits each +//! ciphertext's shards into the root `r_i`. Receivers exchange small [Echo]s so any quorum can +//! reconstruct a ciphertext even if the dealer didn't reach them directly. +//! +//! 
**AVSS layer.** Each receiver decrypts their own ciphertext to get their shares. The +//! response polynomial `p''(X) = p'(X) + Σ_l γ_l · p_l(X)` — a degree-`(t−1)` linear +//! combination of all `L` sharing polynomials plus a blinding `p'`, where `γ_l` is a +//! Fiat-Shamir challenge over *all* dealer commitments — lets the receiver verify their shares +//! with one polynomial identity (construction from [eprint/2023/536](https://eprint.iacr.org/2023/536)). +//! Because `γ` binds to every public root, the dealer can't equivocate later. //! //! # Happy path //! -//! 1. The dealer calls [Dealer::create_message] and sends each [Message] to its recipient. -//! 2. Every receiver calls [Receiver::echo] and broadcasts each resulting [Echo] to the -//! indexed recipient. -//! 3. Each receiver collects [Echo]s and runs [Receiver::decode_ciphertext] for their -//! own id, yielding a [DecodeOutcome::Decoded] ciphertext. -//! 4. They feed the ciphertext to [Receiver::verify_and_decrypt], which yields a -//! [DecryptionOutcome::Valid] containing this party's [ReceiverOutput] and a [Vote] to -//! broadcast on the TOB/ABC channel if both this and [Receiver::decode_ciphertext] succeeded. +//! 1. **Dealer.** Build a [Message] per receiver and send it point-to-point. +//! 2. **Echo.** Each receiver verifies their dispersal entry and sends an [Echo] to every other +//! recipient with their shard for that recipient's ciphertext. +//! 3. **Decode.** Collect `≥ W−2f` valid echoes for the same [CommonMessage] and run +//! [Receiver::decode_ciphertext]. +//! 4. **Verify-and-decrypt.** Run the polynomial commitment check +//! `g^{p''(0)} = c' · ∏ c_l^{γ_l}`, decrypt the ciphertext, and verify each share pointwise +//! against `p''`. +//! 5. **Vote.** Once enough valid echoes have been collected in step 2 and step 4 succeeds, the +//! receiver sends a [Vote] to the dealer. +//! 6. The dealer collects `≥ W−f` votes (by weight) into a certificate posted on the TOB. The +//! 
broadcast is now *certified* — every party agrees on `common_message_hash`. +//! 7. A receiver that saw the certificate but missed the original [Message] or enough echoes +//! fetches [CommonMessage] / echoes from a voter, then runs steps 3–4 (without sending a +//! [Vote]). //! -//! Receivers should keep the common part of the message, [CommonMessage], for the lifetime of -//! the protocol since it is needed to handle complaints. +//! Receivers should retain their [Message], [Echo]s, and decoded ciphertext for the lifetime of +//! the session — they are needed for complaint validation, or in case other receivers ask for +//! them. //! //! # Complaint paths //! -//! If a receiver in [Receiver::decode_ciphertext] detects that AVID dispersal is -//! inconsistent, it returns a self-contained [Blame] carrying the collected per-sender -//! [AuthenticatedShards] as evidence. If a receiver in [Receiver::verify_and_decrypt] detects -//! that decryption fails -//! or yields shares that don't verify, it returns a [Reveal] complaint. +//! Complaints are broadcast only after the certificate is in place; the certificate is what +//! pins down the [CommonMessage] every validation hinges on. +//! +//! - **[Reveal]** (encryption-layer fault, raised in step 4). Decryption fails or the shares +//! don't satisfy `p''`. The accuser publishes a `Reveal` with their ciphertext and an ECIES +//! recovery package; verifiers re-bind the ciphertext to the dealer's broadcast and use the +//! recovery package to confirm decryption yields invalid shares. +//! - **[Blame]** (dispersal-layer fault, raised in step 3). RS-decode fails or the recovered +//! ciphertext doesn't re-encode to `recipient_roots[accuser]`. The accuser publishes a +//! `Blame` with the collected per-sender [AuthenticatedShards] as evidence; verifiers re-run +//! the same decode-and-re-encode check on the carried shards. //! -//! The accuser broadcasts the [Reveal] / [Blame] after at least `W − f` votes have accrued on -//! 
the TOB/ABC channel. Other receivers validate it via [Receiver::handle_reveal] / -//! [Receiver::handle_blame] and respond with their own shares. Once `≥ t` weight of valid -//! responses has arrived, the accuser calls [Receiver::recover] to interpolate their own -//! shares from those responses. +//! Verifiers respond to a valid complaint with a [ComplaintResponse] carrying their own +//! ciphertext plus a recovery package. The accuser AVID-binds each responder's ciphertext, +//! decrypts via the recovery package, verifies the shares against `p''`, and +//! Lagrange-interpolates once `≥ t` weight of valid responses has accrued +//! (see [Receiver::recover]). use crate::ecies_v1::{MultiRecipientEncryption, PrivateKey, RecoveryPackage, SharedComponents}; use crate::nodes::{Nodes, PartyId}; From 28786e5bbfc4f1972bf173b49dd41a912fb476c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 15:16:48 +0200 Subject: [PATCH 65/91] Touch up batch_avss docs and decode_ciphertext branch --- .../src/threshold_schnorr/batch_avss.rs | 114 ++++++++++-------- 1 file changed, 62 insertions(+), 52 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 3538e168ed..89db8a5e39 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -48,9 +48,9 @@ //! fetches [CommonMessage] / echoes from a voter, then runs steps 3–4 (without sending a //! [Vote]). //! -//! Receivers should retain their [Message], [Echo]s, and decoded ciphertext for the lifetime of -//! the session — they are needed for complaint validation, or in case other receivers ask for -//! them. +//! Receivers should retain the [CommonMessage] for the lifetime of the session — it is required +//! to validate complaints and build a [ComplaintResponse]. The [Echo]s and the decoded +//! 
ciphertext should also be kept so laggards (step 7) can fetch them. //! //! # Complaint paths //! @@ -169,8 +169,9 @@ pub struct Echo { } /// The result of [Receiver::decode_ciphertext]: either a successfully reconstructed -/// ciphertext whose AVID dispersal is consistent, or a [Blame] when the re-encoded ciphertext -/// disagrees with the dealer's `r_i`. +/// ciphertext whose AVID dispersal is consistent, or a [Blame] when the collected shards either +/// fail to RS-decode or decode to a ciphertext whose re-encoding disagrees with the dealer's +/// `r_i`. #[allow(clippy::large_enum_variant)] pub enum DecodeOutcome { Decoded(Vec), @@ -195,7 +196,6 @@ pub struct Vote { pub struct Reveal { pub proof: complaint::Complaint, pub ciphertext: Vec, - /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest, } @@ -207,7 +207,6 @@ pub struct Reveal { pub struct Blame { pub accuser_id: PartyId, pub shards: BTreeMap, - /// Hash binding the complaint to a specific [CommonMessage]. pub common_message_hash: Digest, } @@ -233,7 +232,8 @@ pub struct ReceiverOutput { /// If we say that node i has a weight `W_i`, we have /// `indices().len() == shares_for_secret(i).len() == weight() = W_i` /// -/// These can be created either by decrypting the shares from the dealer (see [Receiver::decode_ciphertext]) or by recovering them from complaint responses. +/// Produced by [Receiver::verify_and_decrypt] on the happy path, or by [Receiver::recover] +/// from complaint responses. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SharesForNode { pub shares: Vec, @@ -516,6 +516,9 @@ impl Receiver { /// 3. Reconstruct this receiver's ciphertext from received [Echo]s. Returns /// [DecodeOutcome::Decoded] when the AVID dispersal is consistent with the dealer's /// `r_{self.id}`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. 
+ /// + /// Invalid echos are filtered out here and a [NotEnoughWeight] error is returned + /// if the valid echoes don't contribute `≥ W−2f` weight. pub fn decode_ciphertext( &self, echos: &[Echo], @@ -556,19 +559,21 @@ impl Receiver { .into_iter() .map(|e| (e.sender, e.authenticated_shards)) .collect(); - let decoded = self - .reconstruct_ciphertext(self.id, &shards) - .ok() - .filter(|ct| self.check_avid_consistency(ct, recipient_root).is_ok()); - Ok(match decoded { - Some(ct) => DecodeOutcome::Decoded(ct), - None => DecodeOutcome::InvalidDispersal(Blame { - accuser_id: self.id, - shards, - common_message_hash, - }), - }) + Ok(self + .reconstruct_ciphertext(self.id, &shards) + .and_then(|ct| { + self.check_avid_consistency(&ct, recipient_root)?; + Ok(ct) + }) + .map(DecodeOutcome::Decoded) + .unwrap_or_else(|_| { + DecodeOutcome::InvalidDispersal(Blame { + accuser_id: self.id, + shards, + common_message_hash, + }) + })) } /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. 
@@ -598,16 +603,18 @@ impl Receiver { &random_oracle_encryption, self.id as usize, ); - match SharesForNode::from_bytes(plaintext).and_then(|my_shares| { - my_shares.verify( - common_message, - &challenge, - self.nodes.weight_of(self.id)?, - self.batch_size, - )?; - Ok(my_shares) - }) { - Ok(my_shares) => Ok(DecryptionOutcome::Valid { + + SharesForNode::from_bytes(plaintext) + .and_then(|my_shares| { + my_shares.verify( + common_message, + &challenge, + self.nodes.weight_of(self.id)?, + self.batch_size, + )?; + Ok(my_shares) + }) + .map(|my_shares| DecryptionOutcome::Valid { output: ReceiverOutput { my_shares, public_keys: full_public_keys.clone(), @@ -615,19 +622,20 @@ impl Receiver { vote: Vote { common_message_hash, }, - }), - Err(_) => Ok(DecryptionOutcome::InvalidShares(Reveal { - proof: complaint::Complaint::create( - self.id, - shared, - &self.enc_secret_key, - &self.random_oracle(), - &mut rand::thread_rng(), - ), - ciphertext, - common_message_hash, - })), - } + }) + .or_else(|_| { + Ok(DecryptionOutcome::InvalidShares(Reveal { + proof: complaint::Complaint::create( + self.id, + shared, + &self.enc_secret_key, + &self.random_oracle(), + &mut rand::thread_rng(), + ), + ciphertext, + common_message_hash, + })) + }) } /// 5a. 
Validate a [Reveal] complaint and respond with this party's own shares so the @@ -647,16 +655,15 @@ impl Receiver { ciphertext: reveal_ciphertext, common_message_hash, } = reveal; - let accuser_id = proof.accuser_id; if *common_message_hash != common_message.hash() { return Err(InvalidProof); } - let recipient_root = common_message.recipient_root(accuser_id)?; + let recipient_root = common_message.recipient_root(proof.accuser_id)?; self.check_avid_consistency(reveal_ciphertext, recipient_root) .map_err(|_| InvalidProof)?; - let accuser_pk = &self.nodes.node_id_to_node(accuser_id)?.pk; - let accuser_weight = self.nodes.weight_of(accuser_id)?; + let accuser_pk = &self.nodes.node_id_to_node(proof.accuser_id)?.pk; + let accuser_weight = self.nodes.weight_of(proof.accuser_id)?; proof.check( accuser_pk, reveal_ciphertext, @@ -710,11 +717,11 @@ impl Receiver { // The blame is valid iff the contributed shards either fail to RS-decode (they don't // lie on a single codeword) or decode to a ciphertext whose re-encoding doesn't match // the accuser's `r_i`. - let dispersal_consistent = self + if self .reconstruct_ciphertext(*accuser_id, shards) .ok() - .is_some_and(|ct| self.check_avid_consistency(&ct, recipient_root).is_ok()); - if dispersal_consistent { + .is_some_and(|ct| self.check_avid_consistency(&ct, recipient_root).is_ok()) + { return Err(InvalidProof); } @@ -741,8 +748,11 @@ impl Receiver { } } - /// 6. Upon receiving t valid responses to a complaint, the accuser can recover its shares. - /// Fails if there are not enough valid responses to recover the shares or if any of the responses come from an invalid party. + /// 6. Recover the accuser's own shares from a set of [ComplaintResponse]s. Each response is + /// AVID-bound to the dealer's broadcast, decrypted via its recovery package, and the + /// shares are checked against `p''`; responses that don't authenticate are silently + /// dropped. 
Fails if `common_message` is malformed, the surviving responses contribute + /// `< t` weight, or the interpolated shares fail final verification. pub fn recover( &self, common_message: &CommonMessage, From fe79d65499db624daab8c69ef70e12f3cd65797c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 16:05:52 +0200 Subject: [PATCH 66/91] Various clean up --- .../src/threshold_schnorr/batch_avss.rs | 74 +++++++++---------- 1 file changed, 36 insertions(+), 38 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 89db8a5e39..862ea91e89 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -374,8 +374,7 @@ impl Dealer { .iter() .map(recipient_tree) .collect::>>()?; - let recipient_roots: Vec = - recipient_trees.iter().map(MerkleTree::root).collect(); + let recipient_roots = recipient_trees.iter().map(MerkleTree::root).collect_vec(); let dispersals: Vec> = self .nodes @@ -528,9 +527,14 @@ impl Receiver { let recipient_root = common_message.recipient_root(self.id)?; // Filter out invalid echo messages: each echo's shards proof must verify against the - // dealer's `r_{self.id}`. + // dealer's `r_{self.id}` and the number of shards must be equal to the weight of the sender. 
let valid_echoes = echos .iter() + .filter(|echo| { + self.nodes + .weight_of(echo.sender) + .is_ok_and(|w| echo.authenticated_shards.shards.len() == w as usize) + }) .filter(|echo| echo.verify(recipient_root).is_ok()) .cloned() .collect_vec(); @@ -705,8 +709,9 @@ impl Receiver { if shards .iter() .any(|(sender, auth)| auth.verify(*sender as usize, recipient_root).is_err()) + // TODO: Check this { - return Err(InvalidProof); + return Ok(self.build_complaint_response(common_message, ciphertext)); } let weight_of_shards = self.nodes.total_weight_of(shards.keys())?; @@ -767,36 +772,30 @@ impl Receiver { let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); let response_shares = responses .into_iter() - .filter_map(|response| { + .map(|response| { let responder_pk = self .nodes - .node_id_to_node(response.responder_id) - .ok()? + .node_id_to_node(response.responder_id)? .pk .clone(); - let weight = self.nodes.weight_of(response.responder_id).ok()?; - let recipient_root = common_message.recipient_root(response.responder_id).ok()?; - self.check_avid_consistency(&response.ciphertext, recipient_root) - .ok()?; - let plaintext = common_message - .shared - .decrypt_with_recovery_package( - &response.ciphertext, - &response.recovery_package, - &self - .random_oracle() - .extend(&Recovery(response.responder_id).to_string()), - &random_oracle_encryption, - &responder_pk, - response.responder_id as usize, - ) - .ok()?; - let shares = SharesForNode::from_bytes(&plaintext).ok()?; - shares - .verify(common_message, &challenge, weight, self.batch_size) - .ok()?; - Some(shares) + let weight = self.nodes.weight_of(response.responder_id)?; + let recipient_root = common_message.recipient_root(response.responder_id)?; + self.check_avid_consistency(&response.ciphertext, recipient_root)?; + let plaintext = common_message.shared.decrypt_with_recovery_package( + &response.ciphertext, + &response.recovery_package, + &self + .random_oracle() + 
.extend(&Recovery(response.responder_id).to_string()), + &random_oracle_encryption, + &responder_pk, + response.responder_id as usize, + )?; + let shares = SharesForNode::from_bytes(&plaintext)?; + shares.verify(common_message, &challenge, weight, self.batch_size)?; + Ok(shares) }) + .filter_map(FastCryptoResult::ok) .collect_vec(); // Compute the total weight of the valid responses @@ -837,29 +836,28 @@ impl Receiver { accuser_id: PartyId, shards: &BTreeMap, ) -> FastCryptoResult> { - let opt_shards: Vec> = self + let shards_matrix = self .nodes .node_ids_iter() - .map(|id| -> FastCryptoResult>> { + .map(|id| -> Vec> { let weight = self.nodes.weight_of(id).expect("valid party id") as usize; match shards.get(&id) { + // If the shards exist and are consistent with the weight, put them in the matrix. Otherwise, add a None, corresponding to an erasure. Some(auth) if auth.shards.len() == weight => { - Ok(auth.shards.iter().cloned().map(Some).collect()) + auth.shards.iter().cloned().map(Some).collect_vec() } - // Fail if a contributor's shard count doesn't match its weight. - Some(_) => Err(InvalidInput), - None => Ok(vec![None; weight]), + _ => vec![None; weight], } }) - .flatten_ok() - .collect::>>()?; + .flatten() + .collect_vec(); // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. let expected_length = SharesForNode::bcs_serialized_size( self.nodes.weight_of(accuser_id)? 
as usize, self.batch_size, ); - self.code.decode(opt_shards, expected_length) + self.code.decode(shards_matrix, expected_length) } /// The check r_i' == r_i from the paper From 22f43a1238d99ff7ce579eecac4d098eee48be67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 16:09:59 +0200 Subject: [PATCH 67/91] Collapse map().flatten() to flat_map --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 862ea91e89..0db304583c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -839,7 +839,7 @@ impl Receiver { let shards_matrix = self .nodes .node_ids_iter() - .map(|id| -> Vec> { + .flat_map(|id| -> Vec> { let weight = self.nodes.weight_of(id).expect("valid party id") as usize; match shards.get(&id) { // If the shards exist and are consistent with the weight, put them in the matrix. Otherwise, add a None, corresponding to an erasure. @@ -849,7 +849,6 @@ impl Receiver { _ => vec![None; weight], } }) - .flatten() .collect_vec(); // The encryption used, counter-mode, is length-preserving, so the length of the ciphertext is equal to the length of the plaintext. 
From 06190603ff401f966a58f0cea015058627aa8ec1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 16:22:20 +0200 Subject: [PATCH 68/91] Rename DecryptionOutcome::InvalidShares to Invalid; verify_and_decrypt takes &[u8] --- fastcrypto-tbls/benches/batch_avss.rs | 2 +- .../src/threshold_schnorr/batch_avss.rs | 28 +++++++++---------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 9c5e6643ee..c90350a213 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -198,7 +198,7 @@ mod batch_avss_benches { }; assert_valid_batch( receivers[1] - .verify_and_decrypt(pem, &messages[1].common) + .verify_and_decrypt(&pem, &messages[1].common) .unwrap(), ) }) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 0db304583c..604e1d2856 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -182,7 +182,7 @@ pub enum DecodeOutcome { #[allow(clippy::large_enum_variant)] pub enum DecryptionOutcome { Valid { output: ReceiverOutput, vote: Vote }, - InvalidShares(Reveal), + Invalid(Reveal), } /// An endorsement of the dealer's broadcast. @@ -582,10 +582,10 @@ impl Receiver { /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. /// Yields [DecryptionOutcome::Valid] (with a [Vote] to broadcast) when shares verify, or - /// [DecryptionOutcome::InvalidShares] (a [Reveal]) otherwise. + /// [DecryptionOutcome::Invalid] (a [Reveal]) otherwise. 
pub fn verify_and_decrypt( &self, - ciphertext: Vec, + ciphertext: &[u8], common_message: &CommonMessage, ) -> FastCryptoResult { let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; @@ -602,7 +602,7 @@ impl Receiver { let common_message_hash = common_message.hash(); let plaintext = shared.decrypt( - &ciphertext, + ciphertext, &self.enc_secret_key, &random_oracle_encryption, self.id as usize, @@ -628,7 +628,7 @@ impl Receiver { }, }) .or_else(|_| { - Ok(DecryptionOutcome::InvalidShares(Reveal { + Ok(DecryptionOutcome::Invalid(Reveal { proof: complaint::Complaint::create( self.id, shared, @@ -636,7 +636,7 @@ impl Receiver { &self.random_oracle(), &mut rand::thread_rng(), ), - ciphertext, + ciphertext: ciphertext.to_vec(), common_message_hash, })) }) @@ -1251,7 +1251,7 @@ mod tests { .zip(messages) .map(|((receiver, pem), message)| { let output = - assert_valid(receiver.verify_and_decrypt(pem, &message.common).unwrap()); + assert_valid(receiver.verify_and_decrypt(&pem, &message.common).unwrap()); (receiver.id, output) }) .collect::>(); @@ -1284,7 +1284,7 @@ mod tests { fn test_share_recovery() { // Dealer is honest at the AVID layer (consistent dispersal) but flips a byte in // receiver 0's plaintext, so receiver 0's decryption succeeds but the resulting - // SharesForNode fails verification — triggering a InvalidShares complaint. The other receivers + // SharesForNode fails verification — triggering an Invalid complaint. The other receivers // verify the complaint and respond with their own shares; receiver 0 reconstructs. let t = 3; let f = 2; @@ -1362,19 +1362,19 @@ mod tests { ciphertexts.insert(r.id, pem.clone()); ( r.id, - r.verify_and_decrypt(pem, &messages[r.id as usize].common) + r.verify_and_decrypt(&pem, &messages[r.id as usize].common) .unwrap(), ) }) .collect(); - // Receiver 0 (the targeted victim) emits a InvalidShares complaint. + // Receiver 0 (the targeted victim) emits an Invalid complaint. 
let victim_id = 0u16; let mut outcomes = outcomes; let reveal = match outcomes.remove(&victim_id).unwrap() { - DecryptionOutcome::InvalidShares(r) => r, + DecryptionOutcome::Invalid(r) => r, ref other => panic!( - "expected InvalidShares from victim, got {:?}", + "expected Invalid from victim, got {:?}", outcome_kind(other) ), }; @@ -1536,7 +1536,7 @@ mod tests { let pem = assert_decoded(decoded); ciphertexts.insert(id, pem.clone()); let outcome = receivers[id as usize] - .verify_and_decrypt(pem, &messages[id as usize].common) + .verify_and_decrypt(&pem, &messages[id as usize].common) .unwrap(); let output = match outcome { DecryptionOutcome::Valid { output, .. } => output, @@ -1602,7 +1602,7 @@ mod tests { fn outcome_kind(outcome: &DecryptionOutcome) -> &'static str { match outcome { DecryptionOutcome::Valid { .. } => "Valid", - DecryptionOutcome::InvalidShares(_) => "InvalidShares", + DecryptionOutcome::Invalid(_) => "Invalid", } } diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index b9ed009a58..36a89a8d3a 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -254,7 +254,7 @@ mod tests { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; - let output = assert_valid_batch(r.verify_and_decrypt(pem, &msg.common).unwrap()); + let output = assert_valid_batch(r.verify_and_decrypt(&pem, &msg.common).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } } From 99171874b1403267ed653db6cb3ed87b3594d83a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 16:23:36 +0200 Subject: [PATCH 69/91] simplify --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 604e1d2856..7c2c39dcc2 100644 --- 
a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -568,9 +568,8 @@ impl Receiver { .reconstruct_ciphertext(self.id, &shards) .and_then(|ct| { self.check_avid_consistency(&ct, recipient_root)?; - Ok(ct) + Ok(DecodeOutcome::Decoded(ct)) }) - .map(DecodeOutcome::Decoded) .unwrap_or_else(|_| { DecodeOutcome::InvalidDispersal(Blame { accuser_id: self.id, From 05c022c27b849e2d442de42ae657416bab34c0fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Wed, 6 May 2026 16:28:47 +0200 Subject: [PATCH 70/91] clippy --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7c2c39dcc2..b1a7d25230 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -570,13 +570,11 @@ impl Receiver { self.check_avid_consistency(&ct, recipient_root)?; Ok(DecodeOutcome::Decoded(ct)) }) - .unwrap_or_else(|_| { - DecodeOutcome::InvalidDispersal(Blame { - accuser_id: self.id, - shards, - common_message_hash, - }) - })) + .unwrap_or(DecodeOutcome::InvalidDispersal(Blame { + accuser_id: self.id, + shards, + common_message_hash, + }))) } /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. 
From f7abac0582d1b73f17addfc67d97b57f19643c78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 09:45:59 +0200 Subject: [PATCH 71/91] Add VerifiedEcho; Receiver::verify_echo wraps private Echo::verify --- fastcrypto-tbls/benches/batch_avss.rs | 20 +++- .../src/threshold_schnorr/batch_avss.rs | 112 ++++++++++++------ fastcrypto-tbls/src/threshold_schnorr/mod.rs | 6 +- 3 files changed, 94 insertions(+), 44 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index c90350a213..e4d50ba0a8 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -135,8 +135,14 @@ mod batch_avss_benches { .enumerate() .map(|(i, r)| r.echo(&messages[i]).unwrap()) .collect(); - let echoes_for_party_1: Vec = - echoes.iter().map(|em| em[1].clone()).collect(); + let echoes_for_party_1: Vec = echoes + .iter() + .map(|em| { + receivers[1] + .verify_echo(em[1].clone(), &messages[1].common) + .unwrap() + }) + .collect(); let r1 = &receivers[1]; process.bench_function( @@ -187,8 +193,14 @@ mod batch_avss_benches { .enumerate() .map(|(i, r)| r.echo(&messages[i]).unwrap()) .collect(); - let echoes_for_party_1: Vec = - echoes.iter().map(|em| em[1].clone()).collect(); + let echoes_for_party_1: Vec = echoes + .iter() + .map(|em| { + receivers[1] + .verify_echo(em[1].clone(), &messages[1].common) + .unwrap() + }) + .collect(); let pem = match receivers[1] .decode_ciphertext(&echoes_for_party_1, &messages[1].common) .unwrap() diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index b1a7d25230..cafc3c438e 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -165,9 +165,15 @@ pub struct AuthenticatedShards { pub struct Echo { sender: PartyId, authenticated_shards: AuthenticatedShards, - common_message_hash: Digest, + pub 
common_message_hash: Digest, } +/// An [Echo] that has been verified by [Receiver::verify_echo] against a specific +/// [CommonMessage]: the sender's shard count matches their weight, the Merkle proof checks out +/// against the receiver's `recipient_root`, and the echo's `common_message_hash` matches. +#[derive(Clone, Debug)] +pub struct VerifiedEcho(Echo); + /// The result of [Receiver::decode_ciphertext]: either a successfully reconstructed /// ciphertext whose AVID dispersal is consistent, or a [Blame] when the collected shards either /// fail to RS-decode or decode to a ciphertext whose re-encoding disagrees with the dealer's @@ -512,45 +518,35 @@ impl Receiver { .collect()) } - /// 3. Reconstruct this receiver's ciphertext from received [Echo]s. Returns + /// Convenience wrapper around [Echo::verify] that looks up the sender's weight from + /// [Self::nodes] and uses [Self::id] as the recipient. + pub fn verify_echo( + &self, + echo: Echo, + common_message: &CommonMessage, + ) -> FastCryptoResult { + let weight = self.nodes.weight_of(echo.sender)?; + echo.verify(weight, self.id, common_message) + } + + /// 3. Reconstruct this receiver's ciphertext from a quorum of [VerifiedEcho]s. Returns /// [DecodeOutcome::Decoded] when the AVID dispersal is consistent with the dealer's /// `r_{self.id}`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. /// - /// Invalid echos are filtered out here and a [NotEnoughWeight] error is returned - /// if the valid echoes don't contribute `≥ W−2f` weight. + /// Echoes must already be validated via [Self::verify_echo]. Returns [NotEnoughWeight] if + /// the senders contribute `< W − 2f` weight. 
pub fn decode_ciphertext( &self, - echos: &[Echo], + echos: &[VerifiedEcho], common_message: &CommonMessage, ) -> FastCryptoResult { common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let recipient_root = common_message.recipient_root(self.id)?; - // Filter out invalid echo messages: each echo's shards proof must verify against the - // dealer's `r_{self.id}` and the number of shards must be equal to the weight of the sender. - let valid_echoes = echos - .iter() - .filter(|echo| { - self.nodes - .weight_of(echo.sender) - .is_ok_and(|w| echo.authenticated_shards.shards.len() == w as usize) - }) - .filter(|echo| echo.verify(recipient_root).is_ok()) - .cloned() - .collect_vec(); - - let common_message_hash = - get_uniform_value(valid_echoes.iter().map(|e| e.common_message_hash)) - .ok_or(InvalidMessage)?; - if common_message_hash != common_message.hash() { - return Err(InvalidMessage); - } - - // TODO: Double-check that this is ok let required_weight = self.nodes.total_weight() - 2 * self.f; if self .nodes - .total_weight_of(valid_echoes.iter().map(|echo| &echo.sender))? + .total_weight_of(echos.iter().map(|e| &e.0.sender))? < required_weight { return Err(NotEnoughWeight(required_weight as usize)); @@ -559,9 +555,10 @@ impl Receiver { // Try to RS-decode the ciphertext and re-encode it. The dispersal is consistent iff // both succeed and the re-encoded root matches `r_{self.id}`. Otherwise the dealer's // dispersal is inconsistent — package the collected shards into a self-contained [Blame]. 
- let shards: BTreeMap = valid_echoes - .into_iter() - .map(|e| (e.sender, e.authenticated_shards)) + let shards: BTreeMap = echos + .iter() + .cloned() + .map(|e| (e.0.sender, e.0.authenticated_shards)) .collect(); Ok(self @@ -573,7 +570,7 @@ impl Receiver { .unwrap_or(DecodeOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, - common_message_hash, + common_message_hash: common_message.hash(), }))) } @@ -1059,11 +1056,26 @@ impl AuthenticatedShards { } impl Echo { - /// Verify the shard's Merkle proof against `recipient_root` (the dealer's `r_i` for the - /// recipient this echo is addressed to) at `sender`'s leaf. - fn verify(&self, recipient_root: &merkle::Node) -> FastCryptoResult<()> { - self.authenticated_shards - .verify(self.sender as usize, recipient_root) + /// Verify this echo against `common_message` for the recipient `receiver_id`: the sender's + /// shard count matches `sender_weight`, the Merkle proof checks against + /// `recipient_roots[receiver_id]`, and the echo's `common_message_hash` matches. 
+ fn verify( + self, + sender_weight: u16, + receiver_id: PartyId, + common_message: &CommonMessage, + ) -> FastCryptoResult { + if self.authenticated_shards.shards.len() != sender_weight as usize { + return Err(InvalidMessage); + } + self.authenticated_shards.verify( + self.sender as usize, + common_message.recipient_root(receiver_id)?, + )?; + if self.common_message_hash != common_message.hash() { + return Err(InvalidMessage); + } + Ok(VerifiedEcho(self)) } } @@ -1238,7 +1250,15 @@ mod tests { .zip(messages.iter()) .zip(echoes_by_recipient.iter()) .map(|((receiver, message), echoes)| { - assert_decoded(receiver.decode_ciphertext(echoes, &message.common).unwrap()) + let verified = echoes + .iter() + .map(|e| receiver.verify_echo(e.clone(), &message.common).unwrap()) + .collect_vec(); + assert_decoded( + receiver + .decode_ciphertext(&verified, &message.common) + .unwrap(), + ) }) .collect_vec(); @@ -1352,8 +1372,15 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { + let verified = echoes + .iter() + .map(|e| { + r.verify_echo(e.clone(), &messages[r.id as usize].common) + .unwrap() + }) + .collect_vec(); let pem = assert_decoded( - r.decode_ciphertext(echoes, &messages[r.id as usize].common) + r.decode_ciphertext(&verified, &messages[r.id as usize].common) .unwrap(), ); ciphertexts.insert(r.id, pem.clone()); @@ -1513,9 +1540,16 @@ mod tests { .iter() .zip(echoes_per_recipient.iter()) .map(|(r, echoes)| { + let verified = echoes + .iter() + .map(|e| { + r.verify_echo(e.clone(), &messages[r.id as usize].common) + .unwrap() + }) + .collect_vec(); ( r.id, - r.decode_ciphertext(echoes, &messages[r.id as usize].common) + r.decode_ciphertext(&verified, &messages[r.id as usize].common) .unwrap(), ) }) diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 36a89a8d3a..9fdcf1c26a 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ 
-250,7 +250,11 @@ mod tests { // Each receiver processes the message. // In this case, we assume all are honest and there are no complaints. for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { - let pem = match r.decode_ciphertext(echoes, &msg.common).unwrap() { + let verified = echoes + .iter() + .map(|e| r.verify_echo(e.clone(), &msg.common).unwrap()) + .collect::>(); + let pem = match r.decode_ciphertext(&verified, &msg.common).unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; From 614cfac55ad3125aa09d24233c87576ac87e2b68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 10:03:01 +0200 Subject: [PATCH 72/91] Add VerifiedComplaintResponse and Receiver::verify_complaint_response --- .../src/threshold_schnorr/batch_avss.rs | 118 ++++++++++-------- 1 file changed, 67 insertions(+), 51 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index cafc3c438e..43bf6307bb 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -227,6 +227,12 @@ pub struct ComplaintResponse { pub recovery_package: RecoveryPackage, } +/// A [ComplaintResponse] that has been verified by [Receiver::verify_complaint_response]: the +/// carried ciphertext is AVID-bound to the dealer's broadcast, decryption via the recovery +/// package yielded shares satisfying the dealer's response polynomial. +#[derive(Clone, Debug)] +pub struct VerifiedComplaintResponse(SharesForNode); + /// The output of a receiver which is a batch of shares and public keys for all nonces. #[derive(Debug, Clone)] pub struct ReceiverOutput { @@ -518,8 +524,10 @@ impl Receiver { .collect()) } - /// Convenience wrapper around [Echo::verify] that looks up the sender's weight from - /// [Self::nodes] and uses [Self::id] as the recipient. 
+ /// Verify an [Echo] addressed to this receiver against `common_message`: the sender's shard + /// count matches their advertised weight, the Merkle proof checks against the receiver's + /// `recipient_root`, and the echo's `common_message_hash` matches. Returns a [VerifiedEcho] + /// suitable for [Self::decode_ciphertext]. pub fn verify_echo( &self, echo: Echo, @@ -541,7 +549,6 @@ impl Receiver { common_message: &CommonMessage, ) -> FastCryptoResult { common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - let recipient_root = common_message.recipient_root(self.id)?; let required_weight = self.nodes.total_weight() - 2 * self.f; if self @@ -564,7 +571,7 @@ impl Receiver { Ok(self .reconstruct_ciphertext(self.id, &shards) .and_then(|ct| { - self.check_avid_consistency(&ct, recipient_root)?; + self.check_avid_consistency(&ct, common_message.recipient_root(self.id)?)?; Ok(DecodeOutcome::Decoded(ct)) }) .unwrap_or(DecodeOutcome::InvalidDispersal(Blame { @@ -747,52 +754,53 @@ impl Receiver { } } - /// 6. Recover the accuser's own shares from a set of [ComplaintResponse]s. Each response is - /// AVID-bound to the dealer's broadcast, decrypted via its recovery package, and the - /// shares are checked against `p''`; responses that don't authenticate are silently - /// dropped. Fails if `common_message` is malformed, the surviving responses contribute - /// `< t` weight, or the interpolated shares fail final verification. + /// Verify a [ComplaintResponse] against `common_message`: confirm that the responder's + /// ciphertext is the one the dealer broadcast to them, that the recovery package decrypts + /// it, and that the recovered shares are the ones the dealer dealt. Returns a + /// [VerifiedComplaintResponse] suitable for [Self::recover]. 
+ pub fn verify_complaint_response( + &self, + response: ComplaintResponse, + common_message: &CommonMessage, + ) -> FastCryptoResult { + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); + let responder_pk = self + .nodes + .node_id_to_node(response.responder_id)? + .pk + .clone(); + let weight = self.nodes.weight_of(response.responder_id)?; + let recipient_root = common_message.recipient_root(response.responder_id)?; + self.check_avid_consistency(&response.ciphertext, recipient_root)?; + let plaintext = common_message.shared.decrypt_with_recovery_package( + &response.ciphertext, + &response.recovery_package, + &self + .random_oracle() + .extend(&Recovery(response.responder_id).to_string()), + &self.random_oracle().extend(&Encryption.to_string()), + &responder_pk, + response.responder_id as usize, + )?; + let shares = SharesForNode::from_bytes(&plaintext)?; + shares.verify(common_message, &challenge, weight, self.batch_size)?; + Ok(VerifiedComplaintResponse(shares)) + } + + /// 6. Recover the accuser's own shares from a quorum of [VerifiedComplaintResponse]s. + /// Responses must already be validated via [Self::verify_complaint_response]. Fails if + /// `common_message` is malformed, the responses contribute `< t` weight, or the + /// interpolated shares fail final verification. pub fn recover( &self, common_message: &CommonMessage, - responses: Vec, + responses: Vec, ) -> FastCryptoResult { let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - // Each response carries the responder's own dealer-encrypted ciphertext plus an ECIES - // recovery package. Authenticate the ciphertext under the dealer's broadcast, decrypt - // it via the recovery package, then sanity-check the shares against the response - // polynomial. Any failure drops the response. 
- let random_oracle_encryption = self.random_oracle().extend(&Encryption.to_string()); - let response_shares = responses - .into_iter() - .map(|response| { - let responder_pk = self - .nodes - .node_id_to_node(response.responder_id)? - .pk - .clone(); - let weight = self.nodes.weight_of(response.responder_id)?; - let recipient_root = common_message.recipient_root(response.responder_id)?; - self.check_avid_consistency(&response.ciphertext, recipient_root)?; - let plaintext = common_message.shared.decrypt_with_recovery_package( - &response.ciphertext, - &response.recovery_package, - &self - .random_oracle() - .extend(&Recovery(response.responder_id).to_string()), - &random_oracle_encryption, - &responder_pk, - response.responder_id as usize, - )?; - let shares = SharesForNode::from_bytes(&plaintext)?; - shares.verify(common_message, &challenge, weight, self.batch_size)?; - Ok(shares) - }) - .filter_map(FastCryptoResult::ok) - .collect_vec(); + let response_shares: Vec = responses.into_iter().map(|v| v.0).collect(); - // Compute the total weight of the valid responses let response_weight: u16 = response_shares.iter().map(SharesForNode::weight).sum(); if response_weight < self.t { return Err(FastCryptoError::InputTooShort(self.t as usize)); @@ -1429,10 +1437,14 @@ mod tests { }) .collect_vec(); - // Victim recovers via interpolation across t responses. - let recovered = receivers[victim_id as usize] - .recover(&messages[victim_id as usize].common, responses) - .unwrap(); + // Victim verifies and then recovers via interpolation across t responses. + let victim = &receivers[victim_id as usize]; + let common = &messages[victim_id as usize].common; + let verified_responses = responses + .into_iter() + .map(|r| victim.verify_complaint_response(r, common).unwrap()) + .collect_vec(); + let recovered = victim.recover(common, verified_responses).unwrap(); outputs.insert(victim_id, recovered); // Sanity: every receiver now holds verifiable shares for every secret. 
@@ -1594,10 +1606,14 @@ mod tests { }) .collect_vec(); - // Victim recovers via interpolation across t responses. - let recovered = receivers[victim_id as usize] - .recover(&messages[victim_id as usize].common, responses) - .unwrap(); + // Victim verifies and then recovers via interpolation across t responses. + let victim = &receivers[victim_id as usize]; + let common = &messages[victim_id as usize].common; + let verified_responses = responses + .into_iter() + .map(|r| victim.verify_complaint_response(r, common).unwrap()) + .collect_vec(); + let recovered = victim.recover(common, verified_responses).unwrap(); outputs.insert(victim_id, recovered); // Sanity: every receiver now holds verifiable shares for every secret. From 87caeb1a6326829683e0d76a1ab94b6448203681 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 10:11:54 +0200 Subject: [PATCH 73/91] Tidy verify_complaint_response --- .../src/threshold_schnorr/batch_avss.rs | 53 ++++++++++--------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 43bf6307bb..7751c033f3 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -763,28 +763,35 @@ impl Receiver { response: ComplaintResponse, common_message: &CommonMessage, ) -> FastCryptoResult { - let challenge = - compute_challenge_from_common_message(&self.random_oracle(), common_message); - let responder_pk = self - .nodes - .node_id_to_node(response.responder_id)? 
- .pk - .clone(); - let weight = self.nodes.weight_of(response.responder_id)?; - let recipient_root = common_message.recipient_root(response.responder_id)?; - self.check_avid_consistency(&response.ciphertext, recipient_root)?; - let plaintext = common_message.shared.decrypt_with_recovery_package( - &response.ciphertext, - &response.recovery_package, - &self - .random_oracle() - .extend(&Recovery(response.responder_id).to_string()), - &self.random_oracle().extend(&Encryption.to_string()), - &responder_pk, - response.responder_id as usize, + let ComplaintResponse { + responder_id, + ciphertext, + recovery_package, + } = response; + + self.check_avid_consistency(&ciphertext, common_message.recipient_root(responder_id)?)?; + let responder = self.nodes.node_id_to_node(responder_id)?; + let shares = common_message + .shared + .decrypt_with_recovery_package( + &ciphertext, + &recovery_package, + &self + .random_oracle() + .extend(&Recovery(responder_id).to_string()), + &self.random_oracle().extend(&Encryption.to_string()), + &responder.pk, + responder_id as usize, + ) + .and_then(SharesForNode::from_bytes)?; + + shares.verify( + common_message, + &compute_challenge_from_common_message(&self.random_oracle(), common_message), + responder.weight, + self.batch_size, )?; - let shares = SharesForNode::from_bytes(&plaintext)?; - shares.verify(common_message, &challenge, weight, self.batch_size)?; + Ok(VerifiedComplaintResponse(shares)) } @@ -797,15 +804,13 @@ impl Receiver { common_message: &CommonMessage, responses: Vec, ) -> FastCryptoResult { - let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - let response_shares: Vec = responses.into_iter().map(|v| v.0).collect(); - let response_weight: u16 = response_shares.iter().map(SharesForNode::weight).sum(); if response_weight < self.t { return Err(FastCryptoError::InputTooShort(self.t as usize)); } + let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; let 
my_shares = SharesForNode::recover(self, &response_shares)?; my_shares.verify( common_message, From 4fc0726d8cea10ad941dd76de9be2118bc833794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 10:34:11 +0200 Subject: [PATCH 74/91] Add VerifiedCommonMessage; CommonMessage::verify returns it --- fastcrypto-tbls/benches/batch_avss.rs | 47 ++-- .../src/threshold_schnorr/batch_avss.rs | 207 +++++++++--------- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 16 +- 3 files changed, 135 insertions(+), 135 deletions(-) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index e4d50ba0a8..6e37c820fc 100644 --- a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -130,29 +130,28 @@ mod batch_avss_benches { .map(|id| setup_receiver(id, 0, f, t, w, &keys, batch_size_per_weight)) .collect(); let messages = d0.create_message(&mut thread_rng()).unwrap(); + let mut vcm = None; let echoes: Vec> = receivers .iter() .enumerate() - .map(|(i, r)| r.echo(&messages[i]).unwrap()) + .map(|(i, r)| { + let (v, e) = r.echo(&messages[i]).unwrap(); + if i == 1 { + vcm = Some(v); + } + e + }) .collect(); + let vcm = vcm.unwrap(); let echoes_for_party_1: Vec = echoes .iter() - .map(|em| { - receivers[1] - .verify_echo(em[1].clone(), &messages[1].common) - .unwrap() - }) + .map(|em| receivers[1].verify_echo(em[1].clone(), &vcm).unwrap()) .collect(); let r1 = &receivers[1]; process.bench_function( format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), - |b| { - b.iter(|| { - r1.decode_ciphertext(&echoes_for_party_1, &messages[1].common) - .unwrap() - }) - }, + |b| b.iter(|| r1.decode_ciphertext(&echoes_for_party_1, &vcm).unwrap()), ); } } @@ -188,31 +187,31 @@ mod batch_avss_benches { ) }) .collect(); + let mut vcm = None; let echoes: Vec> = receivers .iter() .enumerate() - .map(|(i, r)| r.echo(&messages[i]).unwrap()) + .map(|(i, r)| { + let (v, e) = 
r.echo(&messages[i]).unwrap(); + if i == 1 { + vcm = Some(v); + } + e + }) .collect(); + let vcm = vcm.unwrap(); let echoes_for_party_1: Vec = echoes .iter() - .map(|em| { - receivers[1] - .verify_echo(em[1].clone(), &messages[1].common) - .unwrap() - }) + .map(|em| receivers[1].verify_echo(em[1].clone(), &vcm).unwrap()) .collect(); let pem = match receivers[1] - .decode_ciphertext(&echoes_for_party_1, &messages[1].common) + .decode_ciphertext(&echoes_for_party_1, &vcm) .unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; - assert_valid_batch( - receivers[1] - .verify_and_decrypt(&pem, &messages[1].common) - .unwrap(), - ) + assert_valid_batch(receivers[1].verify_and_decrypt(&pem, &vcm).unwrap()) }) .collect_vec(); diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 7751c033f3..e7f7ce6210 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -88,7 +88,7 @@ use fastcrypto::error::FastCryptoError::{ use fastcrypto::error::{FastCryptoError, FastCryptoResult}; use fastcrypto::groups::secp256k1::SCALAR_SIZE_IN_BYTES; use fastcrypto::groups::{GroupElement, MultiScalarMul, Scalar}; -use fastcrypto::hash::{Blake2b256, HashFunction, Sha3_512}; +use fastcrypto::hash::{Blake2b256, HashFunction}; use fastcrypto::merkle; use fastcrypto::merkle::MerkleTree; use fastcrypto::traits::AllowedRng; @@ -136,11 +136,9 @@ pub struct Message { dispersal: Vec, } -/// The shared part of the dealer's broadcast — identical for every receiver and required by -/// every later step ([Receiver::decode_ciphertext], [Receiver::verify_and_decrypt], -/// [Receiver::handle_reveal], [Receiver::handle_blame], [Receiver::recover]). Receivers should -/// keep it around for the lifetime of the session. 
A receiver that didn't get a [Message] from -/// the dealer should fetch the [CommonMessage] from another receiver who did. +/// The shared part of the dealer's broadcast — identical for every receiver. Receivers must run +/// it through [Receiver::verify_common_message] before any further step; the resulting +/// [VerifiedCommonMessage] is what every later API consumes. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CommonMessage { full_public_keys: Vec, @@ -150,6 +148,12 @@ pub struct CommonMessage { recipient_roots: Vec, } +/// A [CommonMessage] that has been validated against the dealer's commitments. Receivers +/// should keep it around for the lifetime of the session. +// TODO: We can cache the hash and challenge here if it makes sense. +#[derive(Clone, Debug)] +pub struct VerifiedCommonMessage(pub CommonMessage); + /// One recipient's shards for one ciphertext, with a Merkle proof verifying against the /// corresponding `recipient_root` from [CommonMessage]. #[derive(Clone, Debug, Serialize, Deserialize)] @@ -497,8 +501,10 @@ impl Receiver { } /// 2. Verify the dispersal entries against `recipient_roots` and emit one [Echo] per - /// recipient (indexed by recipient id) for the receiver to broadcast. - pub fn echo(&self, message: &Message) -> FastCryptoResult> { + /// recipient (indexed by recipient id) for the receiver to broadcast. Also returns the + /// [VerifiedCommonMessage] that the receiver should keep around for the rest of the + /// session. 
+ pub fn echo(&self, message: &Message) -> FastCryptoResult<(VerifiedCommonMessage, Vec)> { if message.dispersal.len() != message.common.recipient_roots.len() { return Err(InvalidMessage); } @@ -511,8 +517,9 @@ impl Receiver { return Err(InvalidMessage); } - let common_message_hash = message.common.hash(); - Ok(message + let verified_common_message = self.verify_common_message(message.common.clone())?; + let common_message_hash = verified_common_message.0.hash(); + let echoes = message .dispersal .iter() .cloned() @@ -521,7 +528,17 @@ impl Receiver { authenticated_shards, common_message_hash, }) - .collect()) + .collect(); + Ok((verified_common_message, echoes)) + } + + /// Run the dealer's commitments through [CommonMessage::verify] using this receiver's `t`, + /// `batch_size`, and session id. Returns the resulting [VerifiedCommonMessage]. + pub fn verify_common_message( + &self, + common_message: CommonMessage, + ) -> FastCryptoResult { + common_message.verify(self.t, self.batch_size, &self.random_oracle()) } /// Verify an [Echo] addressed to this receiver against `common_message`: the sender's shard @@ -531,10 +548,10 @@ impl Receiver { pub fn verify_echo( &self, echo: Echo, - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ) -> FastCryptoResult { let weight = self.nodes.weight_of(echo.sender)?; - echo.verify(weight, self.id, common_message) + echo.verify(weight, self.id, &common_message.0) } /// 3. Reconstruct this receiver's ciphertext from a quorum of [VerifiedEcho]s. 
Returns @@ -546,10 +563,8 @@ impl Receiver { pub fn decode_ciphertext( &self, echos: &[VerifiedEcho], - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ) -> FastCryptoResult { - common_message.verify(self.t, self.batch_size, &self.random_oracle())?; - let required_weight = self.nodes.total_weight() - 2 * self.f; if self .nodes @@ -571,13 +586,13 @@ impl Receiver { Ok(self .reconstruct_ciphertext(self.id, &shards) .and_then(|ct| { - self.check_avid_consistency(&ct, common_message.recipient_root(self.id)?)?; + self.check_avid_consistency(&ct, common_message.0.recipient_root(self.id)?)?; Ok(DecodeOutcome::Decoded(ct)) }) .unwrap_or(DecodeOutcome::InvalidDispersal(Blame { accuser_id: self.id, shards, - common_message_hash: common_message.hash(), + common_message_hash: common_message.0.hash(), }))) } @@ -587,9 +602,11 @@ impl Receiver { pub fn verify_and_decrypt( &self, ciphertext: &[u8], - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ) -> FastCryptoResult { - let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + let common_message = &common_message.0; + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); let CommonMessage { full_public_keys, shared, @@ -600,8 +617,6 @@ impl Receiver { shared .verify(&random_oracle_encryption) .map_err(|_| InvalidMessage)?; - - let common_message_hash = common_message.hash(); let plaintext = shared.decrypt( ciphertext, &self.enc_secret_key, @@ -609,6 +624,7 @@ impl Receiver { self.id as usize, ); + let common_message_hash = common_message.hash(); SharesForNode::from_bytes(plaintext) .and_then(|my_shares| { my_shares.verify( @@ -650,10 +666,12 @@ impl Receiver { pub fn handle_reveal( &self, reveal: &Reveal, - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ciphertext: Vec, ) -> FastCryptoResult { - let challenge = common_message.verify(self.t, self.batch_size, 
&self.random_oracle())?; + let common_message = &common_message.0; + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); let Reveal { proof, @@ -691,10 +709,10 @@ impl Receiver { pub fn handle_blame( &self, blame: &Blame, - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ciphertext: Vec, ) -> FastCryptoResult { - common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + let common_message = &common_message.0; let Blame { accuser_id, @@ -761,8 +779,12 @@ impl Receiver { pub fn verify_complaint_response( &self, response: ComplaintResponse, - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, ) -> FastCryptoResult { + let common_message = &common_message.0; + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); + let ComplaintResponse { responder_id, ciphertext, @@ -787,7 +809,7 @@ impl Receiver { shares.verify( common_message, - &compute_challenge_from_common_message(&self.random_oracle(), common_message), + &challenge, responder.weight, self.batch_size, )?; @@ -801,7 +823,7 @@ impl Receiver { /// interpolated shares fail final verification. 
pub fn recover( &self, - common_message: &CommonMessage, + common_message: &VerifiedCommonMessage, responses: Vec, ) -> FastCryptoResult { let response_shares: Vec = responses.into_iter().map(|v| v.0).collect(); @@ -810,7 +832,9 @@ impl Receiver { return Err(FastCryptoError::InputTooShort(self.t as usize)); } - let challenge = common_message.verify(self.t, self.batch_size, &self.random_oracle())?; + let common_message = &common_message.0; + let challenge = + compute_challenge_from_common_message(&self.random_oracle(), common_message); let my_shares = SharesForNode::recover(self, &response_shares)?; my_shares.verify( common_message, @@ -884,20 +908,20 @@ impl Receiver { impl CommonMessage { /// Verify the dealer's commitments: the lengths/degree of the published values are - /// well-formed and `g^{p''(0)} = c' · ∏ c_l^{γ_l}`. Returns the Fiat-Shamir challenge `γ` - /// so the caller can reuse it for per-share verification. - fn verify( - &self, + /// well-formed and `g^{p''(0)} = c' · ∏ c_l^{γ_l}`. Consumes `self` and returns a + /// [VerifiedCommonMessage] on success. + pub fn verify( + self, t: u16, batch_size: usize, random_oracle: &RandomOracle, - ) -> FastCryptoResult> { + ) -> FastCryptoResult { if self.full_public_keys.len() != batch_size || self.response_polynomial.degree() != t as usize - 1 { return Err(InvalidMessage); } - let challenge = compute_challenge_from_common_message(random_oracle, self); + let challenge = compute_challenge_from_common_message(random_oracle, &self); if G::generator() * self.response_polynomial.c0() != self.blinding_commit + G::multi_scalar_mul(&challenge, &self.full_public_keys) @@ -905,7 +929,7 @@ impl CommonMessage { { return Err(InvalidMessage); } - Ok(challenge) + Ok(VerifiedCommonMessage(self)) } /// Blake2b hash of the BCS-serialized [CommonMessage]. 
Used to bind echoes and complaints @@ -1119,7 +1143,7 @@ fn compute_challenge( ) -> Vec { let random_oracle = random_oracle.extend(&Challenge.to_string()); let inner_hash = - Sha3_512::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, recipient_roots)).unwrap()) + Blake2b256::digest(bcs::to_bytes(&(c.to_vec(), c_prime, shared, recipient_roots)).unwrap()) .digest; (0..c.len()) .map(|l| random_oracle.evaluate_to_group_element(&(l, inner_hash.to_vec()))) @@ -1241,11 +1265,10 @@ mod tests { let messages = dealer.create_message(&mut rng).unwrap(); - let echoes_by_sender = receivers + let (verified_commons, echoes_by_sender): (Vec<_>, Vec<_>) = receivers .iter() - .map(|receiver| receiver.echo(&messages[receiver.id as usize])) - .collect::>>() - .unwrap(); + .map(|receiver| receiver.echo(&messages[receiver.id as usize]).unwrap()) + .unzip(); let echoes_by_recipient = receivers .iter() @@ -1260,28 +1283,23 @@ mod tests { let decoded_ciphertext = receivers .iter() - .zip(messages.iter()) + .zip(verified_commons.iter()) .zip(echoes_by_recipient.iter()) - .map(|((receiver, message), echoes)| { + .map(|((receiver, vcm), echoes)| { let verified = echoes .iter() - .map(|e| receiver.verify_echo(e.clone(), &message.common).unwrap()) + .map(|e| receiver.verify_echo(e.clone(), vcm).unwrap()) .collect_vec(); - assert_decoded( - receiver - .decode_ciphertext(&verified, &message.common) - .unwrap(), - ) + assert_decoded(receiver.decode_ciphertext(&verified, vcm).unwrap()) }) .collect_vec(); let all_shares = receivers .iter() + .zip(verified_commons.iter()) .zip(decoded_ciphertext) - .zip(messages) - .map(|((receiver, pem), message)| { - let output = - assert_valid(receiver.verify_and_decrypt(&pem, &message.common).unwrap()); + .map(|((receiver, vcm), pem)| { + let output = assert_valid(receiver.verify_and_decrypt(&pem, vcm).unwrap()); (receiver.id, output) }) .collect::>(); @@ -1370,10 +1388,10 @@ mod tests { let messages = dealer.create_message_cheating(&mut rng).unwrap(); // Echo 
phase - let echos = receivers + let (verified_commons, echos): (Vec<_>, Vec<_>) = receivers .iter() .map(|r| r.echo(&messages[r.id as usize]).unwrap()) - .collect_vec(); + .unzip(); let echoes_per_recipient = (0..n) .map(|i| echos.iter().map(|em| em[i].clone()).collect_vec()) .collect_vec(); @@ -1383,25 +1401,16 @@ mod tests { let mut ciphertexts: HashMap> = HashMap::new(); let outcomes: HashMap = receivers .iter() + .zip(verified_commons.iter()) .zip(echoes_per_recipient.iter()) - .map(|(r, echoes)| { + .map(|((r, vcm), echoes)| { let verified = echoes .iter() - .map(|e| { - r.verify_echo(e.clone(), &messages[r.id as usize].common) - .unwrap() - }) + .map(|e| r.verify_echo(e.clone(), vcm).unwrap()) .collect_vec(); - let pem = assert_decoded( - r.decode_ciphertext(&verified, &messages[r.id as usize].common) - .unwrap(), - ); + let pem = assert_decoded(r.decode_ciphertext(&verified, vcm).unwrap()); ciphertexts.insert(r.id, pem.clone()); - ( - r.id, - r.verify_and_decrypt(&pem, &messages[r.id as usize].common) - .unwrap(), - ) + (r.id, r.verify_and_decrypt(&pem, vcm).unwrap()) }) .collect(); @@ -1431,25 +1440,22 @@ mod tests { // Each non-victim verifies the complaint and returns their own ciphertext + recovery package. let responses = receivers .iter() - .filter(|r| r.id != victim_id) - .map(|r| { - r.handle_reveal( - &reveal, - &messages[r.id as usize].common, - ciphertexts.get(&r.id).unwrap().clone(), - ) - .unwrap() + .zip(verified_commons.iter()) + .filter(|(r, _)| r.id != victim_id) + .map(|(r, vcm)| { + r.handle_reveal(&reveal, vcm, ciphertexts.get(&r.id).unwrap().clone()) + .unwrap() }) .collect_vec(); // Victim verifies and then recovers via interpolation across t responses. 
let victim = &receivers[victim_id as usize]; - let common = &messages[victim_id as usize].common; + let vcm = &verified_commons[victim_id as usize]; let verified_responses = responses .into_iter() - .map(|r| victim.verify_complaint_response(r, common).unwrap()) + .map(|r| victim.verify_complaint_response(r, vcm).unwrap()) .collect_vec(); - let recovered = victim.recover(common, verified_responses).unwrap(); + let recovered = victim.recover(vcm, verified_responses).unwrap(); outputs.insert(victim_id, recovered); // Sanity: every receiver now holds verifiable shares for every secret. @@ -1528,10 +1534,10 @@ mod tests { let victim_id = 0u16; // Echo phase - let echos = receivers + let (verified_commons, echos): (Vec<_>, Vec<_>) = receivers .iter() .map(|r| r.echo(&messages[r.id as usize]).unwrap()) - .collect_vec(); + .unzip(); // Bundle echoes per recipient. For the victim, simulate the last f senders being silent // (their corrupted shards would otherwise make the receiver's decode fail outright). @@ -1555,20 +1561,14 @@ mod tests { // gets a [DecodeOutcome::Decoded] that they can pass through `verify_and_decrypt`. let mut decode_outcomes: HashMap = receivers .iter() + .zip(verified_commons.iter()) .zip(echoes_per_recipient.iter()) - .map(|(r, echoes)| { + .map(|((r, vcm), echoes)| { let verified = echoes .iter() - .map(|e| { - r.verify_echo(e.clone(), &messages[r.id as usize].common) - .unwrap() - }) + .map(|e| r.verify_echo(e.clone(), vcm).unwrap()) .collect_vec(); - ( - r.id, - r.decode_ciphertext(&verified, &messages[r.id as usize].common) - .unwrap(), - ) + (r.id, r.decode_ciphertext(&verified, vcm).unwrap()) }) .collect(); @@ -1584,7 +1584,7 @@ mod tests { let pem = assert_decoded(decoded); ciphertexts.insert(id, pem.clone()); let outcome = receivers[id as usize] - .verify_and_decrypt(&pem, &messages[id as usize].common) + .verify_and_decrypt(&pem, &verified_commons[id as usize]) .unwrap(); let output = match outcome { DecryptionOutcome::Valid { output, .. 
} => output, @@ -1600,25 +1600,22 @@ mod tests { // Each non-victim verifies the complaint and returns their own ciphertext + recovery package. let responses = receivers .iter() - .filter(|r| r.id != victim_id) - .map(|r| { - r.handle_blame( - &blame, - &messages[r.id as usize].common, - ciphertexts.get(&r.id).unwrap().clone(), - ) - .unwrap() + .zip(verified_commons.iter()) + .filter(|(r, _)| r.id != victim_id) + .map(|(r, vcm)| { + r.handle_blame(&blame, vcm, ciphertexts.get(&r.id).unwrap().clone()) + .unwrap() }) .collect_vec(); // Victim verifies and then recovers via interpolation across t responses. let victim = &receivers[victim_id as usize]; - let common = &messages[victim_id as usize].common; + let vcm = &verified_commons[victim_id as usize]; let verified_responses = responses .into_iter() - .map(|r| victim.verify_complaint_response(r, common).unwrap()) + .map(|r| victim.verify_complaint_response(r, vcm).unwrap()) .collect_vec(); - let recovered = victim.recover(common, verified_responses).unwrap(); + let recovered = victim.recover(vcm, verified_responses).unwrap(); outputs.insert(victim_id, recovered); // Sanity: every receiver now holds verifiable shares for every secret. diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 9fdcf1c26a..71f63ac2aa 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -237,10 +237,10 @@ mod tests { let messages = dealer.create_message(&mut rng).unwrap(); // Each receiver produces echoes addressed to every party. - let echoes: Vec> = receivers + let (verified_commons, echoes): (Vec<_>, Vec>) = receivers .iter() .map(|r| r.echo(&messages[r.id as usize]).unwrap()) - .collect(); + .unzip(); // Bundle echoes per recipient: echoes_per_recipient[i] = echoes addressed to party i. let echoes_per_recipient: Vec> = (0..n) @@ -249,16 +249,20 @@ mod tests { // Each receiver processes the message. 
// In this case, we assume all are honest and there are no complaints. - for ((r, echoes), msg) in receivers.iter().zip(&echoes_per_recipient).zip(&messages) { + for ((r, echoes), vcm) in receivers + .iter() + .zip(&echoes_per_recipient) + .zip(&verified_commons) + { let verified = echoes .iter() - .map(|e| r.verify_echo(e.clone(), &msg.common).unwrap()) + .map(|e| r.verify_echo(e.clone(), vcm).unwrap()) .collect::>(); - let pem = match r.decode_ciphertext(&verified, &msg.common).unwrap() { + let pem = match r.decode_ciphertext(&verified, vcm).unwrap() { batch_avss::DecodeOutcome::Decoded(d) => d, _ => panic!("expected Decoded outcome"), }; - let output = assert_valid_batch(r.verify_and_decrypt(&pem, &msg.common).unwrap()); + let output = assert_valid_batch(r.verify_and_decrypt(&pem, vcm).unwrap()); presigning_outputs.get_mut(&r.id).unwrap().push(output); } } From 274155d9db1aab10dd56f43ce7d54f4fc282fe0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 10:40:53 +0200 Subject: [PATCH 75/91] Clean up --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index e7f7ce6210..77f2a6ba57 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -685,15 +685,14 @@ impl Receiver { let recipient_root = common_message.recipient_root(proof.accuser_id)?; self.check_avid_consistency(reveal_ciphertext, recipient_root) .map_err(|_| InvalidProof)?; - let accuser_pk = &self.nodes.node_id_to_node(proof.accuser_id)?.pk; - let accuser_weight = self.nodes.weight_of(proof.accuser_id)?; + let accuser = self.nodes.node_id_to_node(proof.accuser_id)?; proof.check( - accuser_pk, + &accuser.pk, reveal_ciphertext, &common_message.shared, &self.random_oracle(), |shares: &SharesForNode| { - 
shares.verify(common_message, &challenge, accuser_weight, self.batch_size) + shares.verify(common_message, &challenge, accuser.weight, self.batch_size) }, )?; @@ -728,7 +727,6 @@ impl Receiver { if shards .iter() .any(|(sender, auth)| auth.verify(*sender as usize, recipient_root).is_err()) - // TODO: Check this { return Ok(self.build_complaint_response(common_message, ciphertext)); } @@ -826,7 +824,7 @@ impl Receiver { common_message: &VerifiedCommonMessage, responses: Vec, ) -> FastCryptoResult { - let response_shares: Vec = responses.into_iter().map(|v| v.0).collect(); + let response_shares = responses.into_iter().map(|v| v.0).collect_vec(); let response_weight: u16 = response_shares.iter().map(SharesForNode::weight).sum(); if response_weight < self.t { return Err(FastCryptoError::InputTooShort(self.t as usize)); @@ -910,7 +908,7 @@ impl CommonMessage { /// Verify the dealer's commitments: the lengths/degree of the published values are /// well-formed and `g^{p''(0)} = c' · ∏ c_l^{γ_l}`. Consumes `self` and returns a /// [VerifiedCommonMessage] on success. - pub fn verify( + fn verify( self, t: u16, batch_size: usize, @@ -992,7 +990,6 @@ impl SharesForNode { /// If all shares have the same batch size, return that. /// Otherwise, return an InvalidInput error. pub fn try_uniform_batch_size(&self) -> FastCryptoResult { - // TODO: Should we cache this? It's called twice per dealer -- once when verifying shares received from a dealer and then again during presigning. 
get_uniform_value(self.shares.iter().map(ShareBatch::batch_size)).ok_or(InvalidInput) } From 3d0f0a3bc314e8ea91794c36259c389a84923ab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 10:55:07 +0200 Subject: [PATCH 76/91] Reject malformed Blame in handle_blame; tidy module doc and reconstruct_ciphertext doc --- .../src/threshold_schnorr/batch_avss.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 77f2a6ba57..89de172e74 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -48,8 +48,8 @@ //! fetches [CommonMessage] / echoes from a voter, then runs steps 3–4 (without sending a //! [Vote]). //! -//! Receivers should retain the [CommonMessage] for the lifetime of the session — it is required -//! to validate complaints and build a [ComplaintResponse]. The [Echo]s and the decoded +//! Receivers should retain the [VerifiedCommonMessage] for the lifetime of the session — it is +//! required to validate complaints and build a [ComplaintResponse]. The [Echo]s and the decoded //! ciphertext should also be kept so laggards (step 7) can fetch them. //! //! # Complaint paths @@ -728,7 +728,7 @@ impl Receiver { .iter() .any(|(sender, auth)| auth.verify(*sender as usize, recipient_root).is_err()) { - return Ok(self.build_complaint_response(common_message, ciphertext)); + return Err(InvalidProof); } let weight_of_shards = self.nodes.total_weight_of(shards.keys())?; @@ -856,10 +856,10 @@ impl Receiver { } /// Reed-Solomon decode the ciphertext for `accuser_id` from a set of authenticated shard - /// contributions, keyed by sender id. Fails if the contributing weight is below `W - 2f` - /// (too few contributions to reconstruct), or if a party's contribution has a shard count - /// that doesn't match its weight. 
The caller is responsible for having authenticated the - /// shards via their Merkle proofs. + /// contributions, keyed by sender id. Missing senders and senders whose shard count + /// doesn't match their weight are treated as erasures, so RS decoding fails if those + /// account for more than `2f` of the total weight. The caller is responsible for having + /// authenticated the shards via their Merkle proofs. fn reconstruct_ciphertext( &self, accuser_id: PartyId, @@ -888,7 +888,8 @@ impl Receiver { self.code.decode(shards_matrix, expected_length) } - /// The check r_i' == r_i from the paper + /// RS-encode `ciphertext`, rebuild the per-recipient Merkle tree, and check its root matches + /// the dealer's `expected_root`. Errors with [InvalidMessage] on mismatch. fn check_avid_consistency( &self, ciphertext: &[u8], From 25dbed2d6814368f419723375b8af5c807b5ab30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Thu, 7 May 2026 11:31:03 +0200 Subject: [PATCH 77/91] Document Blame must wait for matching certificate before broadcast --- .../src/threshold_schnorr/batch_avss.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 89de172e74..5572c26405 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -61,10 +61,12 @@ //! don't satisfy `p''`. The accuser publishes a `Reveal` with their ciphertext and an ECIES //! recovery package; verifiers re-bind the ciphertext to the dealer's broadcast and use the //! recovery package to confirm decryption yields invalid shares. -//! - **[Blame]** (dispersal-layer fault, raised in step 3). RS-decode fails or the recovered -//! ciphertext doesn't re-encode to `recipient_roots[accuser]`. The accuser publishes a -//! 
`Blame` with the collected per-sender [AuthenticatedShards] as evidence; verifiers re-run -//! the same decode-and-re-encode check on the carried shards. +//! - **[Blame]** (dispersal-layer fault, raised in step 3). When [Receiver::decode_ciphertext] +//! returns [DecodeOutcome::InvalidDispersal], **hold** the [Blame] — do not broadcast yet. +//! If a certificate for the same `common_message_hash` later lands on the TOB, publish it; if +//! a different [CommonMessage] gets certified instead, discard the held [Blame] and re-decode +//! against echoes for the certified common. Verifiers re-run the same decode-and-re-encode +//! check on the carried shards. //! //! Verifiers respond to a valid complaint with a [ComplaintResponse] carrying their own //! ciphertext plus a recovery package. The accuser AVID-binds each responder's ciphertext, @@ -182,6 +184,11 @@ pub struct VerifiedEcho(Echo); /// ciphertext whose AVID dispersal is consistent, or a [Blame] when the collected shards either /// fail to RS-decode or decode to a ciphertext whose re-encoding disagrees with the dealer's /// `r_i`. +/// +/// On `InvalidDispersal`, do **not** broadcast the [Blame] immediately: hold it until the same +/// `common_message_hash` is certified on the TOB, then publish. If a *different* [CommonMessage] +/// gets certified instead, discard the held [Blame] and re-decode against the echoes for the +/// certified common. #[allow(clippy::large_enum_variant)] pub enum DecodeOutcome { Decoded(Vec), @@ -213,6 +220,9 @@ pub struct Reveal { /// the accuser's collected per-sender [AuthenticatedShards] so verifiers can re-run the AVID /// check without needing to observe echoes addressed to the accuser. The map keys are sender /// ids, which both deduplicates contributions and gives O(log n) lookup during reconstruction. +/// +/// Do not broadcast a [Blame] until the matching `common_message_hash` has been certified on the +/// TOB; see [DecodeOutcome::InvalidDispersal]. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Blame { pub accuser_id: PartyId, From 5d5d171566498352edef24f4823a5d9bd719732b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 10:15:58 +0200 Subject: [PATCH 78/91] Switch erasure coding to GF(2^16); rename Blame/Reveal; tighten input checks --- fastcrypto-tbls/src/ecies_v1.rs | 2 +- .../src/threshold_schnorr/batch_avss.rs | 96 +++++++++++-------- .../src/threshold_schnorr/reed_solomon.rs | 83 ++++++++++++---- 3 files changed, 124 insertions(+), 57 deletions(-) diff --git a/fastcrypto-tbls/src/ecies_v1.rs b/fastcrypto-tbls/src/ecies_v1.rs index 935a4419a4..63303e649c 100644 --- a/fastcrypto-tbls/src/ecies_v1.rs +++ b/fastcrypto-tbls/src/ecies_v1.rs @@ -60,7 +60,7 @@ pub struct SharedComponents { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct EncryptedPart { enc: Vec, - g: PhantomData, + _g: PhantomData, } impl MultiRecipientEncryption diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 5572c26405..e0e420c5d4 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -57,14 +57,14 @@ //! Complaints are broadcast only after the certificate is in place; the certificate is what //! pins down the [CommonMessage] every validation hinges on. //! -//! - **[Reveal]** (encryption-layer fault, raised in step 4). Decryption fails or the shares -//! don't satisfy `p''`. The accuser publishes a `Reveal` with their ciphertext and an ECIES +//! - **[RevealComplaint]** (encryption-layer fault, raised in step 4). Decryption fails or the shares +//! don't satisfy `p''`. The accuser publishes a `RevealComplaint` with their ciphertext and an ECIES //! recovery package; verifiers re-bind the ciphertext to the dealer's broadcast and use the //! recovery package to confirm decryption yields invalid shares. -//! 
- **[Blame]** (dispersal-layer fault, raised in step 3). When [Receiver::decode_ciphertext] -//! returns [DecodeOutcome::InvalidDispersal], **hold** the [Blame] — do not broadcast yet. +//! - **[BlameComplaint]** (dispersal-layer fault, raised in step 3). When [Receiver::decode_ciphertext] +//! returns [DecodeOutcome::InvalidDispersal], **hold** the [BlameComplaint] — do not broadcast yet. //! If a certificate for the same `common_message_hash` later lands on the TOB, publish it; if -//! a different [CommonMessage] gets certified instead, discard the held [Blame] and re-decode +//! a different [CommonMessage] gets certified instead, discard the held [BlameComplaint] and re-decode //! against echoes for the certified common. Verifiers re-run the same decode-and-re-encode //! check on the carried shards. //! @@ -145,7 +145,7 @@ pub struct Message { pub struct CommonMessage { full_public_keys: Vec, blinding_commit: G, - shared: SharedComponents, + ciphertext_shared: SharedComponents, response_polynomial: Poly, recipient_roots: Vec, } @@ -181,25 +181,25 @@ pub struct Echo { pub struct VerifiedEcho(Echo); /// The result of [Receiver::decode_ciphertext]: either a successfully reconstructed -/// ciphertext whose AVID dispersal is consistent, or a [Blame] when the collected shards either +/// ciphertext whose AVID dispersal is consistent, or a [BlameComplaint] when the collected shards either /// fail to RS-decode or decode to a ciphertext whose re-encoding disagrees with the dealer's /// `r_i`. /// -/// On `InvalidDispersal`, do **not** broadcast the [Blame] immediately: hold it until the same +/// On `InvalidDispersal`, do **not** broadcast the [BlameComplaint] immediately: hold it until the same /// `common_message_hash` is certified on the TOB, then publish. 
If a *different* [CommonMessage] -/// gets certified instead, discard the held [Blame] and re-decode against the echoes for the +/// gets certified instead, discard the held [BlameComplaint] and re-decode against the echoes for the /// certified common. #[allow(clippy::large_enum_variant)] pub enum DecodeOutcome { Decoded(Vec), - InvalidDispersal(Blame), + InvalidDispersal(BlameComplaint), } /// The result of [Receiver::verify_and_decrypt]. #[allow(clippy::large_enum_variant)] pub enum DecryptionOutcome { Valid { output: ReceiverOutput, vote: Vote }, - Invalid(Reveal), + Invalid(RevealComplaint), } /// An endorsement of the dealer's broadcast. @@ -210,7 +210,7 @@ pub struct Vote { /// A complaint by a receiver who could not decrypt or verify its shares. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Reveal { +pub struct RevealComplaint { pub proof: complaint::Complaint, pub ciphertext: Vec, pub common_message_hash: Digest, @@ -221,16 +221,16 @@ pub struct Reveal { /// check without needing to observe echoes addressed to the accuser. The map keys are sender /// ids, which both deduplicates contributions and gives O(log n) lookup during reconstruction. /// -/// Do not broadcast a [Blame] until the matching `common_message_hash` has been certified on the +/// Do not broadcast a [BlameComplaint] until the matching `common_message_hash` has been certified on the /// TOB; see [DecodeOutcome::InvalidDispersal]. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Blame { +pub struct BlameComplaint { pub accuser_id: PartyId, pub shards: BTreeMap, pub common_message_hash: Digest, } -/// A responder's reply to a [Reveal] / [Blame] complaint. Carries the responder's own dealer- +/// A responder's reply to a [RevealComplaint] / [BlameComplaint] complaint. 
Carries the responder's own dealer- /// encrypted ciphertext together with an ECIES recovery package, so the accuser can /// independently authenticate the responder's shares against the dealer's broadcast and /// extract them via decryption. @@ -326,8 +326,12 @@ impl Dealer { fn create_message_with_mutation( &self, rng: &mut impl AllowedRng, - mutate_plaintexts: impl FnOnce(&mut [(crate::ecies_v1::PublicKey, Vec)]), - mutate_shards: impl FnOnce(&mut Vec>>), + #[cfg_attr(not(test), allow(unused_variables))] mutate_plaintexts: impl FnOnce( + &mut [(crate::ecies_v1::PublicKey, Vec)], + ), + #[cfg_attr(not(test), allow(unused_variables))] mutate_shards: impl FnOnce( + &mut Vec>>, + ), ) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) @@ -350,6 +354,7 @@ impl Dealer { .collect_vec(); // Encrypt all shares to the receivers + #[cfg_attr(not(test), allow(unused_mut))] let mut pk_and_msgs = self .nodes .iter() @@ -372,6 +377,7 @@ impl Dealer { }) .collect_vec(); + #[cfg(test)] mutate_plaintexts(&mut pk_and_msgs); let ciphertext = MultiRecipientEncryption::encrypt( @@ -386,14 +392,18 @@ impl Dealer { (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards )?; + #[cfg_attr(not(test), allow(unused_mut))] let mut shards: Vec>> = ciphertexts .iter() .map(|c| { - let shards = code.encode(c)?; // One shard per weight + // Every node has positive weight, so each per-recipient ciphertext is non-empty + // and `code.encode` cannot return `InvalidInput`. 
+ let shards = code.encode(c).expect("non-empty ciphertext"); // One shard per weight self.nodes.collect_to_nodes(shards.into_iter()) // Grouped to nodes by weight }) .collect::>>()?; + #[cfg(test)] mutate_shards(&mut shards); let recipient_trees = shards @@ -443,7 +453,7 @@ impl Dealer { let common = CommonMessage { full_public_keys, - shared, + ciphertext_shared: shared, response_polynomial, blinding_commit, recipient_roots, @@ -515,7 +525,8 @@ impl Receiver { /// [VerifiedCommonMessage] that the receiver should keep around for the rest of the /// session. pub fn echo(&self, message: &Message) -> FastCryptoResult<(VerifiedCommonMessage, Vec)> { - if message.dispersal.len() != message.common.recipient_roots.len() { + let n = self.nodes.num_nodes(); + if message.dispersal.len() != n || message.common.recipient_roots.len() != n { return Err(InvalidMessage); } if message @@ -566,15 +577,20 @@ impl Receiver { /// 3. Reconstruct this receiver's ciphertext from a quorum of [VerifiedEcho]s. Returns /// [DecodeOutcome::Decoded] when the AVID dispersal is consistent with the dealer's - /// `r_{self.id}`, or [DecodeOutcome::InvalidDispersal] (a [Blame]) when it isn't. + /// `r_{self.id}`, or [DecodeOutcome::InvalidDispersal] (a [BlameComplaint]) when it isn't. /// - /// Echoes must already be validated via [Self::verify_echo]. Returns [NotEnoughWeight] if - /// the senders contribute `< W − 2f` weight. + /// Echoes must already be validated via [Self::verify_echo] and must come from distinct + /// senders — duplicates yield [InvalidInput]. Returns [NotEnoughWeight] if the senders + /// contribute `< W − 2f` weight. 
pub fn decode_ciphertext( &self, echos: &[VerifiedEcho], common_message: &VerifiedCommonMessage, ) -> FastCryptoResult { + if !echos.iter().map(|e| e.0.sender).all_unique() { + return Err(InvalidInput); + } + let required_weight = self.nodes.total_weight() - 2 * self.f; if self .nodes @@ -586,7 +602,7 @@ impl Receiver { // Try to RS-decode the ciphertext and re-encode it. The dispersal is consistent iff // both succeed and the re-encoded root matches `r_{self.id}`. Otherwise the dealer's - // dispersal is inconsistent — package the collected shards into a self-contained [Blame]. + // dispersal is inconsistent — package the collected shards into a self-contained [BlameComplaint]. let shards: BTreeMap = echos .iter() .cloned() @@ -599,7 +615,7 @@ impl Receiver { self.check_avid_consistency(&ct, common_message.0.recipient_root(self.id)?)?; Ok(DecodeOutcome::Decoded(ct)) }) - .unwrap_or(DecodeOutcome::InvalidDispersal(Blame { + .unwrap_or(DecodeOutcome::InvalidDispersal(BlameComplaint { accuser_id: self.id, shards, common_message_hash: common_message.0.hash(), @@ -608,7 +624,7 @@ impl Receiver { /// 4. Decrypt and verify the receiver's own shares from a successfully decoded ciphertext. /// Yields [DecryptionOutcome::Valid] (with a [Vote] to broadcast) when shares verify, or - /// [DecryptionOutcome::Invalid] (a [Reveal]) otherwise. + /// [DecryptionOutcome::Invalid] (a [RevealComplaint]) otherwise. pub fn verify_and_decrypt( &self, ciphertext: &[u8], @@ -619,7 +635,7 @@ impl Receiver { compute_challenge_from_common_message(&self.random_oracle(), common_message); let CommonMessage { full_public_keys, - shared, + ciphertext_shared: shared, .. } = &common_message; @@ -655,7 +671,7 @@ impl Receiver { }, }) .or_else(|_| { - Ok(DecryptionOutcome::Invalid(Reveal { + Ok(DecryptionOutcome::Invalid(RevealComplaint { proof: complaint::Complaint::create( self.id, shared, @@ -669,13 +685,13 @@ impl Receiver { }) } - /// 5a. 
Validate a [Reveal] complaint and respond with this party's own shares so the + /// 5a. Validate a [RevealComplaint] complaint and respond with this party's own shares so the /// accuser can recover. Accepts iff the ciphertext is bound to the dealer's broadcast /// (re-encodes to `recipient_roots[accuser_id]`) and the recovery package decrypts it /// to invalid shares. pub fn handle_reveal( &self, - reveal: &Reveal, + reveal: &RevealComplaint, common_message: &VerifiedCommonMessage, ciphertext: Vec, ) -> FastCryptoResult { @@ -683,7 +699,7 @@ impl Receiver { let challenge = compute_challenge_from_common_message(&self.random_oracle(), common_message); - let Reveal { + let RevealComplaint { proof, ciphertext: reveal_ciphertext, common_message_hash, @@ -699,7 +715,7 @@ impl Receiver { proof.check( &accuser.pk, reveal_ciphertext, - &common_message.shared, + &common_message.ciphertext_shared, &self.random_oracle(), |shares: &SharesForNode| { shares.verify(common_message, &challenge, accuser.weight, self.batch_size) @@ -709,7 +725,7 @@ impl Receiver { Ok(self.build_complaint_response(common_message, ciphertext)) } - /// 5b. Validate a [Blame] complaint and respond with this party's own shares. Accepts iff + /// 5b. Validate a [BlameComplaint] complaint and respond with this party's own shares. Accepts iff /// each entry in `blame.shards` authenticates under /// `common_message.recipient_roots[accuser_id]` at its sender's leaf, the senders /// contribute `≥ W − 2f` weight, and the resulting set of shards either fails to @@ -717,13 +733,13 @@ impl Receiver { /// `r_i`. 
pub fn handle_blame( &self, - blame: &Blame, + blame: &BlameComplaint, common_message: &VerifiedCommonMessage, ciphertext: Vec, ) -> FastCryptoResult { let common_message = &common_message.0; - let Blame { + let BlameComplaint { accuser_id, shards, common_message_hash, @@ -760,7 +776,7 @@ impl Receiver { Ok(self.build_complaint_response(common_message, ciphertext)) } - /// Build a [ComplaintResponse] for an answered [Reveal] / [Blame]: package this party's own + /// Build a [ComplaintResponse] for an answered [RevealComplaint] / [BlameComplaint]: package this party's own /// dealer-encrypted ciphertext together with an ECIES recovery package, so the accuser can /// decrypt and authenticate the responder's shares. fn build_complaint_response( @@ -768,7 +784,7 @@ impl Receiver { common_message: &CommonMessage, ciphertext: Vec, ) -> ComplaintResponse { - let recovery_package = common_message.shared.create_recovery_package( + let recovery_package = common_message.ciphertext_shared.create_recovery_package( &self.enc_secret_key, &self.random_oracle().extend(&Recovery(self.id).to_string()), &mut rand::thread_rng(), @@ -802,7 +818,7 @@ impl Receiver { self.check_avid_consistency(&ciphertext, common_message.recipient_root(responder_id)?)?; let responder = self.nodes.node_id_to_node(responder_id)?; let shares = common_message - .shared + .ciphertext_shared .decrypt_with_recovery_package( &ciphertext, &recovery_package, @@ -947,7 +963,7 @@ impl CommonMessage { let mut hasher = Blake2b256::new(); hasher.update( bcs::to_bytes(&( - &self.shared, + &self.ciphertext_shared, &self.full_public_keys, &self.blinding_commit, &self.response_polynomial, @@ -1166,7 +1182,7 @@ fn compute_challenge_from_common_message( random_oracle, &message.full_public_keys, &message.blinding_commit, - &message.shared, + &message.ciphertext_shared, &message.recipient_roots, ) } diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 
ebc289efab..4b41525456 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -9,7 +9,7 @@ use fastcrypto::error::FastCryptoError::{ }; use fastcrypto::error::FastCryptoResult; use itertools::Itertools; -use reed_solomon_erasure::galois_8::ReedSolomon; +use reed_solomon_erasure::galois_16::ReedSolomon; use serde::{Deserialize, Serialize}; /// Decoder for Reed-Solomon codes. @@ -142,9 +142,9 @@ impl ErasureCoder { /// - `k`: Number of data shards. /// /// # Errors - /// Returns [`FastCryptoError::InvalidInput`] if `k == 0`, `n <= k` or `n > 256`. + /// Returns [`FastCryptoError::InvalidInput`] if `k == 0`, `n <= k` or `n > 65536`. pub fn new(n: usize, k: usize) -> FastCryptoResult { - if k == 0 || n <= k || n > 256 { + if k == 0 || n <= k || n > 65536 { return Err(InvalidInput); } ReedSolomon::new(k, n - k) @@ -155,16 +155,25 @@ impl ErasureCoder { /// Encode `data` into `n` shards of equal size, the first `k` of which hold the (zero-padded) /// data and the remaining `n - k` parity. Any `k` shards suffice to reconstruct the data. pub fn encode(&self, data: &[u8]) -> FastCryptoResult> { - // Define a shard size such that the data can be contained in `k` shards. - let shard_size = data.len().div_ceil(self.0.data_shard_count()); - let mut data = data.to_vec(); - data.resize(shard_size * self.0.total_shard_count(), 0); - let mut shards = data - .chunks_exact(shard_size) - .map(|c| c.to_vec()) + if data.is_empty() { + return Err(InvalidInput); + } + // GF(2^16) elements are pairs of bytes; size each shard to a whole number of pairs. 
+ let elems_per_shard = data.len().div_ceil(2 * self.0.data_shard_count()); + let bytes_per_shard = 2 * elems_per_shard; + let mut padded = data.to_vec(); + padded.resize(bytes_per_shard * self.0.total_shard_count(), 0); + let mut shards: Vec> = padded + .chunks_exact(bytes_per_shard) + .map(bytes_to_elems) .collect_vec(); - self.0.encode(&mut shards).map_err(|_| InvalidInput)?; - Ok(shards.into_iter().map(Shard).collect_vec()) + self.0 + .encode(&mut shards) + .expect("Inputs are well-formed (non-empty data, equal-sized non-empty shards, exact total_shard_count)"); + Ok(shards + .into_iter() + .map(|s| Shard(s.into_iter().flatten().collect())) + .collect_vec()) } /// Reconstruct the original data from `n` (possibly missing) shards, returning the first @@ -183,7 +192,18 @@ impl ErasureCoder { return Err(InvalidInput); } - let mut shards = shards.into_iter().map(|s| s.map(|s| s.0)).collect_vec(); + let mut shards: Vec>> = shards + .into_iter() + .map(|s| { + s.map(|s| { + if s.0.len() % 2 != 0 { + return Err(InvalidInput); + } + Ok(bytes_to_elems(&s.0)) + }) + .transpose() + }) + .collect::>()?; self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; let shards = shards .into_iter() @@ -195,11 +215,12 @@ impl ErasureCoder { return Err(TooManyErrors(0)); // This is just an erasure code, so we can't correct errors. 
} - let mut data = shards + let mut data: Vec = shards .into_iter() .take(self.0.data_shard_count()) .flatten() - .collect_vec(); + .flatten() + .collect(); if data.len() < expected_len { return Err(InvalidInput); } @@ -208,6 +229,10 @@ impl ErasureCoder { } } +fn bytes_to_elems(bytes: &[u8]) -> Vec<[u8; 2]> { + bytes.chunks_exact(2).map(|p| [p[0], p[1]]).collect() +} + #[cfg(test)] mod tests { use super::*; @@ -255,7 +280,7 @@ mod tests { assert!(matches!(ErasureCoder::new(10, 0), Err(InvalidInput))); assert!(matches!(ErasureCoder::new(10, 10), Err(InvalidInput))); assert!(matches!(ErasureCoder::new(9, 10), Err(InvalidInput))); - assert!(matches!(ErasureCoder::new(257, 1), Err(InvalidInput))); + assert!(matches!(ErasureCoder::new(65537, 1), Err(InvalidInput))); } #[test] @@ -318,4 +343,30 @@ mod tests { Err(TooManyErrors(_)) )); } + + #[test] + fn test_erasure_coder_encode_shard_lengths() { + // Each GF(2^16) element is 2 bytes; shards are sized to a whole number of pairs, with + // pair count ⌈data_len / (2 · k)⌉. 
+ for &(n, k, data_len, expected_shard_bytes) in &[ + (10, 6, 1, 2), // ⌈ 1 / 12⌉ = 1 pair + (10, 6, 11, 2), // ⌈11 / 12⌉ = 1 pair + (10, 6, 12, 2), // ⌈12 / 12⌉ = 1 pair + (10, 6, 13, 4), // ⌈13 / 12⌉ = 2 pairs + (10, 6, 100, 18), // ⌈100 / 12⌉ = 9 pairs + (800, 268, 2028, 8), // ⌈2028 / 536⌉ = 4 pairs + ] { + let coder = ErasureCoder::new(n, k).unwrap(); + let data: Vec = (0..data_len).map(|i| i as u8).collect(); + let shards = coder.encode(&data).unwrap(); + assert_eq!(shards.len(), n, "shard count"); + for shard in &shards { + assert_eq!( + shard.0.len(), + expected_shard_bytes, + "shard byte length for n={n}, k={k}, data_len={data_len}" + ); + } + } + } } From 3ae6407506ae63d1a04d5bb49350eeaafc57cd59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 10:21:52 +0200 Subject: [PATCH 79/91] Tidy: hoist cfg_attr to function level, rename encode locals --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 11 +++-------- fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs | 10 +++++----- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index e0e420c5d4..b67ea8bd46 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -323,15 +323,12 @@ impl Dealer { /// Like [Self::create_message] but exposes mutation hooks for tests: `mutate_plaintexts` runs /// before encryption, and `mutate_shards` runs after RS-encoding (and before the per-recipient /// Merkle trees are built), so tests can simulate a faulty dealer at either layer. 
+ #[cfg_attr(not(test), allow(unused_variables, unused_mut))] fn create_message_with_mutation( &self, rng: &mut impl AllowedRng, - #[cfg_attr(not(test), allow(unused_variables))] mutate_plaintexts: impl FnOnce( - &mut [(crate::ecies_v1::PublicKey, Vec)], - ), - #[cfg_attr(not(test), allow(unused_variables))] mutate_shards: impl FnOnce( - &mut Vec>>, - ), + mutate_plaintexts: impl FnOnce(&mut [(crate::ecies_v1::PublicKey, Vec)]), + mutate_shards: impl FnOnce(&mut Vec>>), ) -> FastCryptoResult> { let secrets = repeat_with(|| S::rand(rng)) .take(self.batch_size) @@ -354,7 +351,6 @@ impl Dealer { .collect_vec(); // Encrypt all shares to the receivers - #[cfg_attr(not(test), allow(unused_mut))] let mut pk_and_msgs = self .nodes .iter() @@ -392,7 +388,6 @@ impl Dealer { (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards )?; - #[cfg_attr(not(test), allow(unused_mut))] let mut shards: Vec>> = ciphertexts .iter() .map(|c| { diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 4b41525456..27b3956448 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -159,11 +159,11 @@ impl ErasureCoder { return Err(InvalidInput); } // GF(2^16) elements are pairs of bytes; size each shard to a whole number of pairs. 
- let elems_per_shard = data.len().div_ceil(2 * self.0.data_shard_count()); - let bytes_per_shard = 2 * elems_per_shard; - let mut padded = data.to_vec(); - padded.resize(bytes_per_shard * self.0.total_shard_count(), 0); - let mut shards: Vec> = padded + let shard_size = data.len().div_ceil(2 * self.0.data_shard_count()); + let bytes_per_shard = 2 * shard_size; + let mut data = data.to_vec(); + data.resize(bytes_per_shard * self.0.total_shard_count(), 0); + let mut shards: Vec> = data .chunks_exact(bytes_per_shard) .map(bytes_to_elems) .collect_vec(); From a5ce46773eff60f7c47e3b400c21a8665da54c89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 10:27:34 +0200 Subject: [PATCH 80/91] Apply suggestions from code review Co-authored-by: benr-ml <112846738+benr-ml@users.noreply.github.com> --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index b67ea8bd46..6554e355e6 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -7,14 +7,14 @@ //! //! A single dealer commits to a batch of `L` random nonces `r_1, …, r_L` and distributes //! shares to `n` weighted receivers forming a `t`-of-`W` threshold (with `W = Σ_j w_j` total -//! weight, `f` the Byzantine bound by weight, and `L = w_dealer · BATCH_SIZE`). Every honest +//! weight, `f` the Byzantine bound by weight, and `L = w_dealer · BATCH_SIZE`). In case of an honest dealer, every honest //! receiver `j` ends up with `p_l(i_{j,1}), …, p_l(i_{j,w_j})` for every secret `r_l`, where //! `p_l` is a degree-`(t−1)` polynomial with `p_l(0) = r_l`. Any `≥ t` valid shares reconstruct //! `r_l`. //! //! # Two layers //! //! 
The dealer's broadcast (the [CommonMessage]) carries the public nonces //! `c_l = g^{r_l}`, the blinding commitment `c' = g^{r'}`, the *response polynomial* `p''(X)`, //! and the per-recipient Merkle roots `r_1, …, r_n`. //! From 56373b51076f57cb45e56e834a327b36a919bdae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 10:31:44 +0200 Subject: [PATCH 81/91] Note that echo emits one entry addressed to self --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 6554e355e6..6f8a271408 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -516,9 +516,9 @@ impl Receiver { } /// 2. Verify the dispersal entries against `recipient_roots` and emit one [Echo] per - /// recipient (indexed by recipient id) for the receiver to broadcast. Also returns the - /// [VerifiedCommonMessage] that the receiver should keep around for the rest of the - /// session. + /// recipient (indexed by recipient id) for the receiver to broadcast — including one + /// addressed to the receiver itself. Also returns the [VerifiedCommonMessage] that the + /// receiver should keep around for the rest of the session. 
pub fn echo(&self, message: &Message) -> FastCryptoResult<(VerifiedCommonMessage, Vec)> { let n = self.nodes.num_nodes(); if message.dispersal.len() != n || message.common.recipient_roots.len() != n { From 391c1b2be3c0a93a899f31dcfcbe21d1268f759e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 11:11:07 +0200 Subject: [PATCH 82/91] Replace stored ErasureCoder with Receiver::get_coder; explicit param checks --- .../src/threshold_schnorr/batch_avss.rs | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 6f8a271408..d9ba89d446 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -126,8 +126,6 @@ pub struct Receiver { t: u16, /// The total number of nonces that the receiver expects to receive from the dealer. batch_size: usize, - /// Reed-Solomon `(W, W - 2f)` coder over the dealer's per-receiver ciphertexts. - code: ErasureCoder, } /// The dealer's per-recipient message: the shared [CommonMessage] plus the receiver's own @@ -498,10 +496,14 @@ impl Receiver { // The dealer is expected to deal a number of nonces proportional to it's weight let batch_size = nodes.weight_of(dealer_id)? as usize * batch_size_per_weight as usize; - let code = ErasureCoder::new( - nodes.total_weight() as usize, - (nodes.total_weight() - 2 * f) as usize, // 2f parity shards - )?; + // Constraints required by the Reed-Solomon `(W, W − 2f)` coder: + // * f ≥ 1 (so n − k = 2f > 0, i.e. there is at least one parity shard) + // * W > 2f (so k = W − 2f > 0, i.e. 
there is at least one data shard) + // * W ≤ 65536 (Reed-Solomon over GF(2^16)) + let w = nodes.total_weight() as usize; + if f == 0 || w <= 2 * f as usize || w > 65536 { + return Err(InvalidInput); + } Ok(Self { id, @@ -511,10 +513,18 @@ impl Receiver { f, t, batch_size, - code, }) } + /// Reed-Solomon `(W, W − 2f)` coder over the dealer's per-receiver ciphertexts. + fn get_coder(&self) -> ErasureCoder { + ErasureCoder::new( + self.nodes.total_weight() as usize, + (self.nodes.total_weight() - 2 * self.f) as usize, + ) + .expect("parameters were validated in Receiver::new") + } + /// 2. Verify the dispersal entries against `recipient_roots` and emit one [Echo] per /// recipient (indexed by recipient id) for the receiver to broadcast — including one /// addressed to the receiver itself. Also returns the [VerifiedCommonMessage] that the @@ -906,7 +916,7 @@ impl Receiver { self.nodes.weight_of(accuser_id)? as usize, self.batch_size, ); - self.code.decode(shards_matrix, expected_length) + self.get_coder().decode(shards_matrix, expected_length) } /// RS-encode `ciphertext`, rebuild the per-recipient Merkle tree, and check its root matches @@ -918,7 +928,7 @@ impl Receiver { ) -> FastCryptoResult<()> { let new_shards = self .nodes - .collect_to_nodes(self.code.encode(ciphertext)?.into_iter())?; + .collect_to_nodes(self.get_coder().encode(ciphertext)?.into_iter())?; if recipient_tree(&new_shards)?.root() != *expected_root { return Err(InvalidMessage); } From 1912b712de68041dc5cbda9b02d2621f10578cba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 11:34:30 +0200 Subject: [PATCH 83/91] Add validate_parameters and shared get_coder helper used by Dealer and Receiver --- .../src/threshold_schnorr/batch_avss.rs | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index d9ba89d446..ab172e368a 100644 
--- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -297,9 +297,7 @@ impl Dealer { sid: Vec, batch_size_per_weight: u16, ) -> FastCryptoResult { - if t > nodes.total_weight() { - return Err(InvalidInput); - } + validate_parameters(t, f, nodes.total_weight())?; // Each dealer deals a number of nonces proportional to their weight. let batch_size = nodes.weight_of(dealer_id)? as usize * batch_size_per_weight as usize; Ok(Self { @@ -381,10 +379,7 @@ impl Dealer { ); let (shared, ciphertexts) = ciphertext.clone().into_parts(); - let code = ErasureCoder::new( - self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, // 2f parity shards - )?; + let code = get_coder(&self.nodes, self.f); let mut shards: Vec>> = ciphertexts .iter() @@ -496,14 +491,7 @@ impl Receiver { // The dealer is expected to deal a number of nonces proportional to it's weight let batch_size = nodes.weight_of(dealer_id)? as usize * batch_size_per_weight as usize; - // Constraints required by the Reed-Solomon `(W, W − 2f)` coder: - // * f ≥ 1 (so n − k = 2f > 0, i.e. there is at least one parity shard) - // * W > 2f (so k = W − 2f > 0, i.e. there is at least one data shard) - // * W ≤ 65536 (Reed-Solomon over GF(2^16)) - let w = nodes.total_weight() as usize; - if f == 0 || w <= 2 * f as usize || w > 65536 { - return Err(InvalidInput); - } + validate_parameters(t, f, nodes.total_weight())?; Ok(Self { id, @@ -516,15 +504,6 @@ impl Receiver { }) } - /// Reed-Solomon `(W, W − 2f)` coder over the dealer's per-receiver ciphertexts. - fn get_coder(&self) -> ErasureCoder { - ErasureCoder::new( - self.nodes.total_weight() as usize, - (self.nodes.total_weight() - 2 * self.f) as usize, - ) - .expect("parameters were validated in Receiver::new") - } - /// 2. 
Verify the dispersal entries against `recipient_roots` and emit one [Echo] per /// recipient (indexed by recipient id) for the receiver to broadcast — including one /// addressed to the receiver itself. Also returns the [VerifiedCommonMessage] that the @@ -916,7 +895,7 @@ impl Receiver { self.nodes.weight_of(accuser_id)? as usize, self.batch_size, ); - self.get_coder().decode(shards_matrix, expected_length) + get_coder(&self.nodes, self.f).decode(shards_matrix, expected_length) } /// RS-encode `ciphertext`, rebuild the per-recipient Merkle tree, and check its root matches @@ -926,9 +905,11 @@ impl Receiver { ciphertext: &[u8], expected_root: &merkle::Node, ) -> FastCryptoResult<()> { - let new_shards = self - .nodes - .collect_to_nodes(self.get_coder().encode(ciphertext)?.into_iter())?; + let new_shards = self.nodes.collect_to_nodes( + get_coder(&self.nodes, self.f) + .encode(ciphertext)? + .into_iter(), + )?; if recipient_tree(&new_shards)?.root() != *expected_root { return Err(InvalidMessage); } @@ -1145,6 +1126,28 @@ impl Echo { } } +/// Reed-Solomon `(W, W − 2f)` coder over the per-receiver ciphertexts. Requires the parameters +/// to have been validated via [validate_parameters]. +fn get_coder(nodes: &Nodes, f: u16) -> ErasureCoder { + ErasureCoder::new( + nodes.total_weight() as usize, + (nodes.total_weight() - 2 * f) as usize, + ) + .expect("parameters were validated by validate_parameters") +} + +/// Validate the protocol parameters `(t, f, W)`: +/// * `f ≥ 1` and `W > 2f` and `W ≤ 65536` — required by the Reed-Solomon `(W, W − 2f)` coder +/// over GF(2^16). +/// * `1 ≤ t ≤ W` — recovery threshold is well-defined and reachable by the total weight. 
+fn validate_parameters(t: u16, f: u16, total_weight: u16) -> FastCryptoResult<()> { + let w = total_weight as usize; + if f == 0 || w <= 2 * f as usize || w > 65536 || t == 0 || t > total_weight { + return Err(InvalidInput); + } + Ok(()) +} + /// Build the per-recipient Merkle tree over `shards` (per-node grouped shard chunks of one /// ciphertext). The root of this tree is the per-recipient `recipient_root`. #[allow(clippy::ptr_arg)] From fd70f2111d5f271c4bd32419949e8299c55b3beb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Fri, 8 May 2026 11:43:28 +0200 Subject: [PATCH 84/91] Clean up --- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 9 ++++----- fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs | 7 +++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index ab172e368a..50c15d49e8 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -1136,15 +1136,14 @@ fn get_coder(nodes: &Nodes, f: u16) -> ErasureCoder { .expect("parameters were validated by validate_parameters") } -/// Validate the protocol parameters `(t, f, W)`: -/// * `f ≥ 1` and `W > 2f` and `W ≤ 65536` — required by the Reed-Solomon `(W, W − 2f)` coder -/// over GF(2^16). +/// Validate the protocol parameters `(t, f, W)`. +/// * It is possible to create a Reed-Solomon `(W, W − 2f)` coder /// * `1 ≤ t ≤ W` — recovery threshold is well-defined and reachable by the total weight. 
fn validate_parameters(t: u16, f: u16, total_weight: u16) -> FastCryptoResult<()> { - let w = total_weight as usize; - if f == 0 || w <= 2 * f as usize || w > 65536 || t == 0 || t > total_weight { + if f == 0 || total_weight <= 2 * f || t == 0 || t > total_weight { return Err(InvalidInput); } + ErasureCoder::check_parameters(total_weight as usize, (total_weight - 2 * f) as usize)?; Ok(()) } diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 27b3956448..de6c8ea979 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -152,6 +152,13 @@ impl ErasureCoder { .map(Self) } + pub fn check_parameters(n: usize, k: usize) -> FastCryptoResult<()> { + if k == 0 || n <= k || n > 65536 { + return Err(InvalidInput); + } + Ok(()) + } + /// Encode `data` into `n` shards of equal size, the first `k` of which hold the (zero-padded) /// data and the remaining `n - k` parity. Any `k` shards suffice to reconstruct the data. pub fn encode(&self, data: &[u8]) -> FastCryptoResult> { From df2dcef12f03c69588d1348b3127419a419c9b5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 09:52:05 +0200 Subject: [PATCH 85/91] Tighten bytes_to_elems error handling; note GF(2^16) bound --- .../src/threshold_schnorr/reed_solomon.rs | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index de6c8ea979..5b2c767077 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -144,6 +144,8 @@ impl ErasureCoder { /// # Errors /// Returns [`FastCryptoError::InvalidInput`] if `k == 0`, `n <= k` or `n > 65536`. 
pub fn new(n: usize, k: usize) -> FastCryptoResult { + // The code is defined over GF(2^16), which has 2^16 = 65536 elements; n cannot exceed + // that or the evaluation points would collide. if k == 0 || n <= k || n > 65536 { return Err(InvalidInput); } @@ -173,10 +175,8 @@ impl ErasureCoder { let mut shards: Vec> = data .chunks_exact(bytes_per_shard) .map(bytes_to_elems) - .collect_vec(); - self.0 - .encode(&mut shards) - .expect("Inputs are well-formed (non-empty data, equal-sized non-empty shards, exact total_shard_count)"); + .collect::>()?; + self.0.encode(&mut shards).map_err(|_| InvalidInput)?; Ok(shards .into_iter() .map(|s| Shard(s.into_iter().flatten().collect())) @@ -201,15 +201,7 @@ impl ErasureCoder { let mut shards: Vec>> = shards .into_iter() - .map(|s| { - s.map(|s| { - if s.0.len() % 2 != 0 { - return Err(InvalidInput); - } - Ok(bytes_to_elems(&s.0)) - }) - .transpose() - }) + .map(|s| s.map(|s| bytes_to_elems(&s.0)).transpose()) .collect::>()?; self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; let shards = shards @@ -236,8 +228,11 @@ impl ErasureCoder { } } -fn bytes_to_elems(bytes: &[u8]) -> Vec<[u8; 2]> { - bytes.chunks_exact(2).map(|p| [p[0], p[1]]).collect() +fn bytes_to_elems(bytes: &[u8]) -> FastCryptoResult> { + if !bytes.len().is_multiple_of(2) { + return Err(InvalidInput); + } + Ok(bytes.chunks_exact(2).map(|p| [p[0], p[1]]).collect()) } #[cfg(test)] From af49629b30fc74e65f5cb943eae74f04d779ede5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 10:14:33 +0200 Subject: [PATCH 86/91] Introduce Element/ELEMENT_SIZE_IN_BYTES; verify zero padding; tidy decode error --- .../src/threshold_schnorr/reed_solomon.rs | 44 +++++++++++++------ 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index 5b2c767077..c1bf636cd0 100644 --- 
a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -4,9 +4,7 @@ use crate::polynomial::{Eval, MonicLinear, Poly}; use crate::threshold_schnorr::S; use crate::types::{to_scalar, ShareIndex}; -use fastcrypto::error::FastCryptoError::{ - InputLengthWrong, InputTooShort, InvalidInput, TooManyErrors, -}; +use fastcrypto::error::FastCryptoError::{InputLengthWrong, InvalidInput, TooManyErrors}; use fastcrypto::error::FastCryptoResult; use itertools::Itertools; use reed_solomon_erasure::galois_16::ReedSolomon; @@ -130,6 +128,12 @@ impl RSDecoder { /// A wrapper struct for the Reed-Solomon erasure coding library. pub struct ErasureCoder(ReedSolomon); +/// An element of `GF(2^16)` as represented by the underlying coder. +type Element = [u8; ELEMENT_SIZE_IN_BYTES]; + +/// Size in bytes of one `GF(2^16)` element. +const ELEMENT_SIZE_IN_BYTES: usize = 2; + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(transparent)] pub struct Shard(pub(crate) Vec); @@ -167,14 +171,16 @@ impl ErasureCoder { if data.is_empty() { return Err(InvalidInput); } - // GF(2^16) elements are pairs of bytes; size each shard to a whole number of pairs. - let shard_size = data.len().div_ceil(2 * self.0.data_shard_count()); - let bytes_per_shard = 2 * shard_size; + // Size each shard to a whole number of field elements. 
+ let shard_size = data + .len() + .div_ceil(ELEMENT_SIZE_IN_BYTES * self.0.data_shard_count()); + let bytes_per_shard = ELEMENT_SIZE_IN_BYTES * shard_size; let mut data = data.to_vec(); data.resize(bytes_per_shard * self.0.total_shard_count(), 0); - let mut shards: Vec> = data + let mut shards: Vec> = data .chunks_exact(bytes_per_shard) - .map(bytes_to_elems) + .map(bytes_to_elements) .collect::>()?; self.0.encode(&mut shards).map_err(|_| InvalidInput)?; Ok(shards @@ -192,16 +198,16 @@ impl ErasureCoder { expected_len: usize, ) -> FastCryptoResult> { if shards.len() != self.0.total_shard_count() { - return Err(InputTooShort(self.0.total_shard_count())); + return Err(InputLengthWrong(self.0.total_shard_count())); } if shards.iter().filter(|s| s.is_none()).count() > self.0.parity_shard_count() { return Err(InvalidInput); } - let mut shards: Vec>> = shards + let mut shards: Vec>> = shards .into_iter() - .map(|s| s.map(|s| bytes_to_elems(&s.0)).transpose()) + .map(|s| s.map(|s| bytes_to_elements(&s.0)).transpose()) .collect::>()?; self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; let shards = shards @@ -223,16 +229,26 @@ impl ErasureCoder { if data.len() < expected_len { return Err(InvalidInput); } + // The bytes past `expected_len` are zero-padding inserted by `encode`; reject anything + // that doesn't match. + if data[expected_len..].iter().any(|&b| b != 0) { + return Err(InvalidInput); + } data.truncate(expected_len); Ok(data) } } -fn bytes_to_elems(bytes: &[u8]) -> FastCryptoResult> { - if !bytes.len().is_multiple_of(2) { +/// Reinterpret `bytes` as a sequence of [Element]s. Fails with [`InvalidInput`] if the input +/// length is not a multiple of [`ELEMENT_SIZE_IN_BYTES`]. 
+fn bytes_to_elements(bytes: &[u8]) -> FastCryptoResult> { + if !bytes.len().is_multiple_of(ELEMENT_SIZE_IN_BYTES) { return Err(InvalidInput); } - Ok(bytes.chunks_exact(2).map(|p| [p[0], p[1]]).collect()) + Ok(bytes + .chunks_exact(ELEMENT_SIZE_IN_BYTES) + .map(|p| p.try_into().expect("chunk has ELEMENT_SIZE_IN_BYTES bytes")) + .collect()) } #[cfg(test)] From 487ebd7a6fdb35fbdc229fa3390928ec72111516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 10:30:17 +0200 Subject: [PATCH 87/91] Simplify decode: drop redundant guard, merge length/zero check, untangle shadowed binding --- .../src/threshold_schnorr/reed_solomon.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs index c1bf636cd0..12db6a215b 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/reed_solomon.rs @@ -183,10 +183,7 @@ impl ErasureCoder { .map(bytes_to_elements) .collect::>()?; self.0.encode(&mut shards).map_err(|_| InvalidInput)?; - Ok(shards - .into_iter() - .map(|s| Shard(s.into_iter().flatten().collect())) - .collect_vec()) + Ok(shards.into_iter().map(|s| Shard(s.concat())).collect_vec()) } /// Reconstruct the original data from `n` (possibly missing) shards, returning the first @@ -201,13 +198,12 @@ impl ErasureCoder { return Err(InputLengthWrong(self.0.total_shard_count())); } - if shards.iter().filter(|s| s.is_none()).count() > self.0.parity_shard_count() { - return Err(InvalidInput); - } - let mut shards: Vec>> = shards .into_iter() - .map(|s| s.map(|s| bytes_to_elements(&s.0)).transpose()) + .map(|opt| { + opt.map(|Shard(bytes)| bytes_to_elements(&bytes)) + .transpose() + }) .collect::>()?; self.0.reconstruct(&mut shards).map_err(|_| InvalidInput)?; let shards = shards @@ -226,12 +222,9 @@ impl ErasureCoder { .flatten() .flatten() .collect(); - if 
data.len() < expected_len { - return Err(InvalidInput); - } // The bytes past `expected_len` are zero-padding inserted by `encode`; reject anything // that doesn't match. - if data[expected_len..].iter().any(|&b| b != 0) { + if data.len() < expected_len || data[expected_len..].iter().any(|&b| b != 0) { return Err(InvalidInput); } data.truncate(expected_len); From c048a29170cc130f71f1b81b0dcf536de394eedf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 10:48:34 +0200 Subject: [PATCH 88/91] Rename Complaint to RecoveryProof; move accuser_id to wire types --- fastcrypto-tbls/src/threshold_schnorr/avss.rs | 32 +++++++++----- .../src/threshold_schnorr/batch_avss.rs | 12 ++++-- .../src/threshold_schnorr/complaint.rs | 42 +++++++++---------- 3 files changed, 50 insertions(+), 36 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/avss.rs b/fastcrypto-tbls/src/threshold_schnorr/avss.rs index 28f6f676ed..7044d8976f 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/avss.rs @@ -14,7 +14,7 @@ use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint::{Complaint, ComplaintResponse}; +use crate::threshold_schnorr::complaint::{ComplaintResponse, RecoveryProof}; use crate::threshold_schnorr::Extensions::Encryption; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types; @@ -63,6 +63,14 @@ pub enum ProcessedMessage { Complaint(Complaint), } +/// A complaint by a receiver who could not decrypt or verify its shares from the dealer's +/// broadcast. Given enough responses, the accuser can recover its shares. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Complaint { + pub accuser_id: PartyId, + pub proof: RecoveryProof, +} + /// The output of a receiver after a single instance of AVSS: The shares for each nonce + commitments for the next round. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PartialOutput { @@ -284,13 +292,16 @@ impl Receiver { my_shares, feldman_commitment: message.feldman_commitment.clone(), })), - Err(_) => Ok(ProcessedMessage::Complaint(Complaint::create( - self.id, - &message.ciphertext.shared(), - &self.enc_secret_key, - &self.random_oracle(), - &mut rand::thread_rng(), - ))), + Err(_) => Ok(ProcessedMessage::Complaint(Complaint { + accuser_id: self.id, + proof: RecoveryProof::create( + self.id, + &message.ciphertext.shared(), + &self.enc_secret_key, + &self.random_oracle(), + &mut rand::thread_rng(), + ), + })), } } @@ -301,7 +312,8 @@ impl Receiver { complaint: &Complaint, my_output: &PartialOutput, ) -> FastCryptoResult> { - complaint.check( + complaint.proof.check( + complaint.accuser_id, &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, &message.ciphertext.encs[complaint.accuser_id as usize], &message.ciphertext.shared(), @@ -550,11 +562,11 @@ mod tests { use crate::ecies_v1::{MultiRecipientEncryption, PublicKey}; use crate::nodes::{Node, Nodes, PartyId}; use crate::polynomial::Poly; + use crate::threshold_schnorr::avss::Complaint; use crate::threshold_schnorr::avss::{Dealer, Message, Receiver}; use crate::threshold_schnorr::avss::{PartialOutput, ProcessedMessage}; use crate::threshold_schnorr::avss::{ReceiverOutput, SharesForNode}; use crate::threshold_schnorr::bcs::BCSSerialized; - use crate::threshold_schnorr::complaint::Complaint; use crate::threshold_schnorr::tests::restrict; use crate::threshold_schnorr::Extensions::Encryption; use crate::threshold_schnorr::{EG, G, S}; diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 
50c15d49e8..11364de74c 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -209,7 +209,8 @@ pub struct Vote { /// A complaint by a receiver who could not decrypt or verify its shares. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RevealComplaint { - pub proof: complaint::Complaint, + pub accuser_id: PartyId, + pub proof: complaint::RecoveryProof, pub ciphertext: Vec, pub common_message_hash: Digest, } @@ -656,7 +657,8 @@ impl Receiver { }) .or_else(|_| { Ok(DecryptionOutcome::Invalid(RevealComplaint { - proof: complaint::Complaint::create( + accuser_id: self.id, + proof: complaint::RecoveryProof::create( self.id, shared, &self.enc_secret_key, @@ -684,6 +686,7 @@ impl Receiver { compute_challenge_from_common_message(&self.random_oracle(), common_message); let RevealComplaint { + accuser_id, proof, ciphertext: reveal_ciphertext, common_message_hash, @@ -692,11 +695,12 @@ impl Receiver { if *common_message_hash != common_message.hash() { return Err(InvalidProof); } - let recipient_root = common_message.recipient_root(proof.accuser_id)?; + let recipient_root = common_message.recipient_root(*accuser_id)?; self.check_avid_consistency(reveal_ciphertext, recipient_root) .map_err(|_| InvalidProof)?; - let accuser = self.nodes.node_id_to_node(proof.accuser_id)?; + let accuser = self.nodes.node_id_to_node(*accuser_id)?; proof.check( + *accuser_id, &accuser.pk, reveal_ciphertext, &common_message.ciphertext_shared, diff --git a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs index c78a946971..4ead9791a1 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs @@ -14,18 +14,19 @@ use fastcrypto::traits::AllowedRng; use serde::{Deserialize, Serialize}; use tracing::debug; -/// A complaint by an accuser that it could not decrypt or verify its shares. 
-/// Given enough responses to the complaint, the accuser can recover its shares. +/// Cryptographic proof attached to a complaint: an ECIES recovery package that opens the +/// dealer's shared ciphertext with the accuser's private key and produces shares that fail a +/// supplied verifier. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Complaint { - pub(crate) accuser_id: PartyId, - pub(crate) proof: RecoveryPackage, -} +pub struct RecoveryProof(RecoveryPackage); -impl Complaint { - /// Try to decrypt the shares for the accuser. +impl RecoveryProof { + /// Verify the proof for the given `accuser_id`: decrypt `ciphertext` via the recovery + /// package and confirm the resulting shares fail `verifier`. The caller supplies + /// `accuser_id` from their protocol context — it is *not* carried inside the proof. pub fn check( &self, + accuser_id: PartyId, enc_pk: &ecies_v1::PublicKey, ciphertext: &[u8], shared: &SharedComponents, @@ -35,17 +36,17 @@ impl Complaint { // Check that the recovery package is valid, and if not, return an error since the complaint is invalid. 
let buffer = shared.decrypt_with_recovery_package( ciphertext, - &self.proof, - &random_oracle.extend(&Recovery(self.accuser_id).to_string()), + &self.0, + &random_oracle.extend(&Recovery(accuser_id).to_string()), &random_oracle.extend(&Encryption.to_string()), enc_pk, - self.accuser_id as usize, + accuser_id as usize, )?; let Ok(shares) = S::from_bytes(&buffer) else { debug!( "Complaint by party {} is valid: Failed to deserialize shares", - self.accuser_id + accuser_id ); return Ok(()); }; @@ -53,13 +54,13 @@ impl Complaint { if verifier(&shares).is_ok() { debug!( "Complaint by party {} is invalid: Shares verify correctly", - self.accuser_id + accuser_id ); Err(InvalidProof) } else { debug!( "Complaint by party {} is valid: Shares do not verify correctly", - self.accuser_id + accuser_id ); Ok(()) } @@ -72,14 +73,11 @@ impl Complaint { random_oracle: &RandomOracle, rng: &mut impl AllowedRng, ) -> Self { - Self { - accuser_id, - proof: ciphertext.create_recovery_package( - enc_sk, - &random_oracle.extend(&Recovery(accuser_id).to_string()), - rng, - ), - } + Self(ciphertext.create_recovery_package( + enc_sk, + &random_oracle.extend(&Recovery(accuser_id).to_string()), + rng, + )) } } From eff1da134c80148d9548d22a3fd15567edd2347e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 10:59:59 +0200 Subject: [PATCH 89/91] Move ComplaintResponse into avss; drop unused generic parameter --- fastcrypto-tbls/src/threshold_schnorr/avss.rs | 14 +++++++++++--- fastcrypto-tbls/src/threshold_schnorr/complaint.rs | 8 -------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/fastcrypto-tbls/src/threshold_schnorr/avss.rs b/fastcrypto-tbls/src/threshold_schnorr/avss.rs index 7044d8976f..4a1fa6916a 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/avss.rs @@ -14,7 +14,7 @@ use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{Eval, Poly}; use crate::random_oracle::RandomOracle; 
use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint::{ComplaintResponse, RecoveryProof}; +use crate::threshold_schnorr::complaint::RecoveryProof; use crate::threshold_schnorr::Extensions::Encryption; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types; @@ -71,6 +71,14 @@ pub struct Complaint { pub proof: RecoveryProof, } +/// A response to a [Complaint], containing the responder's shares so the accuser can +/// Lagrange-interpolate their own. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ComplaintResponse { + pub responder_id: PartyId, + pub shares: SharesForNode, +} + /// The output of a receiver after a single instance of AVSS: The shares for each nonce + commitments for the next round. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PartialOutput { @@ -311,7 +319,7 @@ impl Receiver { message: &Message, complaint: &Complaint, my_output: &PartialOutput, - ) -> FastCryptoResult> { + ) -> FastCryptoResult { complaint.proof.check( complaint.accuser_id, &self.nodes.node_id_to_node(complaint.accuser_id)?.pk, @@ -333,7 +341,7 @@ impl Receiver { pub fn recover( &self, message: &Message, - responses: Vec>, + responses: Vec, ) -> FastCryptoResult { // Sanity check that we have enough responses (by weight) to recover the shares. let total_response_weight = self diff --git a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs index 4ead9791a1..9e2828b297 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/complaint.rs @@ -80,11 +80,3 @@ impl RecoveryProof { )) } } - -/// A response to a complaint, containing the responder's shares. Constructed only via -/// `Receiver::handle_complaint`, which gates on `Complaint::check`. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ComplaintResponse { - pub(crate) responder_id: PartyId, - pub(crate) shares: S, -} From 2bf10553e96c0578feb17790724ca6421e72167b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 13:55:18 +0200 Subject: [PATCH 90/91] Rename complaint.rs to recovery_proof.rs --- fastcrypto-tbls/src/threshold_schnorr/avss.rs | 2 +- fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs | 6 +++--- fastcrypto-tbls/src/threshold_schnorr/mod.rs | 2 +- .../threshold_schnorr/{complaint.rs => recovery_proof.rs} | 0 4 files changed, 5 insertions(+), 5 deletions(-) rename fastcrypto-tbls/src/threshold_schnorr/{complaint.rs => recovery_proof.rs} (100%) diff --git a/fastcrypto-tbls/src/threshold_schnorr/avss.rs b/fastcrypto-tbls/src/threshold_schnorr/avss.rs index 4a1fa6916a..8ebe46de04 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/avss.rs @@ -14,7 +14,7 @@ use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint::RecoveryProof; +use crate::threshold_schnorr::recovery_proof::RecoveryProof; use crate::threshold_schnorr::Extensions::Encryption; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; use crate::types; diff --git a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs index 11364de74c..efb7f68a97 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/batch_avss.rs @@ -79,7 +79,7 @@ use crate::nodes::{Nodes, PartyId}; use crate::polynomial::{create_secret_sharing, Eval, Poly}; use crate::random_oracle::RandomOracle; use crate::threshold_schnorr::bcs::BCSSerialized; -use crate::threshold_schnorr::complaint; +use crate::threshold_schnorr::recovery_proof; use 
crate::threshold_schnorr::reed_solomon::{ErasureCoder, Shard}; use crate::threshold_schnorr::Extensions::{Challenge, Encryption, Recovery}; use crate::threshold_schnorr::{random_oracle_from_sid, EG, G, S}; @@ -210,7 +210,7 @@ pub struct Vote { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RevealComplaint { pub accuser_id: PartyId, - pub proof: complaint::RecoveryProof, + pub proof: recovery_proof::RecoveryProof, pub ciphertext: Vec, pub common_message_hash: Digest, } @@ -658,7 +658,7 @@ impl Receiver { .or_else(|_| { Ok(DecryptionOutcome::Invalid(RevealComplaint { accuser_id: self.id, - proof: complaint::RecoveryProof::create( + proof: recovery_proof::RecoveryProof::create( self.id, shared, &self.enc_secret_key, diff --git a/fastcrypto-tbls/src/threshold_schnorr/mod.rs b/fastcrypto-tbls/src/threshold_schnorr/mod.rs index 71f63ac2aa..2a440afa15 100644 --- a/fastcrypto-tbls/src/threshold_schnorr/mod.rs +++ b/fastcrypto-tbls/src/threshold_schnorr/mod.rs @@ -32,10 +32,10 @@ use std::fmt::{Display, Formatter}; pub mod avss; pub mod batch_avss; mod bcs; -pub mod complaint; pub mod key_derivation; mod pascal_matrix; pub mod presigning; +pub mod recovery_proof; pub mod reed_solomon; pub mod signing; diff --git a/fastcrypto-tbls/src/threshold_schnorr/complaint.rs b/fastcrypto-tbls/src/threshold_schnorr/recovery_proof.rs similarity index 100% rename from fastcrypto-tbls/src/threshold_schnorr/complaint.rs rename to fastcrypto-tbls/src/threshold_schnorr/recovery_proof.rs From ff098d343c8e7fce0ad541132d9d812d5cf4f493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Lindstr=C3=B8m?= Date: Mon, 11 May 2026 14:12:50 +0200 Subject: [PATCH 91/91] Add benches for verify_common_message, echo, verify_echo, verify_and_decrypt --- fastcrypto-tbls/benches/batch_avss.rs | 110 ++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/fastcrypto-tbls/benches/batch_avss.rs b/fastcrypto-tbls/benches/batch_avss.rs index 6e37c820fc..c30ae03d80 100644 --- 
a/fastcrypto-tbls/benches/batch_avss.rs +++ b/fastcrypto-tbls/benches/batch_avss.rs @@ -115,6 +115,71 @@ mod batch_avss_benches { } } + { + let mut verify_common: BenchmarkGroup<_> = c.benchmark_group(format!( + "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) verify_common_message" + )); + for (n, total_w) in iproduct!(SIZES.iter(), TOTAL_WEIGHTS.iter()) { + let w = total_w / n; + let total_w = w * n; + let t = total_w / 3 - 1; + let f = t.saturating_sub(1); + let keys = generate_ecies_keys(*n); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); + let r1 = setup_receiver(1, 0, f, t, w, &keys, batch_size_per_weight); + let messages = d0.create_message(&mut thread_rng()).unwrap(); + let common = messages[1].common.clone(); + verify_common.bench_function( + format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), + |b| b.iter(|| r1.verify_common_message(common.clone()).unwrap()), + ); + } + } + + { + let mut echo: BenchmarkGroup<_> = c.benchmark_group(format!( + "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) echo" + )); + for (n, total_w) in iproduct!(SIZES.iter(), TOTAL_WEIGHTS.iter()) { + let w = total_w / n; + let total_w = w * n; + let t = total_w / 3 - 1; + let f = t.saturating_sub(1); + let keys = generate_ecies_keys(*n); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); + let r1 = setup_receiver(1, 0, f, t, w, &keys, batch_size_per_weight); + let messages = d0.create_message(&mut thread_rng()).unwrap(); + let message = &messages[1]; + echo.bench_function( + format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), + |b| b.iter(|| r1.echo(message).unwrap()), + ); + } + } + + { + let mut verify_echo: BenchmarkGroup<_> = c.benchmark_group(format!( + "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) verify_echo" + )); + for (n, total_w) in iproduct!(SIZES.iter(), TOTAL_WEIGHTS.iter()) { + let w = total_w / n; + let total_w = w * n; + let t = total_w / 3 - 1; + 
let f = t.saturating_sub(1); + let keys = generate_ecies_keys(*n); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); + let r0 = setup_receiver(0, 0, f, t, w, &keys, batch_size_per_weight); + let r1 = setup_receiver(1, 0, f, t, w, &keys, batch_size_per_weight); + let messages = d0.create_message(&mut thread_rng()).unwrap(); + let (vcm, echoes_from_r0) = r0.echo(&messages[0]).unwrap(); + let echo_for_r1 = echoes_from_r0[1].clone(); + verify_echo.bench_function( + format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), + |b| b.iter(|| r1.verify_echo(echo_for_r1.clone(), &vcm).unwrap()), + ); + } + } + { let mut process: BenchmarkGroup<_> = c.benchmark_group(format!( "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) process_message" @@ -155,6 +220,51 @@ mod batch_avss_benches { ); } } + + { + let mut verify_decrypt: BenchmarkGroup<_> = c.benchmark_group(format!( + "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) verify_and_decrypt" + )); + for (n, total_w) in iproduct!(SIZES.iter(), TOTAL_WEIGHTS.iter()) { + let w = total_w / n; + let total_w = w * n; + let t = total_w / 3 - 1; + let f = t.saturating_sub(1); + let keys = generate_ecies_keys(*n); + let d0 = setup_dealer(0, f, t, w, &keys, batch_size_per_weight); + let receivers: Vec = (0..*n) + .map(|id| setup_receiver(id, 0, f, t, w, &keys, batch_size_per_weight)) + .collect(); + let messages = d0.create_message(&mut thread_rng()).unwrap(); + let mut vcm = None; + let echoes: Vec> = receivers + .iter() + .enumerate() + .map(|(i, r)| { + let (v, e) = r.echo(&messages[i]).unwrap(); + if i == 1 { + vcm = Some(v); + } + e + }) + .collect(); + let vcm = vcm.unwrap(); + let echoes_for_party_1: Vec = echoes + .iter() + .map(|em| receivers[1].verify_echo(em[1].clone(), &vcm).unwrap()) + .collect(); + let r1 = &receivers[1]; + let pem = match r1.decode_ciphertext(&echoes_for_party_1, &vcm).unwrap() { + batch_avss::DecodeOutcome::Decoded(d) => d, + _ => panic!("expected 
Decoded outcome"), + }; + + verify_decrypt.bench_function( + format!("n={}, total_weight={}, t={}, w={}", n, total_w, t, w).as_str(), + |b| b.iter(|| r1.verify_and_decrypt(&pem, &vcm).unwrap()), + ); + } + } { let mut complete: BenchmarkGroup<_> = c.benchmark_group(format!( "BATCH_AVSS (batch_size_per_weight = {batch_size_per_weight}) presigning"