-
Notifications
You must be signed in to change notification settings - Fork 396
Expand file tree
/
Copy pathaggregation_layer.rs
More file actions
262 lines (232 loc) · 9.27 KB
/
aggregation_layer.rs
File metadata and controls
262 lines (232 loc) · 9.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
use crate::{
beacon::{BeaconClient, BeaconClientError},
common::types::Network,
};
use ethers::{
providers::{Http, Middleware, Provider},
types::Filter,
};
use lambdaworks_crypto::merkle_tree::{merkle::MerkleTree, traits::IsMerkleTreeBackend};
use sha3::{Digest, Keccak256};
/// How much to go back from current block if from_block is not provided.
/// 7500 blocks = 25hr (at ~12 seconds per block: 7500 * 12s = 90000s = 25h)
const FROM_BLOCKS_AGO_DEFAULT: u64 = 7500;
/// Identifies a proof whose inclusion in a recent aggregated proof should be checked.
/// Each variant carries the proving-system-specific program identifier together with
/// the proof's public inputs; both are folded into the commitment (see `commitment()`).
#[derive(Debug)]
pub enum AggregationModeVerificationData {
    SP1 {
        // 32-byte SP1 verification key of the program
        vk: [u8; 32],
        // Public inputs of the proof
        public_inputs: Vec<u8>,
    },
    Risc0 {
        // 32-byte Risc0 image id of the program
        image_id: [u8; 32],
        // Public inputs of the proof
        public_inputs: Vec<u8>,
    },
}
impl AggregationModeVerificationData {
    /// Commitment of this proof as stored in the aggregation blob:
    /// `Keccak256(program identifier || public inputs)`, where the program
    /// identifier is the SP1 verification key or the Risc0 image id.
    fn commitment(&self) -> [u8; 32] {
        // Both variants commit the same way; extract the per-system fields once.
        let (program_id, public_inputs) = match self {
            AggregationModeVerificationData::SP1 { vk, public_inputs } => (vk, public_inputs),
            AggregationModeVerificationData::Risc0 {
                image_id,
                public_inputs,
            } => (image_id, public_inputs),
        };

        let mut hasher = Keccak256::new();
        hasher.update(program_id);
        hasher.update(public_inputs);
        hasher.finalize().into()
    }
}
// We use a newtype wrapper around `[u8; 32]` because Rust's orphan rule
// prevents implementing a foreign trait (`IsMerkleTreeBackend`) for a foreign type (`[u8; 32]`).
/// A 32-byte hash value; serves as the leaf type of the proof-commitment Merkle tree.
#[derive(Default, Debug, PartialEq, Eq)]
pub struct Hash32([u8; 32]);
// Note:
// We define a version of the backend that takes the leaves as hashed data
// since the user may not have access to the proofs that he didn't submit
// The original MerkleTreeBackend is defined in three locations
// - aggregation_mode/src/aggregators/mod.rs
// - aggregation_mode/src/aggregators/risc0_aggregator.rs
// - aggregation_mode/src/aggregators/sp1_aggregator.rs
// The definition on aggregator/mod.rs supports taking proofs from both Risc0 and SP1
// Hashes of all implementations should match
impl IsMerkleTreeBackend for Hash32 {
    type Data = Hash32;
    type Node = [u8; 32];

    /// Leaves arrive pre-hashed (the blob already stores proof commitments, which
    /// are the Merkle leaves), so this is just an unwrap of the newtype.
    fn hash_data(leaf: &Self::Data) -> Self::Node {
        leaf.0
    }

    /// Computes a commutative Keccak256 hash, ensuring H(a, b) == H(b, a).
    ///
    /// See: https://docs.openzeppelin.com/contracts/5.x/api/utils#Hashes
    ///
    /// Source: https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/utils/cryptography/Hashes.sol#L17-L19
    ///
    /// Compliant with OpenZeppelin's `processProofCalldata` function from MerkleProof.sol.
    ///
    /// See: https://docs.openzeppelin.com/contracts/5.x/api/utils#MerkleProof
    ///
    /// Source: https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/utils/cryptography/MerkleProof.sol#L114-L128
    fn hash_new_parent(child_1: &Self::Node, child_2: &Self::Node) -> Self::Node {
        // Sort the children so the hash is order-independent.
        let (smaller, larger) = if child_1 <= child_2 {
            (child_1, child_2)
        } else {
            (child_2, child_1)
        };
        let mut hasher = Keccak256::new();
        hasher.update(smaller);
        hasher.update(larger);
        hasher.finalize().into()
    }
}
/// Errors that can occur while checking proof inclusion in aggregation mode.
#[derive(Debug)]
pub enum ProofVerificationAggModeError {
    // The supplied proving system is not supported by aggregation mode
    ProvingSystemNotSupportedInAggMode,
    // The Ethereum RPC provider failed; carries the underlying error message
    EthereumProviderError(String),
    // The beacon node client failed
    BeaconClient(BeaconClientError),
    // The Merkle root rebuilt from the blob does not match the root emitted in the event
    UnmatchedBlobAndEventMerkleRoot,
    // No event in the scanned block range contained the given proof commitment
    ProofNotFoundInLogs,
    // An `AggregatedProofVerified` event log could not be decoded
    EventDecoding,
}
/// Given the [`AggregationModeVerificationData`], this function checks whether the proof was included
/// in a recent aggregated proof and verifies the corresponding Merkle root commitment.
///
/// Note: This functionality is currently in Beta. As a result, we cannot determine with certainty
/// which specific aggregation a proof belongs to. Instead, we check the events from the specified `from_block`.
///
/// Note: The `from_block` must not be older than 18 days,
/// as blobs expire after that period and will no longer be retrievable.
/// If not provided, it defaults to fetch logs from [`FROM_BLOCKS_AGO_DEFAULT`]
///
/// Returns the event's Merkle root on success.
///
/// The step-by-step verification process includes:
/// 1. Querying the blob versioned hash from the events emitted by the aligned proof aggregation service contract since `from_block`
/// 2. Retrieving the corresponding beacon block using the block's parent beacon root
/// 3. Fetching the blobs associated with that slot
/// 4. Filtering the blob that matches the queried blob versioned hash
/// 5. Decoding the blob to extract the proofs commitments
/// 6. Checking if the given proof commitment exists within the blob's proofs
/// 7. Reconstructing the Merkle root and verifying it against the root stored in the contract
pub async fn is_proof_verified_in_aggregation_mode(
    verification_data: AggregationModeVerificationData,
    network: Network,
    eth_rpc_url: String,
    beacon_client_url: String,
    from_block: Option<u64>,
) -> Result<[u8; 32], ProofVerificationAggModeError> {
    let eth_rpc_provider = Provider::<Http>::try_from(eth_rpc_url)
        .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?;
    let beacon_client = BeaconClient::new(beacon_client_url);

    // Default to scanning the last FROM_BLOCKS_AGO_DEFAULT blocks.
    let from_block = match from_block {
        Some(from_block) => from_block,
        None => {
            let block_number = eth_rpc_provider
                .get_block_number()
                .await
                .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?;
            block_number
                .as_u64()
                .saturating_sub(FROM_BLOCKS_AGO_DEFAULT)
        }
    };

    let filter = Filter::new()
        .address(network.get_aligned_proof_agg_service_address())
        .event("AggregatedProofVerified(bytes32,bytes32)")
        .from_block(from_block);

    // Fix: previously this `unwrap`ed, panicking on any RPC failure instead of
    // surfacing it through the existing EthereumProviderError variant.
    let logs = eth_rpc_provider
        .get_logs(&filter)
        .await
        .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?;

    for log in logs {
        // First 32 bytes of the data are the bytes of the blob versioned hash.
        // Fix: checked access — `log.data[0..32]` panicked on short event data
        // before the try_into error mapping could fire.
        let blob_versioned_hash: [u8; 32] = log
            .data
            .get(0..32)
            .and_then(|bytes| bytes.try_into().ok())
            .ok_or(ProofVerificationAggModeError::EventDecoding)?;
        // Event is indexed by merkle root.
        // Fix: checked access — `log.topics[1]` panicked when the topic was missing.
        let merkle_root = log
            .topics
            .get(1)
            .ok_or(ProofVerificationAggModeError::EventDecoding)?
            .0;

        // Block Number shouldn't be empty, in case it is,
        // there is a problem with this log, and we skip it
        // This same logic is replicated for other checks.
        let Some(block_number) = log.block_number else {
            continue;
        };

        let Some(block) = eth_rpc_provider
            .get_block(block_number.as_u64())
            .await
            .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?
        else {
            continue;
        };

        let Some(beacon_parent_root) = block.parent_beacon_block_root else {
            continue;
        };

        let Some(beacon_block) = beacon_client
            .get_block_header_from_parent_hash(beacon_parent_root.0)
            .await
            .map_err(ProofVerificationAggModeError::BeaconClient)?
        else {
            continue;
        };

        // The beacon API returns the slot as a decimal string. Fix: a malformed
        // slot previously panicked via `expect`; skip the log instead.
        let Ok(slot) = beacon_block.header.message.slot.parse::<u64>() else {
            continue;
        };

        let Some(blob_data) = beacon_client
            .get_blob_by_versioned_hash(slot, blob_versioned_hash)
            .await
            .map_err(ProofVerificationAggModeError::BeaconClient)?
        else {
            continue;
        };

        // Blob is hex encoded with an optional 0x prefix. Fix: malformed hex
        // previously panicked via `expect`; skip the log instead. (Stripping only
        // the prefix is equivalent to the old `replace("0x", "")` since valid hex
        // contains no 'x', but states the intent.)
        let Ok(blob_bytes) = hex::decode(blob_data.blob.trim_start_matches("0x")) else {
            continue;
        };

        let proof_commitments: Vec<Hash32> = decoded_blob(blob_bytes)
            .iter()
            .map(|p| Hash32(*p))
            .collect();

        // `build` returns None for an empty leaf set; nothing to check then.
        let Some(merkle_tree) = MerkleTree::<Hash32>::build(&proof_commitments) else {
            continue;
        };

        if proof_commitments.contains(&Hash32(verification_data.commitment())) {
            // The proof is in this blob: accept only if the root we rebuilt
            // matches the root the contract event committed to.
            return if merkle_tree.root == merkle_root {
                Ok(merkle_root)
            } else {
                Err(ProofVerificationAggModeError::UnmatchedBlobAndEventMerkleRoot)
            };
        }
    }

    Err(ProofVerificationAggModeError::ProofNotFoundInLogs)
}
/// Decodes a blob into the list of 32-byte proof commitments it contains.
///
/// Blob layout: the blob is a sequence of 32-byte field elements whose first byte
/// is always a 0x00 pad; the remaining 31 bytes of each element carry payload.
/// Commitments are reassembled from the payload stream until an all-zero 32-byte
/// hash (the terminator) or the end of the blob is reached; a trailing partial
/// hash is discarded.
fn decoded_blob(blob_data: Vec<u8>) -> Vec<[u8; 32]> {
    let mut hashes: Vec<[u8; 32]> = Vec::new();
    let mut pending = [0u8; 32];
    let mut filled = 0usize;

    for (pos, byte) in blob_data.iter().enumerate() {
        // The first byte of every 32-byte field element is padding; skip it.
        if pos % 32 == 0 {
            continue;
        }
        pending[filled] = *byte;
        filled += 1;
        if filled == 32 {
            // An all-zero hash marks the end of the proof list.
            if pending == [0u8; 32] {
                break;
            }
            hashes.push(pending);
            pending = [0u8; 32];
            filled = 0;
        }
    }

    hashes
}