Skip to content

Commit d843434

Browse files
yoavGrs and claude authored
starknet_committer,apollo_committer: remove CommitBlockTrait, promote commit_block to free function (#13758)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 8a80b28 commit d843434

10 files changed

Lines changed: 110 additions & 139 deletions

File tree

crates/apollo_committer/src/committer.rs

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
use std::collections::HashMap;
22
use std::error::Error;
3-
use std::marker::PhantomData;
43
use std::path::PathBuf;
54

65
use apollo_committer_config::config::{ApolloStorage, CommitterConfig};
@@ -18,7 +17,7 @@ use starknet_api::block_hash::state_diff_hash::calculate_state_diff_hash;
1817
use starknet_api::core::{GlobalRoot, StateDiffCommitment};
1918
use starknet_api::hash::PoseidonHash;
2019
use starknet_api::state::ThinStateDiff;
21-
use starknet_committer::block_committer::commit::CommitBlockTrait;
20+
use starknet_committer::block_committer::commit::commit_block;
2221
use starknet_committer::block_committer::input::Input;
2322
use starknet_committer::block_committer::measurements_util::{
2423
Action,
@@ -70,10 +69,7 @@ mod committer_test;
7069

7170
pub type ApolloCommitterDb = IndexDb<ApolloStorage>;
7271

73-
pub struct BlockCommitter;
74-
impl CommitBlockTrait for BlockCommitter {}
75-
76-
pub type ApolloCommitter = Committer<ApolloStorage, ApolloCommitterDb, BlockCommitter>;
72+
pub type ApolloCommitter = Committer<ApolloStorage, ApolloCommitterDb>;
7773

7874
pub trait StorageConstructor: Storage {
7975
fn create_storage(db_path: PathBuf, storage_config: Self::Config) -> Self;
@@ -96,33 +92,29 @@ struct CommitStateDiffOutput {
9692
}
9793

9894
/// Apollo committer. Maintains the Starknet state tries in persistent storage.
99-
pub struct Committer<S: Storage, ForestDB, BlockCommitter>
95+
pub struct Committer<S: Storage, ForestDB>
10096
where
10197
ForestDB: ForestStorageWithEmptyReadContext,
102-
BlockCommitter: CommitBlockTrait,
10398
{
10499
/// Storage for forest operations.
105100
forest_storage: ForestDB,
106101
/// Committer config.
107102
config: CommitterConfig<S::Config>,
108103
/// The next block number to commit.
109104
offset: BlockNumber,
110-
// Allow define the generic type CB and not use it.
111-
phantom: PhantomData<BlockCommitter>,
112105
}
113106

114-
impl<S, ForestDB, BlockCommitter> Committer<S, ForestDB, BlockCommitter>
107+
impl<S, ForestDB> Committer<S, ForestDB>
115108
where
116109
S: StorageConstructor,
117110
ForestDB: ForestStorageWithEmptyReadContext<Storage = S>,
118-
BlockCommitter: CommitBlockTrait,
119111
{
120112
pub async fn new(config: CommitterConfig<S::Config>) -> Self {
121113
let storage = S::create_storage(config.db_path.clone(), config.storage_config.clone());
122114
let mut forest_storage = ForestDB::new(storage);
123115
let offset = Self::load_offset_or_panic(&mut forest_storage).await;
124116
info!("Initializing committer with offset: {offset}");
125-
Self { forest_storage, config, offset, phantom: PhantomData }
117+
Self { forest_storage, config, offset }
126118
}
127119

128120
fn update_offset(&mut self, offset: BlockNumber) {
@@ -405,7 +397,7 @@ where
405397
config: self.config.reader_config.clone(),
406398
};
407399
let (filled_forest, deleted_nodes) =
408-
BlockCommitter::commit_block(input, &mut self.forest_storage, measurements)
400+
commit_block(input, &mut self.forest_storage, measurements)
409401
.await
410402
.map_err(|err| self.map_internal_error(err))?;
411403
let global_root = filled_forest.state_roots().global_root();

crates/apollo_committer/src/committer_test.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,11 @@ use starknet_api::state::ThinStateDiff;
1717
use starknet_committer::db::index_db::db::IndexDb;
1818
use starknet_patricia_storage::map_storage::MapStorage;
1919

20-
use super::{BlockCommitter, Committer};
20+
use super::Committer;
2121
use crate::committer::StorageConstructor;
2222

2323
pub type ApolloTestStorage = MapStorage;
24-
pub type ApolloTestCommitter =
25-
Committer<ApolloTestStorage, IndexDb<ApolloTestStorage>, BlockCommitter>;
24+
pub type ApolloTestCommitter = Committer<ApolloTestStorage, IndexDb<ApolloTestStorage>>;
2625

2726
impl StorageConstructor for ApolloTestStorage {
2827
fn create_storage(_db_path: PathBuf, _storage_config: Self::Config) -> Self {

crates/apollo_committer/src/communication.rs

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ use apollo_committer_types::communication::{CommitterRequest, CommitterResponse}
22
use apollo_infra::component_definitions::ComponentRequestHandler;
33
use apollo_infra::component_server::{LocalComponentServer, RemoteComponentServer};
44
use async_trait::async_trait;
5-
use starknet_committer::block_committer::commit::CommitBlockTrait;
65
use starknet_committer::db::forest_trait::ForestStorageWithEmptyReadContext;
76

87
use crate::committer::{ApolloCommitter, Committer, StorageConstructor};
@@ -12,12 +11,8 @@ pub type LocalCommitterServer =
1211
pub type RemoteCommitterServer = RemoteComponentServer<CommitterRequest, CommitterResponse>;
1312

1413
#[async_trait]
15-
impl<
16-
S: StorageConstructor,
17-
ForestDB: ForestStorageWithEmptyReadContext<Storage = S>,
18-
BlockCommitter: CommitBlockTrait,
19-
> ComponentRequestHandler<CommitterRequest, CommitterResponse>
20-
for Committer<S, ForestDB, BlockCommitter>
14+
impl<S: StorageConstructor, ForestDB: ForestStorageWithEmptyReadContext<Storage = S>>
15+
ComponentRequestHandler<CommitterRequest, CommitterResponse> for Committer<S, ForestDB>
2116
{
2217
async fn handle_request(&mut self, request: CommitterRequest) -> CommitterResponse {
2318
match request {

crates/starknet_committer/src/block_committer/commit.rs

Lines changed: 78 additions & 88 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
use std::collections::HashMap;
22

3-
use async_trait::async_trait;
43
use starknet_api::core::{ClassHash, ContractAddress, Nonce};
54
use starknet_patricia::patricia_merkle_tree::types::{NodeIndex, SortedLeafIndices};
65
use starknet_types_core::felt::Felt;
@@ -30,101 +29,92 @@ use crate::patricia_merkle_tree::types::class_hash_into_node_index;
3029

3130
pub type BlockCommitmentResult<T> = Result<T, BlockCommitmentError>;
3231

33-
// TODO(Yoav): Remove this trait when the index layout is ready.
34-
#[async_trait]
35-
pub trait CommitBlockTrait: Send {
36-
async fn commit_block<Reader: ForestReader + Send, M: MeasurementsTrait + Send>(
37-
input: Input<Reader::InitialReadContext>,
38-
trie_reader: &mut Reader,
39-
measurements: &mut M,
40-
) -> BlockCommitmentResult<(FilledForest, DeletedNodes)> {
41-
let (mut storage_tries_indices, mut contracts_trie_indices, mut classes_trie_indices) =
42-
get_all_modified_indices(&input.state_diff);
43-
let n_contracts_trie_modifications = contracts_trie_indices.len();
44-
let forest_sorted_indices = ForestSortedIndices {
45-
storage_tries_sorted_indices: storage_tries_indices
46-
.iter_mut()
47-
.map(|(address, indices)| (*address, SortedLeafIndices::new(indices)))
48-
.collect(),
49-
contracts_trie_sorted_indices: SortedLeafIndices::new(&mut contracts_trie_indices),
50-
classes_trie_sorted_indices: SortedLeafIndices::new(&mut classes_trie_indices),
51-
};
52-
let actual_storage_updates = input.state_diff.actual_storage_updates();
53-
let actual_classes_updates = input.state_diff.actual_classes_updates();
54-
// Record the number of modifications.
55-
measure_number_of_modifications(
56-
measurements,
57-
&actual_storage_updates,
58-
n_contracts_trie_modifications,
59-
actual_classes_updates.len(),
60-
);
61-
// Reads - fetch_nodes.
62-
measurements.start_measurement(Action::Read);
63-
let roots =
64-
trie_reader.read_roots(input.initial_read_context).await.map_err(ForestError::from)?;
65-
let (original_forest, original_contracts_trie_leaves) = trie_reader
66-
.read(
67-
roots,
68-
&actual_storage_updates,
69-
&actual_classes_updates,
70-
&forest_sorted_indices,
71-
input.config.clone(),
72-
)
73-
.await?;
74-
let n_read_entries =
75-
original_forest.storage_tries.values().map(|trie| trie.nodes.len()).sum();
76-
measurements.attempt_to_stop_measurement(Action::Read, n_read_entries).ok();
77-
debug!("Original skeleton forest created successfully.");
78-
79-
if input.config.warn_on_trivial_modifications() {
80-
check_trivial_nonce_and_class_hash_updates(
81-
&original_contracts_trie_leaves,
82-
&input.state_diff.address_to_class_hash,
83-
&input.state_diff.address_to_nonce,
84-
);
85-
}
86-
87-
// Compute the new topology.
88-
measurements.start_measurement(Action::Compute);
89-
let updated_forest = UpdatedSkeletonForest::create(
90-
&original_forest,
91-
&input.state_diff.skeleton_classes_updates(),
92-
&input.state_diff.skeleton_storage_updates(),
93-
&original_contracts_trie_leaves,
94-
&input.state_diff.address_to_class_hash,
95-
&input.state_diff.address_to_nonce,
96-
)?;
97-
debug!("Updated skeleton forest created successfully.");
98-
99-
// Find deleted nodes.
100-
let deleted_nodes = find_deleted_nodes(
101-
&original_forest,
102-
&updated_forest,
32+
pub async fn commit_block<Reader: ForestReader + Send, M: MeasurementsTrait + Send>(
33+
input: Input<Reader::InitialReadContext>,
34+
trie_reader: &mut Reader,
35+
measurements: &mut M,
36+
) -> BlockCommitmentResult<(FilledForest, DeletedNodes)> {
37+
let (mut storage_tries_indices, mut contracts_trie_indices, mut classes_trie_indices) =
38+
get_all_modified_indices(&input.state_diff);
39+
let n_contracts_trie_modifications = contracts_trie_indices.len();
40+
let forest_sorted_indices = ForestSortedIndices {
41+
storage_tries_sorted_indices: storage_tries_indices
42+
.iter_mut()
43+
.map(|(address, indices)| (*address, SortedLeafIndices::new(indices)))
44+
.collect(),
45+
contracts_trie_sorted_indices: SortedLeafIndices::new(&mut contracts_trie_indices),
46+
classes_trie_sorted_indices: SortedLeafIndices::new(&mut classes_trie_indices),
47+
};
48+
let actual_storage_updates = input.state_diff.actual_storage_updates();
49+
let actual_classes_updates = input.state_diff.actual_classes_updates();
50+
// Record the number of modifications.
51+
measure_number_of_modifications(
52+
measurements,
53+
&actual_storage_updates,
54+
n_contracts_trie_modifications,
55+
actual_classes_updates.len(),
56+
);
57+
// Reads - fetch_nodes.
58+
measurements.start_measurement(Action::Read);
59+
let roots =
60+
trie_reader.read_roots(input.initial_read_context).await.map_err(ForestError::from)?;
61+
let (original_forest, original_contracts_trie_leaves) = trie_reader
62+
.read(
63+
roots,
10364
&actual_storage_updates,
10465
&actual_classes_updates,
105-
&original_contracts_trie_leaves,
106-
);
66+
&forest_sorted_indices,
67+
input.config.clone(),
68+
)
69+
.await?;
70+
let n_read_entries = original_forest.storage_tries.values().map(|trie| trie.nodes.len()).sum();
71+
measurements.attempt_to_stop_measurement(Action::Read, n_read_entries).ok();
72+
debug!("Original skeleton forest created successfully.");
10773

108-
// Compute the new hashes.
109-
let filled_forest = FilledForest::create::<TreeHashFunctionImpl>(
110-
updated_forest,
111-
actual_storage_updates,
112-
actual_classes_updates,
74+
if input.config.warn_on_trivial_modifications() {
75+
check_trivial_nonce_and_class_hash_updates(
11376
&original_contracts_trie_leaves,
11477
&input.state_diff.address_to_class_hash,
11578
&input.state_diff.address_to_nonce,
116-
)
117-
.await?;
118-
measurements.attempt_to_stop_measurement(Action::Compute, 0).ok();
119-
debug!("Filled forest created successfully.");
120-
121-
Ok((filled_forest, deleted_nodes))
79+
);
12280
}
123-
}
12481

125-
pub struct CommitBlockImpl;
126-
127-
impl CommitBlockTrait for CommitBlockImpl {}
82+
// Compute the new topology.
83+
measurements.start_measurement(Action::Compute);
84+
let updated_forest = UpdatedSkeletonForest::create(
85+
&original_forest,
86+
&input.state_diff.skeleton_classes_updates(),
87+
&input.state_diff.skeleton_storage_updates(),
88+
&original_contracts_trie_leaves,
89+
&input.state_diff.address_to_class_hash,
90+
&input.state_diff.address_to_nonce,
91+
)?;
92+
debug!("Updated skeleton forest created successfully.");
93+
94+
// Find deleted nodes.
95+
let deleted_nodes = find_deleted_nodes(
96+
&original_forest,
97+
&updated_forest,
98+
&actual_storage_updates,
99+
&actual_classes_updates,
100+
&original_contracts_trie_leaves,
101+
);
102+
103+
// Compute the new hashes.
104+
let filled_forest = FilledForest::create::<TreeHashFunctionImpl>(
105+
updated_forest,
106+
actual_storage_updates,
107+
actual_classes_updates,
108+
&original_contracts_trie_leaves,
109+
&input.state_diff.address_to_class_hash,
110+
&input.state_diff.address_to_nonce,
111+
)
112+
.await?;
113+
measurements.attempt_to_stop_measurement(Action::Compute, 0).ok();
114+
debug!("Filled forest created successfully.");
115+
116+
Ok((filled_forest, deleted_nodes))
117+
}
128118

129119
/// Compares the previous state's nonce and class hash with the given in the state diff.
130120
/// In case of trivial update, logs out a warning for trivial state diff update.

crates/starknet_committer/src/block_committer/commit_test.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ use starknet_api::hash::StateRoots;
1111
use starknet_patricia_storage::map_storage::MapStorage;
1212
use starknet_types_core::felt::Felt;
1313

14-
use crate::block_committer::commit::{CommitBlockImpl, CommitBlockTrait};
14+
use crate::block_committer::commit::commit_block;
1515
use crate::block_committer::input::{
1616
Input,
1717
ReaderConfig,
@@ -128,7 +128,7 @@ async fn test_commit_two_consecutive_blocks<Db: ForestReader + ForestWriter>(
128128
config: ReaderConfig::default(),
129129
};
130130
let (filled_forest, _deleted_nodes) =
131-
CommitBlockImpl::commit_block(input, &mut db, &mut NoMeasurements).await.unwrap();
131+
commit_block(input, &mut db, &mut NoMeasurements).await.unwrap();
132132
db.write(&filled_forest).await.unwrap();
133133

134134
input = Input {
@@ -138,7 +138,7 @@ async fn test_commit_two_consecutive_blocks<Db: ForestReader + ForestWriter>(
138138
};
139139

140140
let (filled_forest, _deleted_nodes) =
141-
CommitBlockImpl::commit_block(input, &mut db, &mut NoMeasurements).await.unwrap();
141+
commit_block(input, &mut db, &mut NoMeasurements).await.unwrap();
142142
db.write(&filled_forest).await.unwrap();
143143

144144
expected_roots.assert_debug_eq(&filled_forest.state_roots());

crates/starknet_committer/src/forest/deleted_nodes_test.rs

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ use starknet_patricia::patricia_merkle_tree::types::NodeIndex;
55
use starknet_patricia_storage::map_storage::MapStorage;
66
use starknet_types_core::felt::Felt;
77

8-
use crate::block_committer::commit::{CommitBlockImpl, CommitBlockTrait};
8+
use crate::block_committer::commit::commit_block;
99
use crate::block_committer::input::{
1010
contract_address_into_node_index,
1111
Input,
@@ -23,7 +23,7 @@ const CONTRACT_ADDRESS: ContractAddress = ContractAddress(PatriciaKey::from_hex_
2323

2424
/// Commits a block with the given leaves and value, and writes the forest to storage.
2525
/// Returns the deleted nodes.
26-
async fn commit_block(
26+
async fn commit_block_and_write(
2727
leaves: Vec<u128>,
2828
value: u128,
2929
index_db: &mut IndexDb<MapStorage>,
@@ -44,9 +44,7 @@ async fn commit_block(
4444
};
4545

4646
let (filled_forest, deleted_nodes) =
47-
CommitBlockImpl::commit_block(input, index_db, &mut NoMeasurements)
48-
.await
49-
.expect("Failed to commit block");
47+
commit_block(input, index_db, &mut NoMeasurements).await.expect("Failed to commit block");
5048

5149
// Write the forest to storage so the roots are persisted, including deleted nodes
5250
ForestWriterWithMetadata::write_with_metadata(
@@ -76,10 +74,10 @@ async fn verify_deleted_nodes(
7674
let mut index_db = IndexDb::new(MapStorage::default());
7775

7876
// Commit block with original leaves (all non-zero)
79-
commit_block(initial_leaves, 100, &mut index_db).await;
77+
commit_block_and_write(initial_leaves, 100, &mut index_db).await;
8078

8179
// Commit block with deletion leaves (all zero)
82-
let deleted_nodes = commit_block(leaves_to_delete, 0, &mut index_db).await;
80+
let deleted_nodes = commit_block_and_write(leaves_to_delete, 0, &mut index_db).await;
8381

8482
// Verify the deleted nodes match the expected set.
8583
let storage_trie_deleted_nodes = deleted_nodes.storage_tries.get(&CONTRACT_ADDRESS);

crates/starknet_committer_and_os_cli/src/committer_cli/commands.rs

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use starknet_committer::block_committer::commit::{CommitBlockImpl, CommitBlockTrait};
1+
use starknet_committer::block_committer::commit::commit_block;
22
use starknet_committer::block_committer::measurements_util::NoMeasurements;
33
use starknet_committer::db::facts_db::FactsDb;
44
use starknet_committer::db::forest_trait::StorageInitializer;
@@ -35,10 +35,9 @@ pub async fn parse_and_commit(
3535

3636
pub async fn commit(input: FactsDbInputImpl, output_path: String, storage: MapStorage) {
3737
let mut facts_db = FactsDb::new(storage);
38-
let (filled_forest, _deleted_nodes) =
39-
CommitBlockImpl::commit_block(input, &mut facts_db, &mut NoMeasurements)
40-
.await
41-
.expect("Failed to commit the given block.");
38+
let (filled_forest, _deleted_nodes) = commit_block(input, &mut facts_db, &mut NoMeasurements)
39+
.await
40+
.expect("Failed to commit the given block.");
4241
let output = SerializedForest(filled_forest)
4342
.forest_to_output()
4443
.await

0 commit comments

Comments (0)