Skip to content

Commit 79969d6

Browse files
committed
finish page allocator verification
Signed-off-by: Lennard Gäher <l.gaeher@posteo.de>
1 parent 24be7b6 commit 79969d6

61 files changed

Lines changed: 2015 additions & 718 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.github/workflows/verify.yml

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ jobs:
1717
- name: Install opam dependencies
1818
run: sudo apt update -y && sudo apt install -y pkg-config git rsync tar unzip m4 time curl ocaml build-essential bubblewrap gawk libgmp-dev python3 python-is-python3 libmpfr-dev && python3 -m pip install setuptools
1919
- name: Install opam
20-
run: curl "https://github.com/ocaml/opam/releases/download/2.1.5/opam-2.1.5-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam
20+
run: curl "https://github.com/ocaml/opam/releases/download/2.5.0/opam-2.5.0-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam
2121
- name: Setup opam
2222
run: opam init -y
2323
- name: Setup opam switch
@@ -63,12 +63,14 @@ jobs:
6363
python3 .github/workflows/prune_dune_modules.py verification/rust_proofs/ace/proofs/dune \
6464
proof_core_page_allocator_allocator_PageAllocator_add_memory_region \
6565
proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
66+
proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token \
6667
proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary \
68+
proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens \
6769
proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size \
6870
proof_core_page_allocator_page_Page_T_write \
6971
proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read \
70-
proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
71-
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide
72+
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide \
73+
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge
7274
- name: Compile Rocq proofs
7375
run: |
7476
(

rust-toolchain.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
[toolchain]
2-
channel = "nightly-2025-09-12"
2+
channel = "nightly-2026-03-23"
33
targets = [ "riscv64gc-unknown-none-elf" ]

security-monitor/rust-crates/pointers_utility/src/lib.rs

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,17 @@ mod error;
1616
use core::mem::size_of;
1717
pub use crate::error::PointerError;
1818

19+
// !start spec(pointers_utility.ptr_byte_offset)
1920
/// Calculates the offset in bytes between two pointers.
2021
#[rr::only_spec]
21-
#[rr::returns("wrap_to_it (pointer1.2 - pointer2.2) isize")]
22+
#[rr::returns("wrap_to_it (pointer1.(loc_a) - pointer2.(loc_a)) isize")]
23+
// !end spec
24+
// !start code(pointers_utility.ptr_byte_offset)
2225
pub fn ptr_byte_offset(pointer1: *const usize, pointer2: *const usize) -> isize {
2326
// TODO: we should use wrapping arithmetic here, as it might overflow
2427
(pointer1.addr() as isize) - (pointer2.addr() as isize)
2528
}
29+
// !end code
2630

2731
/// Aligns the pointer to specific size while making sure that the aligned pointer
2832
/// is still within the memory region owned by the original pointer. Check `ptr_byte_add_mut`
@@ -32,14 +36,17 @@ pub fn ptr_align(pointer: *mut usize, align_in_bytes: usize, owned_region_end: *
3236
ptr_byte_add_mut(pointer, offset_to_align, owned_region_end)
3337
}
3438

39+
// !start spec(pointers_utility.ptr_byte_add_mut)
3540
/// Calculates the offset from a mutable raw pointer. This function guarantees that
3641
/// the returned pointer did not overflow and is within the owned memory region excluding
3742
/// the one-past-the-end address. The returned pointer is guaranteed to be valid for accesses
3843
/// of size one, if the original pointer is valid. Additional checks are required for making
3944
/// larger memory accesses.
4045
#[rr::ok]
41-
#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")]
46+
#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")]
4247
#[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")]
48+
// !end spec
49+
// !start code(pointers_utility.ptr_byte_add_mut)
4350
pub fn ptr_byte_add_mut(
4451
pointer: *mut usize, offset_in_bytes: usize, owned_region_end: *const usize,
4552
) -> Result<*mut usize, PointerError> {
@@ -54,17 +61,22 @@ pub fn ptr_byte_add_mut(
5461
}
5562
Ok(incremented_pointer)
5663
}
64+
// !end code
5765

66+
// !start spec(pointers_utility.ptr_byte_add)
5867
/// Calculates the offset from a raw pointer. This function guarantees that
5968
/// the returning pointer did not overflow and is within the owned memory region excluding
6069
/// the one-past-the-end address. The returned pointer is guaranteed to be valid for accesses
6170
/// of size one, if the original pointer is valid. Additional checks are required for making
6271
/// larger memory accesses.
6372
#[rr::ok]
64-
#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")]
73+
#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")]
6574
#[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")]
75+
// !end spec
76+
// !start code(pointers_utility.ptr_byte_add)
6677
pub fn ptr_byte_add(
6778
pointer: *const usize, offset_in_bytes: usize, owned_region_end: *const usize,
6879
) -> Result<*const usize, PointerError> {
6980
Ok(ptr_byte_add_mut(pointer as *mut usize, offset_in_bytes, owned_region_end)?)
7081
}
82+
// !end code

security-monitor/src/core/architecture/riscv/mmu/page_size.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#[rr::derive_instantiate("PEq_refl" := #proof "intros ??; solve_goal")]
1515
#[rr::derive_instantiate("PEq_sym" := #proof "intros ???; solve_goal")]
1616
#[rr::derive_instantiate("PEq_trans" := #proof "intros ????; solve_goal")]
17+
#[rr::derive_instantiate("PEq_leibniz" := #proof "intros ? [] []; simpl; done")]
1718
// POrd
1819
#[rr::derive_instantiate("POrd" := "λ a b, Some (page_size_cmp a b)")]
1920
#[rr::derive_instantiate("POrd_eq_cons" := #proof "intros ? [] []; simpl; done")]
@@ -23,7 +24,6 @@
2324
#[rr::derive_instantiate("Ord_lt_trans" := #proof "intros ????; solve_goal")]
2425
#[rr::derive_instantiate("Ord_eq_trans" := #proof "intros ????; solve_goal")]
2526
#[rr::derive_instantiate("Ord_gt_trans" := #proof "intros ????; solve_goal")]
26-
#[rr::derive_instantiate("Ord_leibniz" := #proof "intros ? [] []; simpl; done")]
2727
#[rr::derive_instantiate("Ord_antisym" := #proof "intros ???; solve_goal")]
2828
pub enum PageSize {
2929
#[rr::pattern("Size4KiB")]

security-monitor/src/core/memory_layout/confidential_memory_address.rs

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
// SPDX-License-Identifier: Apache-2.0
44
use crate::error::Error;
55
use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset};
6+
use super::MemoryLayout;
67

78
/// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region.
89
#[repr(transparent)]
@@ -15,7 +16,7 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset};
1516
#[rr::exists("MEMORY_CONFIG")]
1617
#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
1718
/// Invariant: The address is in the confidential part of the memory layout.
18-
#[rr::invariant("(MEMORY_CONFIG.(conf_start).2 ≤ l.2 < MEMORY_CONFIG.(conf_end).2)%Z")]
19+
#[rr::invariant("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ l.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")]
1920
pub struct ConfidentialMemoryAddress(#[rr::field("l")] *mut usize);
2021

2122
/// Verification: We require the ghost state for the global memory layout to be available.
@@ -25,7 +26,7 @@ impl ConfidentialMemoryAddress {
2526
/// Precondition: The global memory layout is initialized.
2627
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
2728
/// Precondition: The address is in the confidential region of the global memory layout.
28-
#[rr::requires("(MEMORY_CONFIG.(conf_start).2 ≤ address.2 < MEMORY_CONFIG.(conf_end).2)%Z")]
29+
#[rr::requires("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ address.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")]
2930
#[rr::returns("address")]
3031
// TODO this should be unsafe
3132
pub(super) fn new(address: *mut usize) -> Self {
@@ -37,20 +38,22 @@ impl ConfidentialMemoryAddress {
3738
self.0 as *const u8
3839
}
3940

40-
#[rr::returns("self.2")]
41+
42+
#[rr::returns("self.(loc_a)")]
4143
pub fn as_usize(&self) -> usize {
4244
self.0.addr()
4345
}
4446

45-
#[rr::only_spec]
47+
// Precondition: The alignment is a power of two.
48+
#[rr::requires("is_power_of_two (Z.to_nat align)")]
4649
/// Postcondition: Verifies that the pointer is aligned to the given alignment.
4750
#[rr::returns("bool_decide (self `aligned_to` (Z.to_nat align))")]
4851
pub fn is_aligned_to(&self, align: usize) -> bool {
4952
self.0.is_aligned_to(align)
5053
}
5154

5255
/// Postcondition: Compute the offset.
53-
#[rr::returns("wrap_to_it (pointer.2 - self.2) isize")]
56+
#[rr::returns("wrap_to_it (pointer.(loc_a) - self.(loc_a)) isize")]
5457
pub fn offset_from(&self, pointer: *const usize) -> isize {
5558
ptr_byte_offset(pointer, self.0)
5659
}
@@ -61,20 +64,23 @@ impl ConfidentialMemoryAddress {
6164
/// # Safety
6265
///
6366
/// The caller must ensure that the address at given offset is still within the confidential memory region.
64-
// TODO: can we require the offset to be a multiple of usize? (woz: yes we can)
65-
#[rr::only_spec]
6667
#[rr::params("MEMORY_CONFIG")]
6768
/// Precondition: The global memory layout is initialized.
68-
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
69+
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
6970
#[rr::ok]
7071
/// Precondition: The offset address is in the given range.
71-
#[rr::requires("self.2 + offset_in_bytes < upper_bound.2")]
72+
#[rr::requires("self.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")]
7273
/// Precondition: The maximum (and thus the offset address) is in the confidential memory range.
73-
#[rr::requires("upper_bound.2 ≤ MEMORY_CONFIG.(conf_end).2")]
74+
#[rr::requires("upper_bound.(loc_a) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")]
7475
/// Postcondition: The offset pointer is in the confidential memory range.
7576
#[rr::ensures("ret = self +ₗ offset_in_bytes")]
7677
pub fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result<ConfidentialMemoryAddress, Error> {
77-
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInConfidentialMemory())?;
78+
let memory_layout = MemoryLayout::read();
79+
ensure!(upper_bound <= memory_layout.confidential_memory_end, Error::AddressNotInConfidentialMemory())?;
80+
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(
81+
#[rr::verify]
82+
|_| Error::AddressNotInConfidentialMemory(),
83+
)?;
7884
Ok(Self::new(pointer))
7985
}
8086

security-monitor/src/core/memory_layout/mod.rs

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@ static MEMORY_LAYOUT: Once<MemoryLayout> = Once::new();
2828
/// non-confidential memory.
2929
#[rr::refined_by("ml" : "memory_layout")]
3030
/// Invariant: The starts of the region have addresses less-or-equal-to the ends of the regions.
31-
#[rr::invariant("ml.(non_conf_start).2 ≤ ml.(non_conf_end).2")]
32-
#[rr::invariant("ml.(conf_start).2 ≤ ml.(conf_end).2")]
31+
#[rr::invariant("ml.(non_conf_start).(loc_a) ≤ ml.(non_conf_end).(loc_a)")]
32+
#[rr::invariant("ml.(conf_start).(loc_a) ≤ ml.(conf_end).(loc_a)")]
3333
/// Invariant: the non-confidential memory region comes before the confidential memory region.
3434
// TODO: this could be generalized to the regions being disjoint
35-
#[rr::invariant("ml.(non_conf_end).2 ≤ ml.(conf_start).2")]
35+
#[rr::invariant("ml.(non_conf_end).(loc_a) ≤ ml.(conf_start).(loc_a)")]
3636
/// Invariant: the bounds of the confidential memory region are aligned to 4KiB pages
3737
#[rr::invariant("ml.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB)")]
3838
#[rr::invariant("ml.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB)")]
@@ -68,19 +68,19 @@ impl MemoryLayout {
6868
/// This function must be called only once by the initialization procedure during the boot of the system.
6969
#[rr::only_spec]
7070
/// Precondition: The non-confidential memory should have positive size.
71-
#[rr::requires("non_confidential_memory_start.2 < non_confidential_memory_end.2")]
71+
#[rr::requires("non_confidential_memory_start.(loc_a) < non_confidential_memory_end.(loc_a)")]
7272
/// Precondition: The non-condidential memory should preceed and not overlap with confidential memory.
73-
#[rr::requires("non_confidential_memory_end.2 ≤ confidential_memory_start.2")]
73+
#[rr::requires("non_confidential_memory_end.(loc_a) ≤ confidential_memory_start.(loc_a)")]
7474
/// Precondition: The confidential memory should have positive size.
75-
#[rr::requires("confidential_memory_start.2 < confidential_memory_end.2")]
75+
#[rr::requires("confidential_memory_start.(loc_a) < confidential_memory_end.(loc_a)")]
7676
/// Precondition: The global MEMORY_LAYOUT has not been initialized yet.
7777
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" None")]
7878
/// Postcondition: There exists a result -- failure is always an option
7979
#[rr::exists("res", "maybe_mem_layout")]
8080
/// Postcondition: failure due to low memory can occur if there is no sufficiently aligned
8181
/// confidential address
8282
#[rr::ensures(
83-
"if_Err res (λ err, (confidential_memory_start.2 - confidential_memory_end.2 ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)"
83+
"if_Err res (λ err, (confidential_memory_start.(loc_a) - confidential_memory_end.(loc_a) ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)"
8484
)]
8585
/// Postcondition: if we return Ok, we get a new confidential memory range that is correctly
8686
/// aligned for the smallest page size and is a subrange of [conf_start, conf_end)
@@ -90,9 +90,9 @@ impl MemoryLayout {
9090
maybe_mem_layout = Some mem_layout ∧
9191
mem_layout.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧
9292
mem_layout.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧
93-
confidential_memory_start.2 ≤ mem_layout.(conf_start).2
94-
mem_layout.(conf_end).2 ≤ confidential_memory_end.2
95-
mem_layout.(conf_start).2 ≤ mem_layout.(conf_end).2
93+
confidential_memory_start.(loc_a) ≤ mem_layout.(conf_start).(loc_a)
94+
mem_layout.(conf_end).(loc_a) ≤ confidential_memory_end.(loc_a)
95+
mem_layout.(conf_start).(loc_a) ≤ mem_layout.(conf_end).(loc_a)
9696
ok = *[ mem_layout.(conf_start); mem_layout.(conf_end)])%Z"
9797
)]
9898
/// Postcondition: if we return Ok, the MEMORY_LAYOUT has been initialized.
@@ -138,7 +138,7 @@ impl MemoryLayout {
138138
#[rr::only_spec]
139139
#[rr::ok]
140140
/// Precondition: The offset address is in confidential memory.
141-
#[rr::requires("address.2 + offset_in_bytes < self.(conf_end).2")]
141+
#[rr::requires("address.(loc_a) + offset_in_bytes < self.(conf_end).(loc_a)")]
142142
/// Postcondition: The offset pointer is in confidential memory.
143143
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
144144
pub fn confidential_address_at_offset(
@@ -152,9 +152,9 @@ impl MemoryLayout {
152152
#[rr::only_spec]
153153
#[rr::ok]
154154
/// Precondition: The offset address is in confidential memory.
155-
#[rr::requires("address.2 + offset_in_bytes < upper_bound.2")]
155+
#[rr::requires("address.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")]
156156
/// Precondition: The bounds we are checking are within confidential memory.
157-
#[rr::requires("upper_bound.2 ≤ self.(conf_end).2")]
157+
#[rr::requires("upper_bound.(loc_a) ≤ self.(conf_end).(loc_a)")]
158158
/// Postcondition: Then we can correctly offset the address and ensure it is in confidential
159159
/// memory.
160160
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
@@ -170,7 +170,7 @@ impl MemoryLayout {
170170
#[rr::only_spec]
171171
#[rr::ok]
172172
/// Precondition: The offset address is in non-confidential memory.
173-
#[rr::requires("address.2 + offset_in_bytes < self.(non_conf_end).2")]
173+
#[rr::requires("address.(loc_a) + offset_in_bytes < self.(non_conf_end).(loc_a)")]
174174
/// Postcondition: Then we can correctly offset the address and ensure it is in
175175
/// non-confidential memory.
176176
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
@@ -183,7 +183,7 @@ impl MemoryLayout {
183183

184184
/// Returns true if the raw pointer is inside the non-confidential memory.
185185
#[rr::only_spec]
186-
#[rr::returns("bool_decide (self.(non_conf_start).2 ≤ address.2 ∧ address.2 < self.(non_conf_end).2)")]
186+
#[rr::returns("bool_decide (self.(non_conf_start).(loc_a) ≤ address.(loc_a) ∧ address.(loc_a) < self.(non_conf_end).(loc_a))")]
187187
pub fn is_in_non_confidential_range(&self, address: *const usize) -> bool {
188188
self.non_confidential_memory_start as *const usize <= address && address < self.non_confidential_memory_end
189189
}
@@ -219,7 +219,7 @@ impl MemoryLayout {
219219

220220
/// Get the boundaries of confidential memory as a (start, end) tuple.
221221
#[rr::only_spec]
222-
#[rr::returns(" *[self.(conf_start).2; self.(conf_end).2]")]
222+
#[rr::returns(" *[self.(conf_start).(loc_a); self.(conf_end).(loc_a)]")]
223223
pub fn confidential_memory_boundary(&self) -> (usize, usize) {
224224
(self.confidential_memory_start as usize, self.confidential_memory_end as usize)
225225
}

0 commit comments

Comments
 (0)