Skip to content

Commit b64f6c8

Browse files
authored
Finish page allocator verification (#119)
Signed-off-by: Lennard Gäher <l.gaeher@posteo.de>
1 parent 24be7b6 commit b64f6c8

File tree

61 files changed

+2005
-719
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

61 files changed

+2005
-719
lines changed

.github/workflows/verify.yml

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ jobs:
1717
- name: Install opam dependencies
1818
run: sudo apt update -y && sudo apt install -y pkg-config git rsync tar unzip m4 time curl ocaml build-essential bubblewrap gawk libgmp-dev python3 python-is-python3 libmpfr-dev && python3 -m pip install setuptools
1919
- name: Install opam
20-
run: curl "https://github.com/ocaml/opam/releases/download/2.1.5/opam-2.1.5-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam
20+
run: curl "https://github.com/ocaml/opam/releases/download/2.5.0/opam-2.5.0-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam
2121
- name: Setup opam
2222
run: opam init -y
2323
- name: Setup opam switch
@@ -62,13 +62,16 @@ jobs:
6262
run: |
6363
python3 .github/workflows/prune_dune_modules.py verification/rust_proofs/ace/proofs/dune \
6464
proof_core_page_allocator_allocator_PageAllocator_add_memory_region \
65+
proof_core_page_allocator_allocator_PageAllocator_release_pages_closure0 \
6566
proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
67+
proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token \
6668
proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary \
69+
proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens \
6770
proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size \
6871
proof_core_page_allocator_page_Page_T_write \
6972
proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read \
70-
proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
71-
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide
73+
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide \
74+
proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge
7275
- name: Compile Rocq proofs
7376
run: |
7477
(

rust-toolchain.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
[toolchain]
2-
channel = "nightly-2025-09-12"
2+
channel = "nightly-2026-03-23"
33
targets = [ "riscv64gc-unknown-none-elf" ]

security-monitor/rust-crates/pointers_utility/src/lib.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@ mod error;
1616
use core::mem::size_of;
1717
pub use crate::error::PointerError;
1818

19-
/// Calculates the offset in bytes between two pointers.
19+
/// Calculates the offset in bytes between two pointers.
2020
#[rr::only_spec]
21-
#[rr::returns("wrap_to_it (pointer1.2 - pointer2.2) isize")]
21+
#[rr::returns("wrap_to_it (pointer1.(loc_a) - pointer2.(loc_a)) isize")]
2222
pub fn ptr_byte_offset(pointer1: *const usize, pointer2: *const usize) -> isize {
2323
// TODO: we should use wrapping arithmetic here, as it might overflow
2424
(pointer1.addr() as isize) - (pointer2.addr() as isize)
@@ -38,7 +38,7 @@ pub fn ptr_align(pointer: *mut usize, align_in_bytes: usize, owned_region_end: *
3838
/// of size one, if the original pointer is valid. Additional checks are required for making
3939
/// larger memory accesses.
4040
#[rr::ok]
41-
#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")]
41+
#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")]
4242
#[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")]
4343
pub fn ptr_byte_add_mut(
4444
pointer: *mut usize, offset_in_bytes: usize, owned_region_end: *const usize,
@@ -61,7 +61,7 @@ pub fn ptr_byte_add_mut(
6161
/// of size one, if the original pointer is valid. Additional checks are required for making
6262
/// larger memory accesses.
6363
#[rr::ok]
64-
#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")]
64+
#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")]
6565
#[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")]
6666
pub fn ptr_byte_add(
6767
pointer: *const usize, offset_in_bytes: usize, owned_region_end: *const usize,

security-monitor/src/core/architecture/riscv/mmu/page_size.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#[rr::derive_instantiate("PEq_refl" := #proof "intros ??; solve_goal")]
1515
#[rr::derive_instantiate("PEq_sym" := #proof "intros ???; solve_goal")]
1616
#[rr::derive_instantiate("PEq_trans" := #proof "intros ????; solve_goal")]
17+
#[rr::derive_instantiate("PEq_leibniz" := #proof "intros ? [] []; simpl; done")]
1718
// POrd
1819
#[rr::derive_instantiate("POrd" := "λ a b, Some (page_size_cmp a b)")]
1920
#[rr::derive_instantiate("POrd_eq_cons" := #proof "intros ? [] []; simpl; done")]
@@ -23,7 +24,6 @@
2324
#[rr::derive_instantiate("Ord_lt_trans" := #proof "intros ????; solve_goal")]
2425
#[rr::derive_instantiate("Ord_eq_trans" := #proof "intros ????; solve_goal")]
2526
#[rr::derive_instantiate("Ord_gt_trans" := #proof "intros ????; solve_goal")]
26-
#[rr::derive_instantiate("Ord_leibniz" := #proof "intros ? [] []; simpl; done")]
2727
#[rr::derive_instantiate("Ord_antisym" := #proof "intros ???; solve_goal")]
2828
pub enum PageSize {
2929
#[rr::pattern("Size4KiB")]

security-monitor/src/core/memory_layout/confidential_memory_address.rs

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
// SPDX-License-Identifier: Apache-2.0
44
use crate::error::Error;
55
use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset};
6+
use super::MemoryLayout;
67

78
/// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region.
89
#[repr(transparent)]
@@ -15,7 +16,7 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset};
1516
#[rr::exists("MEMORY_CONFIG")]
1617
#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
1718
/// Invariant: The address is in the confidential part of the memory layout.
18-
#[rr::invariant("(MEMORY_CONFIG.(conf_start).2 ≤ l.2 < MEMORY_CONFIG.(conf_end).2)%Z")]
19+
#[rr::invariant("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ l.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")]
1920
pub struct ConfidentialMemoryAddress(#[rr::field("l")] *mut usize);
2021

2122
/// Verification: We require the ghost state for the global memory layout to be available.
@@ -25,7 +26,7 @@ impl ConfidentialMemoryAddress {
2526
/// Precondition: The global memory layout is initialized.
2627
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
2728
/// Precondition: The address is in the confidential region of the global memory layout.
28-
#[rr::requires("(MEMORY_CONFIG.(conf_start).2 ≤ address.2 < MEMORY_CONFIG.(conf_end).2)%Z")]
29+
#[rr::requires("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ address.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")]
2930
#[rr::returns("address")]
3031
// TODO this should be unsafe
3132
pub(super) fn new(address: *mut usize) -> Self {
@@ -37,20 +38,22 @@ impl ConfidentialMemoryAddress {
3738
self.0 as *const u8
3839
}
3940

40-
#[rr::returns("self.2")]
41+
42+
#[rr::returns("self.(loc_a)")]
4143
pub fn as_usize(&self) -> usize {
4244
self.0.addr()
4345
}
4446

45-
#[rr::only_spec]
47+
// Precondition: The alignment is a power of two.
48+
#[rr::requires("is_power_of_two (Z.to_nat align)")]
4649
/// Postcondition: Verifies that the pointer is aligned to the given alignment.
4750
#[rr::returns("bool_decide (self `aligned_to` (Z.to_nat align))")]
4851
pub fn is_aligned_to(&self, align: usize) -> bool {
4952
self.0.is_aligned_to(align)
5053
}
5154

5255
/// Postcondition: Compute the offset.
53-
#[rr::returns("wrap_to_it (pointer.2 - self.2) isize")]
56+
#[rr::returns("wrap_to_it (pointer.(loc_a) - self.(loc_a)) isize")]
5457
pub fn offset_from(&self, pointer: *const usize) -> isize {
5558
ptr_byte_offset(pointer, self.0)
5659
}
@@ -61,20 +64,23 @@ impl ConfidentialMemoryAddress {
6164
/// # Safety
6265
///
6366
/// The caller must ensure that the address at given offset is still within the confidential memory region.
64-
// TODO: can we require the offset to be a multiple of usize? (woz: yes we can)
65-
#[rr::only_spec]
6667
#[rr::params("MEMORY_CONFIG")]
6768
/// Precondition: The global memory layout is initialized.
68-
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
69+
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
6970
#[rr::ok]
7071
/// Precondition: The offset address is in the given range.
71-
#[rr::requires("self.2 + offset_in_bytes < upper_bound.2")]
72+
#[rr::requires("self.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")]
7273
/// Precondition: The maximum (and thus the offset address) is in the confidential memory range.
73-
#[rr::requires("upper_bound.2 ≤ MEMORY_CONFIG.(conf_end).2")]
74+
#[rr::requires("upper_bound.(loc_a) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")]
7475
/// Postcondition: The offset pointer is in the confidential memory range.
7576
#[rr::ensures("ret = self +ₗ offset_in_bytes")]
7677
pub fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result<ConfidentialMemoryAddress, Error> {
77-
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInConfidentialMemory())?;
78+
let memory_layout = MemoryLayout::read();
79+
ensure!(upper_bound <= memory_layout.confidential_memory_end, Error::AddressNotInConfidentialMemory())?;
80+
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(
81+
#[rr::verify]
82+
|_| Error::AddressNotInConfidentialMemory(),
83+
)?;
7884
Ok(Self::new(pointer))
7985
}
8086

security-monitor/src/core/memory_layout/mod.rs

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@ static MEMORY_LAYOUT: Once<MemoryLayout> = Once::new();
2828
/// non-confidential memory.
2929
#[rr::refined_by("ml" : "memory_layout")]
3030
/// Invariant: The starts of the region have addresses less-or-equal-to the ends of the regions.
31-
#[rr::invariant("ml.(non_conf_start).2 ≤ ml.(non_conf_end).2")]
32-
#[rr::invariant("ml.(conf_start).2 ≤ ml.(conf_end).2")]
31+
#[rr::invariant("ml.(non_conf_start).(loc_a) ≤ ml.(non_conf_end).(loc_a)")]
32+
#[rr::invariant("ml.(conf_start).(loc_a) ≤ ml.(conf_end).(loc_a)")]
3333
/// Invariant: the non-confidential memory region comes before the confidential memory region.
3434
// TODO: this could be generalized to the regions being disjoint
35-
#[rr::invariant("ml.(non_conf_end).2 ≤ ml.(conf_start).2")]
35+
#[rr::invariant("ml.(non_conf_end).(loc_a) ≤ ml.(conf_start).(loc_a)")]
3636
/// Invariant: the bounds of the confidential memory region are aligned to 4KiB pages
3737
#[rr::invariant("ml.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB)")]
3838
#[rr::invariant("ml.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB)")]
@@ -68,19 +68,19 @@ impl MemoryLayout {
6868
/// This function must be called only once by the initialization procedure during the boot of the system.
6969
#[rr::only_spec]
7070
/// Precondition: The non-confidential memory should have positive size.
71-
#[rr::requires("non_confidential_memory_start.2 < non_confidential_memory_end.2")]
71+
#[rr::requires("non_confidential_memory_start.(loc_a) < non_confidential_memory_end.(loc_a)")]
7272
/// Precondition: The non-confidential memory should precede and not overlap with confidential memory.
73-
#[rr::requires("non_confidential_memory_end.2 ≤ confidential_memory_start.2")]
73+
#[rr::requires("non_confidential_memory_end.(loc_a) ≤ confidential_memory_start.(loc_a)")]
7474
/// Precondition: The confidential memory should have positive size.
75-
#[rr::requires("confidential_memory_start.2 < confidential_memory_end.2")]
75+
#[rr::requires("confidential_memory_start.(loc_a) < confidential_memory_end.(loc_a)")]
7676
/// Precondition: The global MEMORY_LAYOUT has not been initialized yet.
7777
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" None")]
7878
/// Postcondition: There exists a result -- failure is always an option
7979
#[rr::exists("res", "maybe_mem_layout")]
8080
/// Postcondition: failure due to low memory can occur if there is no sufficiently aligned
8181
/// confidential address
8282
#[rr::ensures(
83-
"if_Err res (λ err, (confidential_memory_start.2 - confidential_memory_end.2 ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)"
83+
"if_Err res (λ err, (confidential_memory_start.(loc_a) - confidential_memory_end.(loc_a) ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)"
8484
)]
8585
/// Postcondition: if we return Ok, we get a new confidential memory range that is correctly
8686
/// aligned for the smallest page size and is a subrange of [conf_start, conf_end)
@@ -90,9 +90,9 @@ impl MemoryLayout {
9090
maybe_mem_layout = Some mem_layout ∧
9191
mem_layout.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧
9292
mem_layout.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧
93-
confidential_memory_start.2 ≤ mem_layout.(conf_start).2
94-
mem_layout.(conf_end).2 ≤ confidential_memory_end.2
95-
mem_layout.(conf_start).2 ≤ mem_layout.(conf_end).2
93+
confidential_memory_start.(loc_a) ≤ mem_layout.(conf_start).(loc_a)
94+
mem_layout.(conf_end).(loc_a) ≤ confidential_memory_end.(loc_a)
95+
mem_layout.(conf_start).(loc_a) ≤ mem_layout.(conf_end).(loc_a)
9696
ok = *[ mem_layout.(conf_start); mem_layout.(conf_end)])%Z"
9797
)]
9898
/// Postcondition: if we return Ok, the MEMORY_LAYOUT has been initialized.
@@ -138,7 +138,7 @@ impl MemoryLayout {
138138
#[rr::only_spec]
139139
#[rr::ok]
140140
/// Precondition: The offset address is in confidential memory.
141-
#[rr::requires("address.2 + offset_in_bytes < self.(conf_end).2")]
141+
#[rr::requires("address.(loc_a) + offset_in_bytes < self.(conf_end).(loc_a)")]
142142
/// Postcondition: The offset pointer is in confidential memory.
143143
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
144144
pub fn confidential_address_at_offset(
@@ -152,9 +152,9 @@ impl MemoryLayout {
152152
#[rr::only_spec]
153153
#[rr::ok]
154154
/// Precondition: The offset address is in confidential memory.
155-
#[rr::requires("address.2 + offset_in_bytes < upper_bound.2")]
155+
#[rr::requires("address.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")]
156156
/// Precondition: The bounds we are checking are within confidential memory.
157-
#[rr::requires("upper_bound.2 ≤ self.(conf_end).2")]
157+
#[rr::requires("upper_bound.(loc_a) ≤ self.(conf_end).(loc_a)")]
158158
/// Postcondition: Then we can correctly offset the address and ensure it is in confidential
159159
/// memory.
160160
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
@@ -170,7 +170,7 @@ impl MemoryLayout {
170170
#[rr::only_spec]
171171
#[rr::ok]
172172
/// Precondition: The offset address is in non-confidential memory.
173-
#[rr::requires("address.2 + offset_in_bytes < self.(non_conf_end).2")]
173+
#[rr::requires("address.(loc_a) + offset_in_bytes < self.(non_conf_end).(loc_a)")]
174174
/// Postcondition: Then we can correctly offset the address and ensure it is in
175175
/// non-confidential memory.
176176
#[rr::ensures("ret = address +ₗ offset_in_bytes")]
@@ -183,7 +183,7 @@ impl MemoryLayout {
183183

184184
/// Returns true if the raw pointer is inside the non-confidential memory.
185185
#[rr::only_spec]
186-
#[rr::returns("bool_decide (self.(non_conf_start).2 ≤ address.2 ∧ address.2 < self.(non_conf_end).2)")]
186+
#[rr::returns("bool_decide (self.(non_conf_start).(loc_a) ≤ address.(loc_a) ∧ address.(loc_a) < self.(non_conf_end).(loc_a))")]
187187
pub fn is_in_non_confidential_range(&self, address: *const usize) -> bool {
188188
self.non_confidential_memory_start as *const usize <= address && address < self.non_confidential_memory_end
189189
}
@@ -219,7 +219,7 @@ impl MemoryLayout {
219219

220220
/// Get the boundaries of confidential memory as a (start, end) tuple.
221221
#[rr::only_spec]
222-
#[rr::returns(" *[self.(conf_start).2; self.(conf_end).2]")]
222+
#[rr::returns(" *[self.(conf_start).(loc_a); self.(conf_end).(loc_a)]")]
223223
pub fn confidential_memory_boundary(&self) -> (usize, usize) {
224224
(self.confidential_memory_start as usize, self.confidential_memory_end as usize)
225225
}

security-monitor/src/core/memory_layout/non_confidential_memory_address.rs

Lines changed: 22 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -16,20 +16,19 @@ use pointers_utility::ptr_byte_add_mut;
1616
#[rr::exists("MEMORY_CONFIG")]
1717
#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
1818
/// Invariant: The address is in non-confidential memory.
19-
#[rr::invariant("(MEMORY_CONFIG.(non_conf_start).2 ≤ l.2 < MEMORY_CONFIG.(non_conf_end).2)%Z")]
19+
#[rr::invariant("(MEMORY_CONFIG.(non_conf_start).(loc_a) ≤ l.(loc_a) < MEMORY_CONFIG.(non_conf_end).(loc_a))%Z")]
2020
pub struct NonConfidentialMemoryAddress(#[rr::field("l")] *mut usize);
2121

2222
#[rr::context("onceG Σ memory_layout")]
2323
impl NonConfidentialMemoryAddress {
2424
/// Constructs an address in a non-confidential memory. Returns error if the address is outside non-confidential
2525
/// memory.
26-
#[rr::trust_me]
2726
#[rr::params("bounds")]
2827
/// Precondition: The global memory layout has been initialized.
29-
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some bounds)")]
28+
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some bounds)")]
3029
#[rr::ok]
3130
/// Precondition: The location is in non-confidential memory.
32-
#[rr::requires("bounds.(non_conf_start).2 ≤ address.2 < bounds.(non_conf_end).2")]
31+
#[rr::requires("bounds.(non_conf_start).(loc_a) ≤ address.(loc_a) < bounds.(non_conf_end).(loc_a)")]
3332
/// Postcondition: The non-confidential memory address is correctly initialized.
3433
#[rr::ensures("ret = address")]
3534
pub fn new(address: *mut usize) -> Result<Self, Error> {
@@ -46,19 +45,20 @@ impl NonConfidentialMemoryAddress {
4645
///
4746
/// The caller must ensure that the address advanced by the offset is still within the non-confidential memory
4847
/// region.
49-
// TODO: can we require the offset to be a multiple of usize?
50-
#[rr::only_spec]
51-
#[rr::params("MEMORY_CONFIG")]
52-
/// Precondition: The offset address is in the given range.
53-
#[rr::requires("self.2 + offset_in_bytes < upper_bound.2")]
48+
#[rr::params("MEMORY_CONFIG" : "memory_layout")]
5449
/// Precondition: The global memory layout is initialized.
55-
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
50+
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
51+
#[rr::ok]
52+
/// Precondition: The offset address is in the given range.
53+
#[rr::requires("self.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")]
5654
/// Precondition: The maximum (and thus the offset address) is in the non-confidential memory range.
57-
#[rr::requires("upper_bound.2 < MEMORY_CONFIG.(non_conf_end).2")]
55+
#[rr::requires("upper_bound.(loc_a) ≤ MEMORY_CONFIG.(non_conf_end).(loc_a)")]
5856
/// Postcondition: The offset pointer is in the non-confidential memory range.
59-
#[rr::returns("Ok(self +ₗ offset_in_bytes)")]
57+
#[rr::ensures("ret = self +ₗ offset_in_bytes")]
6058
pub unsafe fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result<NonConfidentialMemoryAddress, Error> {
61-
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInNonConfidentialMemory())?;
59+
let memory_layout = MemoryLayout::read();
60+
ensure!(upper_bound <= memory_layout.non_confidential_memory_end, Error::AddressNotInNonConfidentialMemory())?;
61+
let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInNonConfidentialMemory())?;
6262
Ok(NonConfidentialMemoryAddress(pointer))
6363
}
6464

@@ -68,6 +68,10 @@ impl NonConfidentialMemoryAddress {
6868
///
6969
/// We need to ensure the pointer is not used by two threads simultaneously. See `ptr::read_volatile` for safety
7070
/// concerns.
71+
#[rr::params("z", "lft_el")]
72+
#[rr::unsafe_elctx("[ϝ ⊑ₑ lft_el]")]
73+
#[rr::requires(#iris "self ◁ₗ[π, Shared lft_el] #z @ ◁ int usize")]
74+
#[rr::returns("z")]
7175
pub unsafe fn read(&self) -> usize {
7276
unsafe { self.0.read_volatile() }
7377
}
@@ -78,6 +82,9 @@ impl NonConfidentialMemoryAddress {
7882
///
7983
/// We need to ensure the pointer is not used by two threads simultaneously. See `ptr::write_volatile` for safety
8084
/// concerns.
85+
#[rr::params("z")]
86+
#[rr::requires(#type "self" : "z" @ "int usize")]
87+
#[rr::ensures(#type "self" : "value" @ "int usize")]
8188
pub unsafe fn write(&self, value: usize) {
8289
unsafe { self.0.write_volatile(value) };
8390
}
@@ -87,12 +94,9 @@ impl NonConfidentialMemoryAddress {
8794
self.0
8895
}
8996

90-
#[rr::only_spec]
91-
#[rr::returns("self.2")]
97+
#[rr::returns("self.(loc_a)")]
9298
pub fn usize(&self) -> usize {
9399
// TODO: check if we need to expose the pointer.
94-
// If not, use addr() instead.
95-
// self.0.addr()
96-
self.0 as usize
100+
self.0.addr()
97101
}
98102
}

0 commit comments

Comments
 (0)