From e2f5f4b2eb31336a180a875358e021ccc92a1909 Mon Sep 17 00:00:00 2001
From: Niam Shah
Date: Mon, 2 Feb 2026 01:56:20 -0800
Subject: [PATCH 01/15] Add basic tests for arca types and kernel types

Full coverage of all data types, based on the current code. Still
pending: support for running via 'cargo test', a detailed pass to ensure
the testing is thorough and accurate, and a fix for a runtime error.
---
 kernel/src/tests/test_kernel_types.rs | 175 ++++++++++
 kernel/src/tests/test_serde.rs        | 453 ++++++++++++++++++++------
 2 files changed, 528 insertions(+), 100 deletions(-)
 create mode 100644 kernel/src/tests/test_kernel_types.rs

diff --git a/kernel/src/tests/test_kernel_types.rs b/kernel/src/tests/test_kernel_types.rs
new file mode 100644
index 0000000..5a3f1c3
--- /dev/null
+++ b/kernel/src/tests/test_kernel_types.rs
@@ -0,0 +1,175 @@
+// runs with command: cargo test -p kernel --target=x86_64-unknown-none
+#[cfg(test)]
+mod tests {
+    extern crate alloc;
+
+    use alloc::vec;
+    use crate::types::internal as ktypes;
+    use crate::types::{
+        Blob as ArcaBlob, Entry as ArcaEntry, Null as ArcaNull, Table as ArcaTable,
+        Tuple as ArcaTuple, Value as ArcaValue, Word as ArcaWord,
+    };
+
+    // Verifies internal word read/write semantics.
+    #[test]
+    fn test_internal_word_read() {
+        let word = ktypes::Word::new(123);
+        assert_eq!(word.read(), 123);
+    }
+
+    // Ensures internal null construction is consistent.
+    #[test]
+    fn test_internal_null_default() {
+        let null = ktypes::Null::new();
+        let default = ktypes::Null::default();
+        assert_eq!(null, default);
+    }
+
+    // Confirms internal blob mutability converts to raw bytes.
+    #[test]
+    fn test_internal_blob_mutation() {
+        let mut blob = ktypes::Blob::new(b"hello".to_vec());
+        assert_eq!(blob.len(), 5);
+        blob[0] = b'j';
+        let bytes = blob.into_inner();
+        assert_eq!(&bytes[..], b"jello");
+    }
+
+    // Ensures invalid UTF-8 stays as raw bytes internally.
+    #[test]
+    fn test_internal_blob_invalid_utf8() {
+        let bytes = vec![0xffu8, 0xfeu8, 0xfdu8];
+        let blob = ktypes::Blob::new(bytes.clone());
+        let out = blob.into_inner();
+        assert_eq!(&out[..], &bytes);
+    }
+
+    // Validates internal tuple defaults and indexing.
+    #[test]
+    fn test_internal_tuple_defaults() {
+        let tuple = ktypes::Tuple::new_with_len(2);
+        assert_eq!(tuple.len(), 2);
+        assert!(matches!(tuple[0], ArcaValue::Null(_)));
+        assert!(matches!(tuple[1], ArcaValue::Null(_)));
+    }
+
+    // Verifies internal tuple construction from iterators.
+    #[test]
+    fn test_internal_tuple_from_iter() {
+        let values = vec![
+            ArcaValue::Word(ArcaWord::new(1)),
+            ArcaValue::Blob(ArcaBlob::from("x")),
+        ];
+        let tuple: ktypes::Tuple = values.clone().into_iter().collect();
+        assert_eq!(tuple.len(), values.len());
+        assert_eq!(tuple[0], values[0]);
+        assert_eq!(tuple[1], values[1]);
+    }
+
+    // Confirms internal page size tiers and shared content.
+    #[test]
+    fn test_internal_page_size_and_shared() {
+        let mut page = ktypes::Page::new(1);
+        assert_eq!(page.size(), 1 << 12);
+        page[0] = 7;
+        let shared = page.clone().shared();
+        assert_eq!(shared[0], 7);
+
+        let mid = ktypes::Page::new((1 << 12) + 1);
+        assert_eq!(mid.size(), 1 << 21);
+    }
+
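+    // A small follow-on sketch (an assumption, not taken from the kernel
+    // code): internal pages are expected to mirror the public three-tier
+    // sizing (4 KiB / 2 MiB / 1 GiB), so the largest tier should kick in
+    // past the 2 MiB threshold as well.
+    #[test]
+    fn test_internal_page_large_tier() {
+        let large = ktypes::Page::new((1 << 21) + 1);
+        assert_eq!(large.size(), 1 << 30);
+    }
+
+    // Verifies internal table size tiers and set/get behavior.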
+ #[test] + fn test_internal_table_get_set() { + let mut table = ktypes::Table::new(1); + assert_eq!(table.size(), 1 << 21); + + let entry = ArcaEntry::RWPage(crate::types::Page::new(1)); + let old = table.set(0, entry.clone()).unwrap(); + assert_eq!(old, ArcaEntry::Null(1 << 12)); + + let fetched = table.get(0); + assert_eq!(fetched, entry); + + let large = ktypes::Table::new((1 << 21) + 1); + assert_eq!(large.size(), 1 << 30); + } + + // Ensures internal table returns default null entries for empty slots. + #[test] + fn test_internal_table_default_entry() { + let table = ktypes::Table::new(1); + let entry = table.get(10); + assert_eq!(entry, ArcaEntry::Null(1 << 12)); + } + + // Ensures internal value conversions work as expected. + #[test] + fn test_internal_value_conversions() { + let word = ktypes::Word::new(99); + let value: ktypes::Value = word.clone().into(); + let roundtrip = ktypes::Word::try_from(value).unwrap(); + assert_eq!(roundtrip, word); + + let blob = ktypes::Blob::new(b"data".to_vec()); + let value: ktypes::Value = blob.clone().into(); + let roundtrip = ktypes::Blob::try_from(value).unwrap(); + assert_eq!(roundtrip, blob); + } + + // Verifies mismatched internal value conversions return an error. + #[test] + fn test_internal_value_conversion_error() { + let value: ktypes::Value = ktypes::Word::new(1).into(); + let result = ktypes::Blob::try_from(value); + assert!(result.is_err()); + } + + // Validates symbolic function parsing and read round-trip. + #[test] + fn test_internal_function_symbolic_parse() { + let args = ArcaTuple::from((1u64, "two")); + let value = ArcaValue::Tuple(ArcaTuple::from(( + ArcaBlob::from("Symbolic"), + ArcaValue::Word(ArcaWord::new(5)), + ArcaValue::Tuple(args), + ))); + let func = ktypes::Function::new(value.clone()).expect("symbolic parse failed"); + assert!(!func.is_arcane()); + assert_eq!(func.read(), value); + } + + // Ensures invalid function tags are rejected. + #[test] + fn test_internal_function_invalid_tag() { + let value = ArcaValue::Tuple(ArcaTuple::from(( + ArcaBlob::from("Other"), + ArcaValue::Null(ArcaNull::new()), + ))); + let func = ktypes::Function::new(value); + assert!(func.is_none()); + } + + // Verifies arcane function parsing accepts valid layouts. 
+ #[test] + fn test_internal_function_arcane_parse() { + let mut registers = ArcaTuple::new(18); + for i in 0..18 { + registers.set(i, ArcaValue::Null(ArcaNull::new())); + } + let mut data = ArcaTuple::new(4); + data.set(0, ArcaValue::Tuple(registers)); + data.set(1, ArcaValue::Table(ArcaTable::new(1))); + data.set(2, ArcaValue::Tuple(ArcaTuple::new(0))); + data.set(3, ArcaValue::Tuple(ArcaTuple::new(0))); + + let value = ArcaValue::Tuple(ArcaTuple::from(( + ArcaBlob::from("Arcane"), + ArcaValue::Tuple(data), + ArcaValue::Tuple(ArcaTuple::new(0)), + ))); + let func = ktypes::Function::new(value).expect("arcane parse failed"); + assert!(func.is_arcane()); + } +} diff --git a/kernel/src/tests/test_serde.rs b/kernel/src/tests/test_serde.rs index b9e038a..b064679 100644 --- a/kernel/src/tests/test_serde.rs +++ b/kernel/src/tests/test_serde.rs @@ -1,112 +1,365 @@ -use crate::prelude::*; -extern crate alloc; - -#[test] -fn test_serde_null() { - let null = Value::Null(Null::new()); - let bytes_vec = postcard::to_allocvec(&null).unwrap(); - let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_null, null); -} +// runs with command: cargo test -p kernel --target=x86_64-unknown-none +#[cfg(test)] +mod tests { + extern crate alloc; -#[test] -fn test_serde_word() { - let word = Value::Word(1.into()); - let bytes_vec = postcard::to_allocvec(&word).unwrap(); - let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_word, word); -} + use crate::prelude::*; + use crate::types::Error; -#[test] -fn test_serde_blob() { - let blob = Value::Blob("hello, world!".into()); - let bytes_vec = postcard::to_allocvec(&blob).unwrap(); - let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_blob, blob); -} + // Verifies serialize and deserialize for the null value. + #[test] + fn test_serde_null() { + let null = Value::Null(Null::new()); + let bytes_vec = postcard::to_allocvec(&null).unwrap(); + let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized_null, null); + } -#[test] -fn test_serde_tuple() { - let tuple = Value::Tuple((1, 2, 3).into()); - let bytes_vec = postcard::to_allocvec(&tuple).unwrap(); - let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_tuple, tuple); -} + // Verifies serialize and deserialize for a word value. + #[test] + fn test_serde_word() { + let word = Value::Word(1.into()); + let bytes_vec = postcard::to_allocvec(&word).unwrap(); + let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized_word, word); + } -#[test] -fn test_serde_page() { - let page = Value::Page(Page::new(1)); - let bytes_vec = postcard::to_allocvec(&page).unwrap(); - let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_page, page); -} + // Verifies serialize and deserialize for a blob value. 
+    #[test]
+    fn test_serde_blob() {
+        let blob = Value::Blob("hello, world!".into());
+        let bytes_vec = postcard::to_allocvec(&blob).unwrap();
+        let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_blob, blob);
+    }
-#[test]
-fn test_serde_table() {
-    let table = Value::Table(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&table).unwrap();
-    let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_table, table);
-}
+    // Verifies serialize and deserialize for a tuple value.
+    #[test]
+    fn test_serde_tuple() {
+        let tuple = Value::Tuple((1, 2, 3).into());
+        let bytes_vec = postcard::to_allocvec(&tuple).unwrap();
+        let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_tuple, tuple);
+    }
-// #[test]
-// fn test_serde_function() {
-//     let arca = Arca::new();
-//     let inner_func: arca::Function = Function::from(arca);
-//     let func = Value::Function(inner_func);
-//     let bytes_vec = postcard::to_allocvec(&func).unwrap();
-//     let deserialized_func: Value = postcard::from_bytes(&bytes_vec).unwrap();
-//     assert_eq!(deserialized_func, func);
-// }
-
-#[test]
-fn test_serde_ropage() {
-    let ropage = Entry::ROPage(Page::new(1));
-    let bytes_vec = postcard::to_allocvec(&ropage).unwrap();
-    let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_ropage, ropage);
-}
+    // Verifies serialize and deserialize for a page value.
+    #[test]
+    fn test_serde_page() {
+        let page = Value::Page(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&page).unwrap();
+        let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_page, page);
+    }
-#[test]
-fn test_serde_rwpage() {
-    let rwpage = Entry::RWPage(Page::new(1));
-    let bytes_vec = postcard::to_allocvec(&rwpage).unwrap();
-    let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rwpage, rwpage);
-}
+    // Verifies serialize and deserialize for a table value.
+    #[test]
+    fn test_serde_table() {
+        let table = Value::Table(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&table).unwrap();
+        let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_table, table);
+    }
-#[test]
-fn test_serde_rotable() {
-    let rotable = Entry::ROTable(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&rotable).unwrap();
-    let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rotable, rotable);
-}
+    // Verifies serialize and deserialize for a read-only page entry.
+    #[test]
+    fn test_serde_ropage() {
+        let ropage = Entry::ROPage(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&ropage).unwrap();
+        let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_ropage, ropage);
+    }
-#[test]
-fn test_serde_rwtable() {
-    let rwtable = Entry::RWTable(Table::new(1));
-    let bytes_vec = postcard::to_allocvec(&rwtable).unwrap();
-    let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
-    assert_eq!(deserialized_rwtable, rwtable);
-}
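+    // An extra round-trip sketch for nested values; assumes postcard
+    // encodes a Tuple nested inside a Tuple the same way it handles the
+    // flat cases above, and that Tuple::from accepts a (Blob, Value) pair.
+    #[test]
+    fn test_serde_nested_tuple() {
+        let inner = Value::Tuple((1, 2, 3).into());
+        let outer = Value::Tuple(Tuple::from((Blob::from("tag"), inner)));
+        let bytes_vec = postcard::to_allocvec(&outer).unwrap();
+        let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, outer);
+    }
+
+    // Verifies serialize and deserialize for a read-write page entry.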
+    #[test]
+    fn test_serde_rwpage() {
+        let rwpage = Entry::RWPage(Page::new(1));
+        let bytes_vec = postcard::to_allocvec(&rwpage).unwrap();
+        let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_rwpage, rwpage);
+    }
-#[test]
-fn test_value_error() {
-    let unknown_variant = [7, 0];
-    let deserialized: Result<Value, postcard::Error> = postcard::from_bytes(&unknown_variant);
-    let deserialized_error = deserialized.expect_err("should have been err");
-    let error =
-        serde::de::Error::unknown_variant("7", &["Null", "Word", "Blob", "Tuple", "Page", "Table"]);
-    assert_eq!(deserialized_error, error);
-}
+    // Verifies serialize and deserialize for a read-only table entry.
+    #[test]
+    fn test_serde_rotable() {
+        let rotable = Entry::ROTable(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&rotable).unwrap();
+        let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_rotable, rotable);
+    }
+
+    // Verifies serialize and deserialize for a read-write table entry.
+    #[test]
+    fn test_serde_rwtable() {
+        let rwtable = Entry::RWTable(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&rwtable).unwrap();
+        let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized_rwtable, rwtable);
+    }
+
+    // Ensures unknown Value variants cause the expected serde error.
+    #[test]
+    fn test_value_error() {
+        let unknown_variant = [7, 0];
+        let deserialized: Result<Value, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error = serde::de::Error::unknown_variant(
+            "7",
+            &["Null", "Word", "Blob", "Tuple", "Page", "Table"],
+        );
+        assert_eq!(deserialized_error, error);
+    }
+
+    // Ensures unknown Entry variants cause the expected serde error.
+    #[test]
+    fn test_entry_error() {
+        let unknown_variant = [5, 0];
+        let deserialized: Result<Entry, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error =
+            serde::de::Error::unknown_variant("5", &["Null", "ROPage", "RWPage", "ROTable", "RWTable"]);
+        assert_eq!(deserialized_error, error);
+    }
+
+    // Confirms datatype tagging and default value behavior.
+    #[test]
+    fn test_value_datatype_and_defaults() {
+        let null = Value::Null(Null::new());
+        let word = Value::Word(Word::new(42));
+        let blob = Value::Blob(Blob::from("hi"));
+        let tuple = Value::Tuple(Tuple::from((1u64, "x")));
+        let page = Value::Page(Page::new(1));
+        let table = Value::Table(Table::new(1));
+
+        assert_eq!(null.datatype(), DataType::Null);
+        assert_eq!(word.datatype(), DataType::Word);
+        assert_eq!(blob.datatype(), DataType::Blob);
+        assert_eq!(tuple.datatype(), DataType::Tuple);
+        assert_eq!(page.datatype(), DataType::Page);
+        assert_eq!(table.datatype(), DataType::Table);
+        assert_eq!(Value::default().datatype(), DataType::Null);
+    }
+
+    // Checks word read semantics and byte size.
+    #[test]
+    fn test_word_read_and_byte_size() {
+        let word = Word::new(0xdeadbeef);
+        assert_eq!(word.read(), 0xdeadbeef);
+
+        let value = Value::Word(word);
+        assert_eq!(value.byte_size(), core::mem::size_of::<u64>());
+    }
+
+    // Confirms blob length and read behavior.
+    #[test]
+    fn test_blob_read_and_len() {
+        let blob = Blob::from("hello");
+        assert_eq!(blob.len(), 5);
+
+        let mut buf = [0u8; 8];
+        let read = blob.read(0, &mut buf);
+        assert_eq!(read, 5);
+        assert_eq!(&buf[..5], b"hello");
+    }
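+    // A truncation sketch; assumes Blob::read clamps to the destination
+    // buffer when the blob is longer than the space provided.
+    #[test]
+    fn test_blob_read_truncates_to_buffer() {
+        let blob = Blob::from("hello");
+        let mut buf = [0u8; 3];
+        let read = blob.read(0, &mut buf);
+        assert_eq!(read, 3);
+        assert_eq!(&buf, b"hel");
+    }
+
+    // Verifies blob reads with an offset return the expected suffix.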
+    #[test]
+    fn test_blob_read_with_offset() {
+        let blob = Blob::from("offset");
+        let mut buf = [0u8; 8];
+        let read = blob.read(3, &mut buf);
+        assert_eq!(read, 3);
+        assert_eq!(&buf[..3], b"set");
+    }
+
+    // Ensures invalid UTF-8 blobs preserve raw bytes on read.
+    #[test]
+    fn test_blob_invalid_utf8_roundtrip() {
+        let bytes = [0xffu8, 0xfeu8, 0xfdu8];
+        let blob = Blob::from(bytes.as_slice());
+        let mut buf = [0u8; 4];
+        let read = blob.read(0, &mut buf);
+        assert_eq!(read, bytes.len());
+        assert_eq!(&buf[..bytes.len()], &bytes);
+    }
+
+    // Validates tuple set/get/take and iteration order.
+    #[test]
+    fn test_tuple_set_get_take_and_iter() {
+        let mut tuple = Tuple::new(3);
+        tuple.set(0, 1u64);
+        tuple.set(1, "two");
+        tuple.set(2, Value::Null(Null::new()));
+
+        assert_eq!(tuple.get(0), Value::Word(Word::new(1)));
+        assert_eq!(tuple.get(1), Value::Blob(Blob::from("two")));
+        assert_eq!(tuple.get(2), Value::Null(Null::new()));
+
+        let taken = tuple.take(1);
+        assert_eq!(taken, Value::Blob(Blob::from("two")));
+        assert_eq!(tuple.get(1), Value::Null(Null::new()));
+
+        let items: Vec<Value> = tuple.iter().collect();
+        assert_eq!(items.len(), 3);
+    }
+
+    // Checks tuple swap semantics and out-of-bounds errors.
+    #[test]
+    fn test_tuple_swap_and_bounds_errors() {
+        let mut tuple = Tuple::new(2);
+        tuple.set(0, 1u64);
+        tuple.set(1, 2u64);
+
+        let mut replacement = Value::Blob(Blob::from("swap"));
+        tuple.swap(0, &mut replacement);
+        assert_eq!(replacement, Value::Word(Word::new(1)));
+        assert_eq!(tuple.get(0), Value::Blob(Blob::from("swap")));
+
+        let result = <Tuple>::get_tuple(&tuple, 3);
+        assert!(matches!(result, Err(Error::InvalidIndex(3))));
+        let result = <Tuple>::set_tuple(&mut tuple, 3, 5u64.into());
+        assert!(matches!(result, Err(Error::InvalidIndex(3))));
+    }
+
+    // Confirms tuple byte sizes add up for mixed content.
+    #[test]
+    fn test_tuple_byte_size() {
+        let tuple = Value::Tuple(Tuple::from((1u64, "hi")));
+        assert_eq!(tuple.byte_size(), core::mem::size_of::<u64>() + 2);
+    }
+
+    // Verifies page read/write behavior and length.
+    #[test]
+    fn test_page_read_write_len() {
+        let mut page = Page::new(1);
+        assert_eq!(page.len(), 1 << 12);
+
+        let data = [1u8, 2, 3, 4];
+        let written = page.write(0, &data);
+        assert_eq!(written, data.len());
+
+        let mut buf = [0u8; 4];
+        let read = page.read(0, &mut buf);
+        assert_eq!(read, data.len());
+        assert_eq!(buf, data);
+    }
+
+    // Ensures page reads and writes with offsets work correctly.
+    #[test]
+    fn test_page_read_write_with_offset() {
+        let mut page = Page::new(1);
+        let data = [9u8, 8, 7];
+        let written = page.write(4, &data);
+        assert_eq!(written, data.len());
+
+        let mut buf = [0u8; 3];
+        let read = page.read(4, &mut buf);
+        assert_eq!(read, data.len());
+        assert_eq!(buf, data);
+    }
+
+    // Confirms page size tier selection at thresholds.
+    #[test]
+    fn test_page_size_tiers() {
+        let small = Page::new(1);
+        assert_eq!(small.len(), 1 << 12);
+
+        let mid = Page::new((1 << 12) + 1);
+        assert_eq!(mid.len(), 1 << 21);
+
+        let large = Page::new((1 << 21) + 1);
+        assert_eq!(large.len(), 1 << 30);
+    }
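+    // A non-interference sketch: writes at disjoint offsets within one
+    // page are assumed not to clobber each other.
+    #[test]
+    fn test_page_disjoint_writes() {
+        let mut page = Page::new(1);
+        let _ = page.write(0, &[1u8, 2]);
+        let _ = page.write(8, &[3u8, 4]);
+
+        let mut buf = [0u8; 2];
+        let _ = page.read(0, &mut buf);
+        assert_eq!(buf, [1, 2]);
+        let _ = page.read(8, &mut buf);
+        assert_eq!(buf, [3, 4]);
+    }
+
+    // Verifies default table entry sizes for small and mid tables.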
+    #[test]
+    fn test_table_default_entry_sizes() {
+        let table_small = Table::new(1);
+        let entry = table_small.get(0).unwrap();
+        assert_eq!(entry, Entry::Null(1 << 12));
+        assert_eq!(table_small.len(), 1 << 21);
+
+        let table_mid = Table::new((1 << 21) + 1);
+        let entry = table_mid.get(0).unwrap();
+        assert_eq!(entry, Entry::Null(1 << 21));
+        assert_eq!(table_mid.len(), 1 << 30);
+    }
+
+    // Ensures tables grow when mapping beyond current range.
+    #[test]
+    fn test_table_map_growth() {
+        let mut table = Table::new(1);
+        let entry = Entry::RWPage(Page::new(1));
+        let address = 1 << 21;
+        let _ = table.map(address, entry).unwrap();
+        assert_eq!(table.len(), 1 << 30);
+    }
+
+    // Verifies unmap returns None for addresses beyond the table range.
+    #[test]
+    fn test_table_unmap_out_of_range() {
+        let mut table = Table::new(1);
+        let missing = table.unmap(table.len() + 1);
+        assert!(missing.is_none());
+    }
+
+    // Ensures table map and unmap round-trip a page entry.
+    #[test]
+    fn test_table_map_unmap_roundtrip() {
+        let mut table = Table::new(1);
+        let entry = Entry::RWPage(Page::new(1));
+        let old = table.map(0, entry.clone()).unwrap();
+        assert_eq!(old, Entry::Null(1 << 12));
+
+        let unmapped = table.unmap(0);
+        assert_eq!(unmapped, Some(entry));
+    }
+
+    // Validates symbolic function apply and read behavior.
+    #[test]
+    fn test_function_symbolic_apply_and_read() {
+        let func = Function::symbolic(Word::new(7));
+        assert!(func.is_symbolic());
+
+        let func = func.apply(1u64).apply("arg");
+        let read_back = func.read_cloned();
+
+        let expected_args = Tuple::from((1u64, "arg"));
+        let expected = Value::Tuple(Tuple::from((
+            Blob::from("Symbolic"),
+            Value::Word(Word::new(7)),
+            Value::Tuple(expected_args),
+        )));
+
+        assert_eq!(read_back, expected);
+    }
+
+    // Ensures function argument order and force semantics.
+    #[test]
+    fn test_function_apply_order_and_force() {
+        let func = Function::symbolic(Word::new(9));
+        let func = func.apply(1u64).apply(2u64).apply("three");
+        let read_back = func.read_cloned();
+
+        let expected_args = Tuple::from((1u64, 2u64, "three"));
+        let expected = Value::Tuple(Tuple::from((
+            Blob::from("Symbolic"),
+            Value::Word(Word::new(9)),
+            Value::Tuple(expected_args),
+        )));
+        assert_eq!(read_back, expected);
+
+        let forced = func.force();
+        match forced {
+            Value::Function(f) => assert!(f.is_symbolic()),
+            _ => panic!("expected a symbolic function value"),
+        }
+    }
-#[test]
-fn test_entry_error() {
-    let unknown_variant = [5, 0];
-    let deserialized: Result<Entry, postcard::Error> = postcard::from_bytes(&unknown_variant);
-    let deserialized_error = deserialized.expect_err("should have been err");
-    let error =
-        serde::de::Error::unknown_variant("5", &["Null", "ROPage", "RWPage", "ROTable", "RWTable"]);
-    assert_eq!(deserialized_error, error);
+    // Confirms invalid function construction is rejected.
+    #[test]
+    fn test_function_new_rejects_invalid_value() {
+        let invalid = Value::Word(Word::new(1));
+        let result = Function::new(invalid);
+        assert!(matches!(result, Err(Error::InvalidValue)));
+    }
 }

From a0f61f420c77989800f25f44c4000ceeb68648ec Mon Sep 17 00:00:00 2001
From: Niam Shah
Date: Mon, 9 Feb 2026 02:49:17 -0800
Subject: [PATCH 02/15] Add a ton of tests for the buddy allocator

Most pass, but a couple fail. Want confirmation that the failing tests
are accurately testing expected behavior before fixing them.
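
For reference, the buddy relation the new coalescing tests lean on (a
sketch of the intended invariant, not lifted from the allocator itself):
for a free block of power-of-two size at an arena-relative, size-aligned
offset,

    let buddy_offset = offset ^ size; // the only block it may merge with
    let parent_offset = offset & !size; // where the merged block starts

so freeing both halves of a pair should make the parent block reservable
at parent_offset.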
---
 common/src/buddy.rs | 907 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 840 insertions(+), 67 deletions(-)

diff --git a/common/src/buddy.rs b/common/src/buddy.rs
index 0acd595..27c7cc9 100644
--- a/common/src/buddy.rs
+++ b/common/src/buddy.rs
@@ -1146,6 +1146,7 @@ mod tests {
     use test::Bencher;
 
     #[test]
+    // Setting/clearing individual bits in u64 words to check that the bit manipulation works
     fn test_bitref() {
         let mut word = 10;
 
@@ -1165,6 +1166,7 @@ mod tests {
     }
 
     #[test]
+    // Setting/clearing individual bits in a BitSlice to check that the bit manipulation works
     fn test_bitslice() {
         let mut words = [0; 2];
         let mut slice = BitSlice::new(128, &mut words);
@@ -1185,6 +1187,7 @@ mod tests {
     }
 
     #[test]
+    // Basic setup + continuous element pushing to check that the allocator grows and adjusts properly
     fn test_buddy_allocator() {
         let allocator = BuddyAllocatorImpl::new(0x10000000);
 
@@ -1197,80 +1200,105 @@ mod tests {
         }
     }
 
-    #[bench]
-    fn bench_allocate_free(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        b.iter(|| {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        });
+    #[test]
+    // Verifying that too-small allocations of the allocator do not panic
+    // Potential issue: reserve_unchecked does not validate that the requested index is within the number of blocks at that level
+    fn test_too_small_allocation() {
+        let allocator = BuddyAllocatorImpl::new(1 << 20);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let used_before = allocator.used_size();
+        let ptr = allocator.allocate_raw(size);
     }
 
-    #[bench]
-    fn bench_allocate_free_no_cache(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        allocator.set_caching(false);
-        b.iter(|| {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        });
-    }
-
-    #[bench]
-    fn bench_contended_allocate_free(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        let f = || {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        };
-        use core::sync::atomic::AtomicBool;
-        use std::sync::Arc;
-        std::thread::scope(|s| {
-            let flag = Arc::new(AtomicBool::new(true));
-            for _ in 0..16 {
-                let flag = flag.clone();
-                s.spawn(move || {
-                    while flag.load(Ordering::SeqCst) {
-                        f();
-                    }
-                });
-            }
-            b.iter(f);
-            flag.store(false, Ordering::SeqCst);
-        });
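+    #[test]
+    // A repetition sketch (assumes single-threaded determinism): repeated
+    // allocate/free cycles of one size should keep restoring used_size.
+    fn test_allocate_free_repeated_restores_usage() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let used_before = allocator.used_size();
+        for _ in 0..2 {
+            let ptr = allocator.allocate_raw(size);
+            assert!(!ptr.is_null());
+            allocator.free_raw(ptr, size);
+        }
+        assert_eq!(allocator.used_size(), used_before);
+    }
+
+    #[test]
+    // Verifying allocate_raw adds size to used_size, and free_raw subtracts it back, returning usage to the original amount.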
+    fn test_allocate_raw_and_used_size() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let used_before = allocator.used_size();
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+        assert_eq!(allocator.used_size(), used_before + size);
+        allocator.free_raw(ptr, size);
+        assert_eq!(allocator.used_size(), used_before);
     }
 
-    #[bench]
-    #[ignore]
-    fn bench_contended_allocate_free_no_cache(b: &mut Bencher) {
-        let allocator = BuddyAllocatorImpl::new(0x100000000);
-        allocator.set_caching(false);
-        let f = || {
-            let x: Box<[MaybeUninit<u8>], BuddyAllocatorImpl> =
-                Box::new_uninit_slice_in(128, allocator.clone());
-            core::mem::drop(x);
-        };
-        use core::sync::atomic::AtomicBool;
-        use std::sync::Arc;
-        std::thread::scope(|s| {
-            let flag = Arc::new(AtomicBool::new(true));
-            for _ in 0..16 {
-                let flag = flag.clone();
-                s.spawn(move || {
-                    while flag.load(Ordering::SeqCst) {
-                        f();
-                    }
-                });
-            }
-            b.iter(f);
-            flag.store(false, Ordering::SeqCst);
-        });
+    #[test]
+    // Verifying allocate_many_raw adds size to used_size, and free_many_raw subtracts it back, returning usage to the original amount.
+    fn test_allocate_many_and_free_many() {
+        use std::collections::BTreeSet;
+
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let used_before = allocator.used_size();
+        let mut ptrs = [core::ptr::null_mut(); 4];
+        let count = allocator.allocate_many_raw(size, &mut ptrs);
+        assert_eq!(count, ptrs.len());
+        assert!(ptrs.iter().all(|ptr| !ptr.is_null()));
+
+        let unique: BTreeSet<usize> = ptrs.iter().map(|ptr| *ptr as usize).collect();
+        assert_eq!(unique.len(), ptrs.len());
+
+        allocator.free_many_raw(size, &ptrs);
+        assert_eq!(allocator.used_size(), used_before);
+    }
+
+    #[test]
+    // Verifying that to_offset and from_offset roundtrip correctly
+    fn test_offset_roundtrip() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+
+        let offset = allocator.to_offset(ptr);
+        let roundtrip = allocator.from_offset::<u8>(offset);
+        assert_eq!(roundtrip as usize, ptr as usize);
+
+        allocator.free_raw(ptr, size);
+    }
+
+    #[test]
+    // Verifying that reserving at zero returns a null pointer and does not add to used_size
+    fn test_reserve_raw_at_zero() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let used_before = allocator.used_size();
+        let ptr = allocator.reserve_raw(0, size);
+        assert!(ptr.is_null());
+        assert_eq!(allocator.used_size(), used_before);
+    }
+
+    #[test]
+    // Verifying that allocating too large returns a null pointer and does not add to used_size
+    fn test_allocate_raw_too_large_returns_null() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let used_before = allocator.used_size();
+        let ptr = allocator.allocate_raw(allocator.total_size() * 2);
+        assert!(ptr.is_null());
+        assert_eq!(allocator.used_size(), used_before);
+    }
+
+    #[test]
+    // Verifying that refcnt is zero on allocate
+    fn test_refcnt_zero_on_allocate() {
+        use core::sync::atomic::Ordering;
+
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+
+        let refcnt = allocator.refcnt(ptr);
+        assert!(!refcnt.is_null());
+        let value = unsafe { (*refcnt).load(Ordering::SeqCst) };
+        assert_eq!(value, 0);
+
+        allocator.free_raw(ptr, size);
     }
 
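+    #[test]
+    // A refcnt mutation sketch (assumes the counter is an ordinary atomic
+    // that the caller owns between allocate and free): bump it, read it
+    // back, and reset it to zero before freeing.
+    fn test_refcnt_increment_roundtrip() {
+        use core::sync::atomic::Ordering;
+
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+
+        let refcnt = allocator.refcnt(ptr);
+        unsafe {
+            (*refcnt).fetch_add(1, Ordering::SeqCst);
+            assert_eq!((*refcnt).load(Ordering::SeqCst), 1);
+            (*refcnt).fetch_sub(1, Ordering::SeqCst);
+        }
+        allocator.free_raw(ptr, size);
+    }
+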
#[test] + // Stress testing the allocator with random allocations and frees fn stress_test() { use std::hash::{BuildHasher, Hasher, RandomState}; let allocator = BuddyAllocatorImpl::new(0x10000000); @@ -1298,4 +1326,749 @@ mod tests { v.push(alloc); } } + + #[test] + // Test splitting large blocks into smaller ones + fn test_block_splitting() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let small_size = BuddyAllocatorImpl::MIN_ALLOCATION; + let large_size = small_size * 4; + + // Allocate and free a large block + let large_ptr = allocator.allocate_raw(large_size); + assert!(!large_ptr.is_null()); + allocator.free_raw(large_ptr, large_size); + + // Now allocate multiple small blocks - should split the large one + let mut small_ptrs = vec![]; + for _ in 0..4 { + let ptr = allocator.allocate_raw(small_size); + assert!(!ptr.is_null()); + small_ptrs.push(ptr); + } + + // Clean up + for ptr in small_ptrs { + allocator.free_raw(ptr, small_size); + } + } + + #[test] + fn test_reserve_specific_addresses() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + // Get a definitely-available block + let p = allocator.allocate_raw(size); + assert!(!p.is_null()); + let address = allocator.to_offset(p); + allocator.free_raw(p, size); + + // Now we should be able to reserve that exact address + let ptr1 = allocator.reserve_raw(address, size); + assert!(!ptr1.is_null()); + assert_eq!(allocator.to_offset(ptr1), address); + + // Reserving again should fail + let ptr2 = allocator.reserve_raw(address, size); + assert!(ptr2.is_null()); + + allocator.free_raw(ptr1, size); + } + + #[test] + // Test reserving overlapping regions + fn test_reserve_overlapping_regions() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + // Reserve a block + let ptr1 = allocator.reserve_raw(size * 5, size); + assert!(!ptr1.is_null()); + + // Try to reserve a larger block that would overlap + let ptr2 = allocator.reserve_raw(size * 4, size * 4); + assert!(ptr2.is_null()); // Should fail because it overlaps with ptr1 + + allocator.free_raw(ptr1, size); + } + + #[test] + // Test allocating all available memory + fn test_exhaust_memory() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + let mut ptrs = vec![]; + + // Allocate until we can't anymore + loop { + let ptr = allocator.allocate_raw(size); + if ptr.is_null() { + break; + } + ptrs.push(ptr); + } + + // Verify we actually allocated something + assert!(!ptrs.is_empty()); + + // Try one more allocation - should fail + let ptr = allocator.allocate_raw(size); + assert!(ptr.is_null()); + + // Free everything + for ptr in ptrs { + allocator.free_raw(ptr, size); + } + + // Should be able to allocate again + let ptr = allocator.allocate_raw(size); + assert!(!ptr.is_null()); + allocator.free_raw(ptr, size); + } + + #[test] + // Test mixed allocation sizes + fn test_mixed_allocation_sizes() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + let small = BuddyAllocatorImpl::MIN_ALLOCATION; + let medium = small * 4; + let large = small * 16; + + let ptr1 = allocator.allocate_raw(small); + let ptr2 = allocator.allocate_raw(large); + let ptr3 = allocator.allocate_raw(medium); + let ptr4 = allocator.allocate_raw(small); + + assert!(!ptr1.is_null()); + assert!(!ptr2.is_null()); + assert!(!ptr3.is_null()); + assert!(!ptr4.is_null()); + + // Verify they're all different + let ptrs = [ptr1, ptr2, ptr3, ptr4]; + for i in 
0..ptrs.len() { + for j in (i + 1)..ptrs.len() { + assert_ne!(ptrs[i], ptrs[j]); + } + } + + allocator.free_raw(ptr2, large); + allocator.free_raw(ptr1, small); + allocator.free_raw(ptr4, small); + allocator.free_raw(ptr3, medium); + } + + #[test] + // Test freeing in different order than allocation + fn test_free_reverse_order() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + let mut ptrs = vec![]; + for _ in 0..10 { + let ptr = allocator.allocate_raw(size); + assert!(!ptr.is_null()); + ptrs.push(ptr); + } + + let used_peak = allocator.used_size(); + + // Free in reverse order + for ptr in ptrs.iter().rev() { + allocator.free_raw(*ptr, size); + } + + assert!(allocator.used_size() < used_peak); + } + + #[test] + #[should_panic(expected = "assertion failed")] + #[ignore] + // After looking more closely after writing this, I realized that panicking here is not an expected behavior. + // Also, free raw could merge into a larger block, complicating this test. + // Keeping this test for now, in case we decide to support this behavior later. + fn test_double_free_panics() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + let ptr = allocator.allocate_raw(size); + assert!(!ptr.is_null()); + + allocator.free_raw(ptr, size); + allocator.free_raw(ptr, size); // Should panic + } + + #[test] + // Test allocation size rounding. Test after exhausting, allocator should still be usable -- no lock leak + fn allocation_rounds_up_to_pow2_and_min() { + let a = BuddyAllocatorImpl::new(1 << 24); + + // Request sizes that aren't powers of 2 + let ptr1 = a.allocate_raw(5000); // Should round to 8192 + let ptr2 = a.allocate_raw(1000); // Should round to 4096 + let ptr3 = a.allocate_raw(10000); // Should round to 16384 + + assert!(!ptr1.is_null()); + assert!(!ptr2.is_null()); + assert!(!ptr3.is_null()); + + a.free_raw(ptr1, 5000); + a.free_raw(ptr2, 1000); + a.free_raw(ptr3, 10000); + + let used0 = a.used_size(); + let p = a.allocate_raw(5000); // rounds to 8192 (and >= 4096) + assert!(!p.is_null()); + assert_eq!(a.used_size(), used0 + 8192); + + a.free_raw(p, 5000); // free uses same rounding path + assert_eq!(a.used_size(), used0); + } + + #[test] + // Confirm there is no overlap between levels + fn test_offset_calculation() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + let mut ranges = vec![]; + for level in allocator.inner.meta.level_range.clone() { + let offset = allocator.inner.offset_of_level_words(level); + let size = allocator.inner.size_of_level_words(level); + ranges.push((offset, offset + size, level)); + } + ranges.sort_by_key(|(start, _, _)| *start); + + for w in ranges.windows(2) { + let (s1, e1, l1) = w[0]; + let (s2, _e2, l2) = w[1]; + assert!(e1 <= s2, "overlap between level {} and level {}", l1, l2); + } + } + + #[test] + // Test bitmap boundaries + fn test_bitmap_boundaries() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + for level in allocator.inner.meta.level_range.clone() { + let bits = allocator.inner.size_of_level_bits(level); + let words = allocator.inner.size_of_level_words(level); + + // Verify words is enough to hold bits + assert!(words * 64 >= bits, + "Level {} needs {} bits but only has {} words ({} bits)", + level, bits, words, words * 64); + } + } + + #[test] + // Test try_allocate_many_raw where everything should succeed easily + fn test_try_allocate_many_no_contention() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = 
BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        let mut ptrs = [core::ptr::null_mut(); 10];
+        let result = allocator.try_allocate_many_raw(size, &mut ptrs);
+
+        assert_eq!(result, Some(10));
+        assert!(ptrs.iter().all(|p| !p.is_null()));
+
+        allocator.free_many_raw(size, &ptrs);
+    }
+
+    #[test]
+    #[ignore]
+    // Testing allocating more pointers than space is available for, making sure bulk alloc stops cleanly when space runs out,
+    // partial success is allowed, and the reported successes are valid. Currently hanging
+    fn test_allocate_many_partial_success() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        // Request more blocks than available
+        let mut ptrs = [core::ptr::null_mut(); 10000];
+        let count = allocator.allocate_many_raw(size, &mut ptrs);
+
+        // Should have allocated some but not all
+        assert!(count > 0);
+        assert!(count < ptrs.len());
+
+        // All allocated pointers should be non-null
+        for i in 0..count {
+            assert!(!ptrs[i].is_null());
+        }
+
+        // Remaining should be null
+        for i in count..ptrs.len() {
+            assert!(ptrs[i].is_null());
+        }
+
+        // Clean up
+        allocator.free_many_raw(size, &ptrs[0..count]);
+    }
+
+    #[test]
+    // Test that refcnt works for different allocation addresses
+    fn test_refcnt_different_addresses() {
+        use core::sync::atomic::Ordering;
+
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        let ptr1 = allocator.allocate_raw(size);
+        let ptr2 = allocator.allocate_raw(size);
+
+        let refcnt1 = allocator.refcnt(ptr1);
+        let refcnt2 = allocator.refcnt(ptr2);
+
+        // Should be different refcnt locations
+        assert_ne!(refcnt1, refcnt2);
+
+        // Both should be 0
+        assert_eq!(unsafe { (*refcnt1).load(Ordering::SeqCst) }, 0);
+        assert_eq!(unsafe { (*refcnt2).load(Ordering::SeqCst) }, 0);
+
+        allocator.free_raw(ptr1, size);
+        allocator.free_raw(ptr2, size);
+    }
+
+    #[test]
+    // Test null pointer refcnt
+    fn test_refcnt_null_pointer() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let refcnt = allocator.refcnt(core::ptr::null::<u8>());
+        assert!(refcnt.is_null());
+    }
+
+    #[test]
+    // Test usage calculation
+    fn test_usage_calculation() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        let initial_usage = allocator.usage();
+
+        let ptr = allocator.allocate_raw(size);
+        let usage_after = allocator.usage();
+
+        assert!(usage_after > initial_usage);
+        assert!(usage_after <= 1.0);
+        assert!(usage_after >= 0.0);
+
+        allocator.free_raw(ptr, size);
+    }
+
+    #[test]
+    // Test request counting
+    fn test_request_counting() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        let mut before = [0; 64];
+        let mut after = [0; 64];
+
+        allocator.requests(&mut before);
+
+        let ptr = allocator.allocate_raw(size);
+        allocator.free_raw(ptr, size);
+
+        allocator.requests(&mut after);
+
+        // Should have incremented request count for the size level
+        let level = size.next_power_of_two().ilog2() as usize;
+        assert!(after[level] > before[level]);
+    }
+
+    #[test]
+    fn test_alignment_requirements() {
+        let allocator = BuddyAllocatorImpl::new(1 << 24);
+        let base = allocator.base() as usize;
+
+        for power in 12..20 {
+            let size = 1 << power;
+            let ptr = allocator.allocate_raw(size);
+            assert!(!ptr.is_null());
+
+            let addr = ptr as usize;
+            assert_eq!((addr - base) % size, 0, "Allocation of size {} not aligned within arena", size);
+
+            allocator.free_raw(ptr, size);
+        }
+    }
+
+    #[test]
+    // Test clone and drop 
behavior + fn test_clone_and_drop() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + let ptr1 = allocator.allocate_raw(4096); + assert!(!ptr1.is_null()); + + { + let clone = allocator.clone(); + let ptr2 = clone.allocate_raw(4096); + assert!(!ptr2.is_null()); + clone.free_raw(ptr2, 4096); + // clone drops here + } + + // Original should still work + let ptr3 = allocator.allocate_raw(4096); + assert!(!ptr3.is_null()); + + allocator.free_raw(ptr1, 4096); + allocator.free_raw(ptr3, 4096); + } + + #[test] + // Allocate one block, compute its buddy address, and verify that reserving the buddy returns that address (if it’s free). + fn buddy_address_math_matches_reserve() { + let a = BuddyAllocatorImpl::new(1 << 24); + let base = a.base() as usize; + + for power in 12..18 { + let size = 1usize << power; + let p = a.allocate_raw(size); + assert!(!p.is_null()); + + let off = a.to_offset(p); + let idx = off / size; + let buddy_idx = idx ^ 1; + let buddy_off = buddy_idx * size; + + // If the buddy is free, reserve_raw must return exactly that address. + let b = a.reserve_raw(buddy_off, size); + if !b.is_null() { + assert_eq!(a.to_offset(b), buddy_off); + a.free_raw(b, size); + } + + a.free_raw(p, size); + + // (optional) base-relative alignment property + assert_eq!(((p as usize) - base) % size, 0); + } + } + + #[test] + // 'Create' a known free block at a known offset by allocating a large block, then freeing it, then reserving/allocating inside it. + fn split_large_block_into_smaller_blocks() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let big = 1usize << 16; // 64KiB + let small = 1usize << 12; // 4KiB + let factor = big / small; + + let p = a.allocate_raw(big); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, big); + + // Now reserve all 4KiB blocks inside that 64KiB region. + let mut blocks = Vec::new(); + for i in 0..factor { + let q = a.reserve_raw(off + i * small, small); + assert!(!q.is_null(), "failed to reserve sub-block {}", i); + blocks.push(q); + } + + // Free them back + for q in blocks { + a.free_raw(q, small); + } + } + + #[test] + // Reserve two buddy halves, free them, verify you can reserve the parent block at the exact parent address. + fn coalesce_two_buddies_into_parent() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + // Create a known free parent block at a known offset. + let p = a.allocate_raw(parent); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, parent); + + // Reserve both children (buddies). + let c0 = a.reserve_raw(off, child); + let c1 = a.reserve_raw(off + child, child); + assert!(!c0.is_null() && !c1.is_null()); + + // Free both; this should coalesce into the parent. + a.free_raw(c0, child); + a.free_raw(c1, child); + + // Now reserving the parent at 'off' should succeed. + let p2 = a.reserve_raw(off, parent); + assert!(!p2.is_null(), "parent block did not reappear after coalescing"); + assert_eq!(a.to_offset(p2), off); + + a.free_raw(p2, parent); + } + + #[test] + // Hold one child, free the other, ensure parent reservation fails at that exact parent address. 
+    fn no_coalesce_if_only_one_buddy_free() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+
+        let parent = 1usize << 14; // 16KiB
+        let child = 1usize << 13; // 8KiB
+
+        let p = a.allocate_raw(parent);
+        assert!(!p.is_null());
+        let off = a.to_offset(p);
+        a.free_raw(p, parent);
+
+        let c0 = a.reserve_raw(off, child);
+        let c1 = a.reserve_raw(off + child, child);
+        assert!(!c0.is_null() && !c1.is_null());
+
+        // Free only one child
+        a.free_raw(c0, child);
+
+        // Parent must NOT be reservable while the other buddy is still held.
+        let parent_try = a.reserve_raw(off, parent);
+        assert!(parent_try.is_null(), "parent became available with one buddy still reserved");
+
+        // Cleanup
+        a.free_raw(c1, child);
+
+        // Now parent should be available (coalesced)
+        let parent_ok = a.reserve_raw(off, parent);
+        assert!(!parent_ok.is_null());
+        a.free_raw(parent_ok, parent);
+    }
+
+    #[test]
+    // Free child1 then child0; ensure parent becomes available.
+    fn coalesce_is_order_independent() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+        let parent = 1usize << 15; // 32KiB
+        let child = 1usize << 14; // 16KiB
+
+        let p = a.allocate_raw(parent);
+        assert!(!p.is_null());
+        let off = a.to_offset(p);
+        a.free_raw(p, parent);
+
+        let c0 = a.reserve_raw(off, child);
+        let c1 = a.reserve_raw(off + child, child);
+        assert!(!c0.is_null() && !c1.is_null());
+
+        a.free_raw(c1, child);
+        a.free_raw(c0, child);
+
+        let p2 = a.reserve_raw(off, parent);
+        assert!(!p2.is_null());
+        a.free_raw(p2, parent);
+    }
+
+    #[test]
+    // Free 4 children → coalesce to 2 parents → coalesce to 1 grandparent.
+    fn multi_level_coalesce_cascades() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+
+        let grand = 1usize << 15; // 32KiB
+        let child = 1usize << 13; // 8KiB
+        let n = grand / child; // 4
+
+        let p = a.allocate_raw(grand);
+        assert!(!p.is_null());
+        let off = a.to_offset(p);
+        a.free_raw(p, grand);
+
+        let mut kids = Vec::new();
+        for i in 0..n {
+            let k = a.reserve_raw(off + i * child, child);
+            assert!(!k.is_null());
+            kids.push(k);
+        }
+
+        // Free all kids -> should coalesce up to grand
+        for k in kids {
+            a.free_raw(k, child);
+        }
+
+        let g = a.reserve_raw(off, grand);
+        assert!(!g.is_null(), "expected full cascade coalesce to grand block");
+        a.free_raw(g, grand);
+    }
+
+    #[test]
+    // Verifying that reserving a region entirely beyond the arena returns a null pointer.
+    // Currently failing; needs to be fixed?
+    fn reserve_out_of_range_returns_null() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+        let size = 1usize << 12;
+
+        // definitely beyond arena
+        let ptr = a.reserve_raw(a.len() + size, size);
+        assert!(ptr.is_null());
+    }
+
+    #[test]
+    #[ignore]
+    // Testing allocate_many_raw where partial failure does not poison the lock
+    // Currently hanging because partial failure is not working; re-test after that is fixed
+    fn allocate_many_partial_failure_does_not_poison_lock() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+        let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+        let mut ptrs = [core::ptr::null_mut(); 10000];
+        let n = a.allocate_many_raw(size, &mut ptrs);
+
+        assert!(n > 0);
+        assert!(n < ptrs.len());
+
+        a.free_many_raw(size, &ptrs[..n]);
+
+        // If the lock leaked, this would hang.
+        let p = a.allocate_raw(size);
+        assert!(!p.is_null());
+        a.free_raw(p, size);
+    }
+
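+    #[test]
+    // An alignment sketch (assumption: buddy blocks are always size-aligned,
+    // as test_alignment_requirements checks): reserving at an offset that is
+    // not naturally aligned for the requested size should fail.
+    fn reserve_misaligned_offset_returns_null() {
+        let a = BuddyAllocatorImpl::new(1 << 24);
+        let size = 1usize << 13; // 8KiB
+
+        // 4KiB-aligned but not 8KiB-aligned
+        let ptr = a.reserve_raw(1 << 12, size);
+        assert!(ptr.is_null());
+    }
+
+    #[test]
+    // Ensures try_* returns None when lock is held.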
+ fn try_allocate_many_returns_none_when_locked() { + let a = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + // Manually lock allocator and ensure try_* fails. + unsafe { a.inner.lock(); } + let mut ptrs = [core::ptr::null_mut(); 4]; + let r = a.try_allocate_many_raw(size, &mut ptrs); + assert_eq!(r, None); + unsafe { a.inner.unlock(); } + + // Now it should work + let r2 = a.try_allocate_many_raw(size, &mut ptrs); + assert_eq!(r2, Some(4)); + a.free_many_raw(size, &ptrs); + } + + #[test] + // Interleaved patterns: A,B,C,D where (A,B) and (C,D) are buddy pairs. + // Freeing B and C alone should NOT make either parent available; + // freeing A then enables AB coalesce; freeing D then enables CD coalesce; then both parents can coalesce further. + fn interleaved_buddy_pairs_coalesce_independently_then_merge() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let grand = 1usize << 15; // 32KiB + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + // Known free 32KiB region + let g = a.allocate_raw(grand); + assert!(!g.is_null()); + let off = a.to_offset(g); + a.free_raw(g, grand); + + // Reserve A,B,C,D as 8KiB blocks at offsets 0,1,2,3 within the 32KiB region + let a0 = a.reserve_raw(off + 0 * child, child); // A + let b0 = a.reserve_raw(off + 1 * child, child); // B (buddy of A) + let c0 = a.reserve_raw(off + 2 * child, child); // C + let d0 = a.reserve_raw(off + 3 * child, child); // D (buddy of C) + assert!(!a0.is_null() && !b0.is_null() && !c0.is_null() && !d0.is_null()); + + // Free B and C only -> neither 16KiB parent should be reservable yet. + a.free_raw(b0, child); + a.free_raw(c0, child); + + assert!(a.reserve_raw(off + 0 * parent, parent).is_null(), "AB parent should not exist yet"); + assert!(a.reserve_raw(off + 1 * parent, parent).is_null(), "CD parent should not exist yet"); + + // Free A -> AB should coalesce to first 16KiB parent at off + a.free_raw(a0, child); + let p0 = a.reserve_raw(off + 0 * parent, parent); + assert!(!p0.is_null(), "AB should coalesce to 16KiB"); + a.free_raw(p0, parent); + + // Free D -> CD should coalesce to second 16KiB parent at off + 16KiB + a.free_raw(d0, child); + let p1 = a.reserve_raw(off + 1 * parent, parent); + assert!(!p1.is_null(), "CD should coalesce to 16KiB"); + a.free_raw(p1, parent); + + // Now both 16KiB parents are free -> should coalesce into 32KiB grandparent at off + let g2 = a.reserve_raw(off, grand); + assert!(!g2.is_null(), "two free 16KiB parents should coalesce to 32KiB"); + a.free_raw(g2, grand); + } + + #[test] + // Fragmentation scenario: partial coalescing with an obstacle. + // If one leaf remains reserved, upper levels must not fully coalesce; once obstacle freed, full coalesce should happen. + fn fragmentation_blocks_full_coalesce_until_obstacle_removed() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let big = 1usize << 16; // 64KiB region we control + let leaf = 1usize << 12; // 4KiB + let n = big / leaf; // 16 leaves + + // Known free 64KiB region + let p = a.allocate_raw(big); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, big); + + // Reserve all leaves, keep one as "obstacle", free the rest. 
+ let mut leaves = Vec::new(); + for i in 0..n { + let q = a.reserve_raw(off + i * leaf, leaf); + assert!(!q.is_null()); + leaves.push(q); + } + + let obstacle = leaves[7]; // arbitrary leaf to hold + for (i, q) in leaves.iter().enumerate() { + if *q == obstacle { continue; } + a.free_raw(*q, leaf); + } + + // With one 4KiB still reserved, the full 64KiB block must NOT be available. + assert!(a.reserve_raw(off, big).is_null(), "should not fully coalesce with an obstacle leaf reserved"); + + // Now free the obstacle leaf -> full coalesce should become possible. + a.free_raw(obstacle, leaf); + let big2 = a.reserve_raw(off, big); + assert!(!big2.is_null(), "after removing obstacle, should fully coalesce back to 64KiB"); + a.free_raw(big2, big); + } + + #[test] + // Reserved blocks shouldn't participate in coalescing: + // if one buddy is permanently reserved (held), the parent must not become available. + fn reserved_block_prevents_coalescing() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + // Known free parent region + let p = a.allocate_raw(parent); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, parent); + + // Reserve both children, but "reserve" one as a held block (simulate reservation that shouldn't coalesce). + let held = a.reserve_raw(off, child); + let other = a.reserve_raw(off + child, child); + assert!(!held.is_null() && !other.is_null()); + + // Free only the other -> parent must not appear + a.free_raw(other, child); + assert!(a.reserve_raw(off, parent).is_null(), "parent should not coalesce while one child is held/reserved"); + + // Once held is freed too, parent should become available + a.free_raw(held, child); + let p2 = a.reserve_raw(off, parent); + assert!(!p2.is_null()); + a.free_raw(p2, parent); + } + } From a9caf1741bb5cf43520788d0620014ce442f85f3 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Mon, 9 Feb 2026 03:00:06 -0800 Subject: [PATCH 03/15] fixed formatting and recommitted --- common/src/buddy.rs | 232 +++++++++++++++----------- kernel/src/tests.rs | 1 + kernel/src/tests/test_kernel_types.rs | 2 +- kernel/src/tests/test_serde.rs | 9 +- 4 files changed, 145 insertions(+), 99 deletions(-) diff --git a/common/src/buddy.rs b/common/src/buddy.rs index 27c7cc9..c994fc6 100644 --- a/common/src/buddy.rs +++ b/common/src/buddy.rs @@ -1206,7 +1206,7 @@ mod tests { fn test_too_small_allocation() { let allocator = BuddyAllocatorImpl::new(1 << 20); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); + let used_before = allocator.used_size(); let ptr = allocator.allocate_raw(size); } @@ -1215,7 +1215,7 @@ mod tests { fn test_allocate_raw_and_used_size() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); + let used_before = allocator.used_size(); let ptr = allocator.allocate_raw(size); assert!(!ptr.is_null()); assert_eq!(allocator.used_size(), used_before + size); @@ -1333,12 +1333,12 @@ mod tests { let allocator = BuddyAllocatorImpl::new(1 << 24); let small_size = BuddyAllocatorImpl::MIN_ALLOCATION; let large_size = small_size * 4; - + // Allocate and free a large block let large_ptr = allocator.allocate_raw(large_size); assert!(!large_ptr.is_null()); allocator.free_raw(large_ptr, large_size); - + // Now allocate multiple small blocks - should split the large one let mut small_ptrs = vec![]; for _ in 0..4 { @@ -1346,7 +1346,7 @@ mod tests { 
assert!(!ptr.is_null()); small_ptrs.push(ptr); } - + // Clean up for ptr in small_ptrs { allocator.free_raw(ptr, small_size); @@ -1381,15 +1381,15 @@ mod tests { fn test_reserve_overlapping_regions() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + // Reserve a block let ptr1 = allocator.reserve_raw(size * 5, size); assert!(!ptr1.is_null()); - + // Try to reserve a larger block that would overlap let ptr2 = allocator.reserve_raw(size * 4, size * 4); assert!(ptr2.is_null()); // Should fail because it overlaps with ptr1 - + allocator.free_raw(ptr1, size); } @@ -1399,7 +1399,7 @@ mod tests { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; let mut ptrs = vec![]; - + // Allocate until we can't anymore loop { let ptr = allocator.allocate_raw(size); @@ -1408,19 +1408,19 @@ mod tests { } ptrs.push(ptr); } - + // Verify we actually allocated something assert!(!ptrs.is_empty()); - + // Try one more allocation - should fail let ptr = allocator.allocate_raw(size); assert!(ptr.is_null()); - + // Free everything for ptr in ptrs { allocator.free_raw(ptr, size); } - + // Should be able to allocate again let ptr = allocator.allocate_raw(size); assert!(!ptr.is_null()); @@ -1431,21 +1431,21 @@ mod tests { // Test mixed allocation sizes fn test_mixed_allocation_sizes() { let allocator = BuddyAllocatorImpl::new(1 << 24); - + let small = BuddyAllocatorImpl::MIN_ALLOCATION; let medium = small * 4; let large = small * 16; - + let ptr1 = allocator.allocate_raw(small); let ptr2 = allocator.allocate_raw(large); let ptr3 = allocator.allocate_raw(medium); let ptr4 = allocator.allocate_raw(small); - + assert!(!ptr1.is_null()); assert!(!ptr2.is_null()); assert!(!ptr3.is_null()); assert!(!ptr4.is_null()); - + // Verify they're all different let ptrs = [ptr1, ptr2, ptr3, ptr4]; for i in 0..ptrs.len() { @@ -1453,7 +1453,7 @@ mod tests { assert_ne!(ptrs[i], ptrs[j]); } } - + allocator.free_raw(ptr2, large); allocator.free_raw(ptr1, small); allocator.free_raw(ptr4, small); @@ -1465,21 +1465,21 @@ mod tests { fn test_free_reverse_order() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let mut ptrs = vec![]; for _ in 0..10 { let ptr = allocator.allocate_raw(size); assert!(!ptr.is_null()); ptrs.push(ptr); } - + let used_peak = allocator.used_size(); - + // Free in reverse order for ptr in ptrs.iter().rev() { allocator.free_raw(*ptr, size); } - + assert!(allocator.used_size() < used_peak); } @@ -1492,10 +1492,10 @@ mod tests { fn test_double_free_panics() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let ptr = allocator.allocate_raw(size); assert!(!ptr.is_null()); - + allocator.free_raw(ptr, size); allocator.free_raw(ptr, size); // Should panic } @@ -1506,14 +1506,14 @@ mod tests { let a = BuddyAllocatorImpl::new(1 << 24); // Request sizes that aren't powers of 2 - let ptr1 = a.allocate_raw(5000); // Should round to 8192 - let ptr2 = a.allocate_raw(1000); // Should round to 4096 + let ptr1 = a.allocate_raw(5000); // Should round to 8192 + let ptr2 = a.allocate_raw(1000); // Should round to 4096 let ptr3 = a.allocate_raw(10000); // Should round to 16384 - + assert!(!ptr1.is_null()); assert!(!ptr2.is_null()); assert!(!ptr3.is_null()); - + a.free_raw(ptr1, 5000); a.free_raw(ptr2, 1000); a.free_raw(ptr3, 10000); @@ -1531,7 +1531,7 @@ mod tests { // Confirm there is no overlap between levels fn test_offset_calculation() { 
let allocator = BuddyAllocatorImpl::new(1 << 24); - + let mut ranges = vec![]; for level in allocator.inner.meta.level_range.clone() { let offset = allocator.inner.offset_of_level_words(level); @@ -1551,15 +1551,20 @@ mod tests { // Test bitmap boundaries fn test_bitmap_boundaries() { let allocator = BuddyAllocatorImpl::new(1 << 24); - + for level in allocator.inner.meta.level_range.clone() { let bits = allocator.inner.size_of_level_bits(level); let words = allocator.inner.size_of_level_words(level); - + // Verify words is enough to hold bits - assert!(words * 64 >= bits, - "Level {} needs {} bits but only has {} words ({} bits)", - level, bits, words, words * 64); + assert!( + words * 64 >= bits, + "Level {} needs {} bits but only has {} words ({} bits)", + level, + bits, + words, + words * 64 + ); } } @@ -1568,13 +1573,13 @@ mod tests { fn test_try_allocate_many_no_contention() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let mut ptrs = [core::ptr::null_mut(); 10]; let result = allocator.try_allocate_many_raw(size, &mut ptrs); - + assert_eq!(result, Some(10)); assert!(ptrs.iter().all(|p| !p.is_null())); - + allocator.free_many_raw(size, &ptrs); } @@ -1585,25 +1590,25 @@ mod tests { fn test_allocate_many_partial_success() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + // Request more blocks than available let mut ptrs = [core::ptr::null_mut(); 10000]; let count = allocator.allocate_many_raw(size, &mut ptrs); - + // Should have allocated some but not all assert!(count > 0); assert!(count < ptrs.len()); - + // All allocated pointers should be non-null for i in 0..count { assert!(!ptrs[i].is_null()); } - + // Remaining should be null for i in count..ptrs.len() { assert!(ptrs[i].is_null()); } - + // Clean up allocator.free_many_raw(size, &ptrs[0..count]); } @@ -1612,23 +1617,23 @@ mod tests { // Test that refcnt works for different allocation addresses fn test_refcnt_different_addresses() { use core::sync::atomic::Ordering; - + let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let ptr1 = allocator.allocate_raw(size); let ptr2 = allocator.allocate_raw(size); - + let refcnt1 = allocator.refcnt(ptr1); let refcnt2 = allocator.refcnt(ptr2); - + // Should be different refcnt locations assert_ne!(refcnt1, refcnt2); - + // Both should be 0 assert_eq!(unsafe { (*refcnt1).load(Ordering::SeqCst) }, 0); assert_eq!(unsafe { (*refcnt2).load(Ordering::SeqCst) }, 0); - + allocator.free_raw(ptr1, size); allocator.free_raw(ptr2, size); } @@ -1646,16 +1651,16 @@ mod tests { fn test_usage_calculation() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let initial_usage = allocator.usage(); - + let ptr = allocator.allocate_raw(size); let usage_after = allocator.usage(); - + assert!(usage_after > initial_usage); assert!(usage_after <= 1.0); assert!(usage_after >= 0.0); - + allocator.free_raw(ptr, size); } @@ -1664,17 +1669,17 @@ mod tests { fn test_request_counting() { let allocator = BuddyAllocatorImpl::new(1 << 24); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - + let mut before = [0; 64]; let mut after = [0; 64]; - + allocator.requests(&mut before); - + let ptr = allocator.allocate_raw(size); allocator.free_raw(ptr, size); - + allocator.requests(&mut after); - + // Should have incremented request count for the size level let level = size.next_power_of_two().ilog2() as usize; assert!(after[level] 
> before[level]); @@ -1684,27 +1689,32 @@ mod tests { fn test_alignment_requirements() { let allocator = BuddyAllocatorImpl::new(1 << 24); let base = allocator.base() as usize; - + for power in 12..20 { let size = 1 << power; let ptr = allocator.allocate_raw(size); assert!(!ptr.is_null()); - + let addr = ptr as usize; - assert_eq!((addr - base) % size, 0, "Allocation of size {} not aligned within arena", size); - + assert_eq!( + (addr - base) % size, + 0, + "Allocation of size {} not aligned within arena", + size + ); + allocator.free_raw(ptr, size); } - } + } #[test] // Test clone and drop behavior fn test_clone_and_drop() { let allocator = BuddyAllocatorImpl::new(1 << 24); - + let ptr1 = allocator.allocate_raw(4096); assert!(!ptr1.is_null()); - + { let clone = allocator.clone(); let ptr2 = clone.allocate_raw(4096); @@ -1712,11 +1722,11 @@ mod tests { clone.free_raw(ptr2, 4096); // clone drops here } - + // Original should still work let ptr3 = allocator.allocate_raw(4096); assert!(!ptr3.is_null()); - + allocator.free_raw(ptr1, 4096); allocator.free_raw(ptr3, 4096); } @@ -1756,7 +1766,7 @@ mod tests { fn split_large_block_into_smaller_blocks() { let a = BuddyAllocatorImpl::new(1 << 24); - let big = 1usize << 16; // 64KiB + let big = 1usize << 16; // 64KiB let small = 1usize << 12; // 4KiB let factor = big / small; @@ -1785,7 +1795,7 @@ mod tests { let a = BuddyAllocatorImpl::new(1 << 24); let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB + let child = 1usize << 13; // 8KiB // Create a known free parent block at a known offset. let p = a.allocate_raw(parent); @@ -1804,7 +1814,10 @@ mod tests { // Now reserving the parent at 'off' should succeed. let p2 = a.reserve_raw(off, parent); - assert!(!p2.is_null(), "parent block did not reappear after coalescing"); + assert!( + !p2.is_null(), + "parent block did not reappear after coalescing" + ); assert_eq!(a.to_offset(p2), off); a.free_raw(p2, parent); @@ -1816,7 +1829,7 @@ mod tests { let a = BuddyAllocatorImpl::new(1 << 24); let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB + let child = 1usize << 13; // 8KiB let p = a.allocate_raw(parent); assert!(!p.is_null()); @@ -1832,7 +1845,10 @@ mod tests { // Parent must NOT be reservable while the other buddy is still held. let parent_try = a.reserve_raw(off, parent); - assert!(parent_try.is_null(), "parent became available with one buddy still reserved"); + assert!( + parent_try.is_null(), + "parent became available with one buddy still reserved" + ); // Cleanup a.free_raw(c1, child); @@ -1848,7 +1864,7 @@ mod tests { fn coalesce_is_order_independent() { let a = BuddyAllocatorImpl::new(1 << 24); let parent = 1usize << 15; // 32KiB - let child = 1usize << 14; // 16KiB + let child = 1usize << 14; // 16KiB let p = a.allocate_raw(parent); assert!(!p.is_null()); @@ -1874,7 +1890,7 @@ mod tests { let grand = 1usize << 15; // 32KiB let child = 1usize << 13; // 8KiB - let n = grand / child; // 4 + let n = grand / child; // 4 let p = a.allocate_raw(grand); assert!(!p.is_null()); @@ -1894,7 +1910,10 @@ mod tests { } let g = a.reserve_raw(off, grand); - assert!(!g.is_null(), "expected full cascade coalesce to grand block"); + assert!( + !g.is_null(), + "expected full cascade coalesce to grand block" + ); a.free_raw(g, grand); } @@ -1939,11 +1958,15 @@ mod tests { let size = BuddyAllocatorImpl::MIN_ALLOCATION; // Manually lock allocator and ensure try_* fails. 
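All of the coalescing tests here lean on the same power-of-two arithmetic: a block's buddy differs from its offset in exactly one bit, and clearing that bit yields the merged parent's offset. A minimal, self-contained sketch of that arithmetic, assuming offsets measured from the arena base (the function names and standalone form are illustrative, not the allocator's own API):

    fn buddy_of(offset: usize, size: usize) -> usize {
        debug_assert!(size.is_power_of_two());
        offset ^ size // flip the bit that distinguishes the two buddies
    }

    fn parent_of(offset: usize, size: usize) -> usize {
        offset & !size // clear that bit to get the merged parent's offset
    }

    fn main() {
        let child = 1usize << 13; // 8KiB, as in the tests above
        assert_eq!(buddy_of(0, child), child);
        assert_eq!(buddy_of(child, child), 0);
        // Both children of a 16KiB parent resolve to the same parent offset,
        // which is why coalescing is order independent.
        assert_eq!(parent_of(0, child), parent_of(child, child));
    }
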
- unsafe { a.inner.lock(); } + unsafe { + a.inner.lock(); + } let mut ptrs = [core::ptr::null_mut(); 4]; let r = a.try_allocate_many_raw(size, &mut ptrs); assert_eq!(r, None); - unsafe { a.inner.unlock(); } + unsafe { + a.inner.unlock(); + } // Now it should work let r2 = a.try_allocate_many_raw(size, &mut ptrs); @@ -1960,7 +1983,7 @@ mod tests { let grand = 1usize << 15; // 32KiB let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB + let child = 1usize << 13; // 8KiB // Known free 32KiB region let g = a.allocate_raw(grand); @@ -1979,8 +2002,14 @@ mod tests { a.free_raw(b0, child); a.free_raw(c0, child); - assert!(a.reserve_raw(off + 0 * parent, parent).is_null(), "AB parent should not exist yet"); - assert!(a.reserve_raw(off + 1 * parent, parent).is_null(), "CD parent should not exist yet"); + assert!( + a.reserve_raw(off + 0 * parent, parent).is_null(), + "AB parent should not exist yet" + ); + assert!( + a.reserve_raw(off + 1 * parent, parent).is_null(), + "CD parent should not exist yet" + ); // Free A -> AB should coalesce to first 16KiB parent at off a.free_raw(a0, child); @@ -1996,7 +2025,10 @@ mod tests { // Now both 16KiB parents are free -> should coalesce into 32KiB grandparent at off let g2 = a.reserve_raw(off, grand); - assert!(!g2.is_null(), "two free 16KiB parents should coalesce to 32KiB"); + assert!( + !g2.is_null(), + "two free 16KiB parents should coalesce to 32KiB" + ); a.free_raw(g2, grand); } @@ -2006,9 +2038,9 @@ mod tests { fn fragmentation_blocks_full_coalesce_until_obstacle_removed() { let a = BuddyAllocatorImpl::new(1 << 24); - let big = 1usize << 16; // 64KiB region we control - let leaf = 1usize << 12; // 4KiB - let n = big / leaf; // 16 leaves + let big = 1usize << 16; // 64KiB region we control + let leaf = 1usize << 12; // 4KiB + let n = big / leaf; // 16 leaves // Known free 64KiB region let p = a.allocate_raw(big); @@ -2026,17 +2058,25 @@ mod tests { let obstacle = leaves[7]; // arbitrary leaf to hold for (i, q) in leaves.iter().enumerate() { - if *q == obstacle { continue; } + if *q == obstacle { + continue; + } a.free_raw(*q, leaf); } // With one 4KiB still reserved, the full 64KiB block must NOT be available. - assert!(a.reserve_raw(off, big).is_null(), "should not fully coalesce with an obstacle leaf reserved"); + assert!( + a.reserve_raw(off, big).is_null(), + "should not fully coalesce with an obstacle leaf reserved" + ); // Now free the obstacle leaf -> full coalesce should become possible. a.free_raw(obstacle, leaf); let big2 = a.reserve_raw(off, big); - assert!(!big2.is_null(), "after removing obstacle, should fully coalesce back to 64KiB"); + assert!( + !big2.is_null(), + "after removing obstacle, should fully coalesce back to 64KiB" + ); a.free_raw(big2, big); } @@ -2045,30 +2085,32 @@ mod tests { // if one buddy is permanently reserved (held), the parent must not become available. fn reserved_block_prevents_coalescing() { let a = BuddyAllocatorImpl::new(1 << 24); - + let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB - + let child = 1usize << 13; // 8KiB + // Known free parent region let p = a.allocate_raw(parent); assert!(!p.is_null()); let off = a.to_offset(p); a.free_raw(p, parent); - + // Reserve both children, but "reserve" one as a held block (simulate reservation that shouldn't coalesce). 
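The held-buddy behavior this test pins down follows from one rule: a freed block merges upward only while its buddy is also free. A toy free-set model of that rule, assuming power-of-two sizes and offsets (std's HashSet stands in for the allocator's per-level bitmaps, so this is a sketch of the invariant, not the implementation):

    use std::collections::HashSet;

    // Free a block, merging upward while its buddy is also free.
    fn free_block(free: &mut HashSet<(usize, usize)>, mut off: usize, mut size: usize, max: usize) {
        while size < max && free.remove(&(off ^ size, size)) {
            off &= !size; // absorb the buddy into the parent block
            size <<= 1;
        }
        free.insert((off, size));
    }

    fn main() {
        let (child, parent) = (1usize << 13, 1usize << 14);
        let mut free = HashSet::new();
        free_block(&mut free, child, child, parent); // buddy at offset 0 still held
        assert!(!free.contains(&(0, parent))); // no parent appears yet
        free_block(&mut free, 0, child, parent); // the held buddy is freed
        assert!(free.contains(&(0, parent))); // now the pair coalesces
    }
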
let held = a.reserve_raw(off, child); let other = a.reserve_raw(off + child, child); assert!(!held.is_null() && !other.is_null()); - + // Free only the other -> parent must not appear a.free_raw(other, child); - assert!(a.reserve_raw(off, parent).is_null(), "parent should not coalesce while one child is held/reserved"); - + assert!( + a.reserve_raw(off, parent).is_null(), + "parent should not coalesce while one child is held/reserved" + ); + // Once held is freed too, parent should become available a.free_raw(held, child); let p2 = a.reserve_raw(off, parent); assert!(!p2.is_null()); a.free_raw(p2, parent); - } - + } } diff --git a/kernel/src/tests.rs b/kernel/src/tests.rs index 0339d5c..7681ebc 100644 --- a/kernel/src/tests.rs +++ b/kernel/src/tests.rs @@ -1 +1,2 @@ +pub mod test_kernel_types; pub mod test_serde; diff --git a/kernel/src/tests/test_kernel_types.rs b/kernel/src/tests/test_kernel_types.rs index 5a3f1c3..14468e6 100644 --- a/kernel/src/tests/test_kernel_types.rs +++ b/kernel/src/tests/test_kernel_types.rs @@ -3,12 +3,12 @@ mod tests { extern crate alloc; - use alloc::vec; use crate::types::internal as ktypes; use crate::types::{ Blob as ArcaBlob, Entry as ArcaEntry, Null as ArcaNull, Table as ArcaTable, Tuple as ArcaTuple, Value as ArcaValue, Word as ArcaWord, }; + use alloc::vec; // Verifies internal word read/write semantics. #[test] diff --git a/kernel/src/tests/test_serde.rs b/kernel/src/tests/test_serde.rs index b064679..3869a66 100644 --- a/kernel/src/tests/test_serde.rs +++ b/kernel/src/tests/test_serde.rs @@ -115,8 +115,10 @@ mod tests { let unknown_variant = [5, 0]; let deserialized: Result = postcard::from_bytes(&unknown_variant); let deserialized_error = deserialized.expect_err("should have been err"); - let error = - serde::de::Error::unknown_variant("5", &["Null", "ROPage", "RWPage", "ROTable", "RWTable"]); + let error = serde::de::Error::unknown_variant( + "5", + &["Null", "ROPage", "RWPage", "ROTable", "RWTable"], + ); assert_eq!(deserialized_error, error); } @@ -216,7 +218,8 @@ mod tests { let result = ::get_tuple(&tuple, 3); assert!(matches!(result, Err(Error::InvalidIndex(3)))); - let result = ::set_tuple(&mut tuple, 3, 5u64.into()); + let result = + ::set_tuple(&mut tuple, 3, 5u64.into()); assert!(matches!(result, Err(Error::InvalidIndex(3)))); } From f01ede06ade1b2d8f7b048997e52208d050ef277 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Mon, 9 Feb 2026 03:29:52 -0800 Subject: [PATCH 04/15] removed the piece causing the error in serde --- arca/src/serde.rs | 2 +- kernel/Cargo.toml | 3 +- kernel/src/tests/test_serde.rs | 742 +++++++++++++++++---------------- 3 files changed, 377 insertions(+), 370 deletions(-) diff --git a/arca/src/serde.rs b/arca/src/serde.rs index 23257df..a1ef5d3 100644 --- a/arca/src/serde.rs +++ b/arca/src/serde.rs @@ -333,7 +333,7 @@ impl<'de, R: Runtime> Visitor<'de> for TableVisitor { where A: serde::de::MapAccess<'de>, { - let (first_key, first_value): (alloc::string::String, usize) = + let (first_key, first_value): (&str, usize) = map.next_entry()?.expect("at least one element needed"); assert_eq!(first_key, "len"); let mut table = Table::new(first_value); diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 3ad4cff..9a757d1 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -36,7 +36,8 @@ talc = "4.4.3" spin = "0.10.0" async-lock = { version = "3.4.1", default-features = false } postcard = "1.1.3" -serde = { version = "1.0.228", default-features = false } +serde = { version = "1.0", default-features = false, features = 
["alloc", "derive"] } + [build-dependencies] anyhow = "1.0.86" diff --git a/kernel/src/tests/test_serde.rs b/kernel/src/tests/test_serde.rs index 3869a66..ac76810 100644 --- a/kernel/src/tests/test_serde.rs +++ b/kernel/src/tests/test_serde.rs @@ -1,368 +1,374 @@ -// runs with command: cargo test -p kernel --target=x86_64-unknown-none -#[cfg(test)] -mod tests { - extern crate alloc; - - use crate::prelude::*; - use crate::types::Error; - - // Verifies serialize and deserialize for the null value. - #[test] - fn test_serde_null() { - let null = Value::Null(Null::new()); - let bytes_vec = postcard::to_allocvec(&null).unwrap(); - let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_null, null); - } - - // Verifies serialize and deserialize for a word value. - #[test] - fn test_serde_word() { - let word = Value::Word(1.into()); - let bytes_vec = postcard::to_allocvec(&word).unwrap(); - let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_word, word); - } - - // Verifies serialize and deserialize for a blob value. - #[test] - fn test_serde_blob() { - let blob = Value::Blob("hello, world!".into()); - let bytes_vec = postcard::to_allocvec(&blob).unwrap(); - let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_blob, blob); - } - - // Verifies serialize and deserialize for a tuple value. - #[test] - fn test_serde_tuple() { - let tuple = Value::Tuple((1, 2, 3).into()); - let bytes_vec = postcard::to_allocvec(&tuple).unwrap(); - let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_tuple, tuple); - } - - // Verifies serialize and deserialize for a page value. - #[test] - fn test_serde_page() { - let page = Value::Page(Page::new(1)); - let bytes_vec = postcard::to_allocvec(&page).unwrap(); - let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_page, page); - } - - // Verifies serialize and deserialize for a table value. - #[test] - fn test_serde_table() { - let table = Value::Table(Table::new(1)); - let bytes_vec = postcard::to_allocvec(&table).unwrap(); - let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_table, table); - } - - // Verifies serialize and deserialize for a read-only page entry. - #[test] - fn test_serde_ropage() { - let ropage = Entry::ROPage(Page::new(1)); - let bytes_vec = postcard::to_allocvec(&ropage).unwrap(); - let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_ropage, ropage); - } - - // Verifies serialize and deserialize for a read-write page entry. - #[test] - fn test_serde_rwpage() { - let rwpage = Entry::RWPage(Page::new(1)); - let bytes_vec = postcard::to_allocvec(&rwpage).unwrap(); - let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_rwpage, rwpage); - } - - // Verifies serialize and deserialize for a read-only table entry. - #[test] - fn test_serde_rotable() { - let rotable = Entry::ROTable(Table::new(1)); - let bytes_vec = postcard::to_allocvec(&rotable).unwrap(); - let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_rotable, rotable); - } - - // Verifies serialize and deserialize for a read-write table entry. 
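Each of the entry tests above repeats the same postcard round-trip pattern. A freestanding version with a stand-in Entry enum (the kernel's real Entry type differs; this assumes serde's derive feature and postcard's alloc feature, as enabled in the Cargo.toml change above):

    use serde::{Deserialize, Serialize};

    // Stand-in enum; postcard encodes the variant index as a leading varint.
    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    enum Entry {
        Null(usize),
        ROPage(u64),
        RWPage(u64),
    }

    fn main() {
        let entry = Entry::RWPage(1);
        let bytes = postcard::to_allocvec(&entry).unwrap();
        let back: Entry = postcard::from_bytes(&bytes).unwrap();
        assert_eq!(back, entry); // the round trip is lossless
    }
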
- #[test] - fn test_serde_rwtable() { - let rwtable = Entry::RWTable(Table::new(1)); - let bytes_vec = postcard::to_allocvec(&rwtable).unwrap(); - let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); - assert_eq!(deserialized_rwtable, rwtable); - } - - // Ensures unknown Value variants cause the expected serde error. - #[test] - fn test_value_error() { - let unknown_variant = [7, 0]; - let deserialized: Result = postcard::from_bytes(&unknown_variant); - let deserialized_error = deserialized.expect_err("should have been err"); - let error = serde::de::Error::unknown_variant( - "7", - &["Null", "Word", "Blob", "Tuple", "Page", "Table"], - ); - assert_eq!(deserialized_error, error); - } - - // Ensures unknown Entry variants cause the expected serde error. - #[test] - fn test_entry_error() { - let unknown_variant = [5, 0]; - let deserialized: Result = postcard::from_bytes(&unknown_variant); - let deserialized_error = deserialized.expect_err("should have been err"); - let error = serde::de::Error::unknown_variant( - "5", - &["Null", "ROPage", "RWPage", "ROTable", "RWTable"], - ); - assert_eq!(deserialized_error, error); - } - - // Confirms datatype tagging and default value behavior. - #[test] - fn test_value_datatype_and_defaults() { - let null = Value::Null(Null::new()); - let word = Value::Word(Word::new(42)); - let blob = Value::Blob(Blob::from("hi")); - let tuple = Value::Tuple(Tuple::from((1u64, "x"))); - let page = Value::Page(Page::new(1)); - let table = Value::Table(Table::new(1)); - - assert_eq!(null.datatype(), DataType::Null); - assert_eq!(word.datatype(), DataType::Word); - assert_eq!(blob.datatype(), DataType::Blob); - assert_eq!(tuple.datatype(), DataType::Tuple); - assert_eq!(page.datatype(), DataType::Page); - assert_eq!(table.datatype(), DataType::Table); - assert_eq!(Value::default().datatype(), DataType::Null); - } - - // Checks word read semantics and byte size. - #[test] - fn test_word_read_and_byte_size() { - let word = Word::new(0xdeadbeef); - assert_eq!(word.read(), 0xdeadbeef); - - let value = Value::Word(word); - assert_eq!(value.byte_size(), core::mem::size_of::()); - } - - // Confirms blob length and read behavior. - #[test] - fn test_blob_read_and_len() { - let blob = Blob::from("hello"); - assert_eq!(blob.len(), 5); - - let mut buf = [0u8; 8]; - let read = blob.read(0, &mut buf); - assert_eq!(read, 5); - assert_eq!(&buf[..5], b"hello"); - } - - // Verifies blob reads with an offset return the expected suffix. - #[test] - fn test_blob_read_with_offset() { - let blob = Blob::from("offset"); - let mut buf = [0u8; 8]; - let read = blob.read(3, &mut buf); - assert_eq!(read, 3); - assert_eq!(&buf[..3], b"set"); - } - - // Ensures invalid UTF-8 blobs preserve raw bytes on read. - #[test] - fn test_blob_invalid_utf8_roundtrip() { - let bytes = [0xffu8, 0xfeu8, 0xfdu8]; - let blob = Blob::from(bytes.as_slice()); - let mut buf = [0u8; 4]; - let read = blob.read(0, &mut buf); - assert_eq!(read, bytes.len()); - assert_eq!(&buf[..bytes.len()], &bytes); - } - - // Validates tuple set/get/take and iteration order. 
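The offset-read tests above assume one contract: a read beginning at `offset` copies as many bytes as both the source tail and the destination buffer allow, and returns that count. A toy implementation of that contract (not the kernel's Blob):

    // Copy bytes starting at `offset`, bounded by source and buffer lengths.
    fn read_at(data: &[u8], offset: usize, buf: &mut [u8]) -> usize {
        let src = &data[offset.min(data.len())..];
        let n = src.len().min(buf.len());
        buf[..n].copy_from_slice(&src[..n]);
        n
    }

    fn main() {
        let mut buf = [0u8; 8];
        let n = read_at(b"offset", 3, &mut buf);
        assert_eq!(n, 3);
        assert_eq!(&buf[..n], b"set"); // the suffix past the offset
    }
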
- #[test] - fn test_tuple_set_get_take_and_iter() { - let mut tuple = Tuple::new(3); - tuple.set(0, 1u64); - tuple.set(1, "two"); - tuple.set(2, Value::Null(Null::new())); - - assert_eq!(tuple.get(0), Value::Word(Word::new(1))); - assert_eq!(tuple.get(1), Value::Blob(Blob::from("two"))); - assert_eq!(tuple.get(2), Value::Null(Null::new())); - - let taken = tuple.take(1); - assert_eq!(taken, Value::Blob(Blob::from("two"))); - assert_eq!(tuple.get(1), Value::Null(Null::new())); - - let items: Vec = tuple.iter().collect(); - assert_eq!(items.len(), 3); - } - - // Checks tuple swap semantics and out-of-bounds errors. - #[test] - fn test_tuple_swap_and_bounds_errors() { - let mut tuple = Tuple::new(2); - tuple.set(0, 1u64); - tuple.set(1, 2u64); - - let mut replacement = Value::Blob(Blob::from("swap")); - tuple.swap(0, &mut replacement); - assert_eq!(replacement, Value::Word(Word::new(1))); - assert_eq!(tuple.get(0), Value::Blob(Blob::from("swap"))); - - let result = ::get_tuple(&tuple, 3); - assert!(matches!(result, Err(Error::InvalidIndex(3)))); - let result = - ::set_tuple(&mut tuple, 3, 5u64.into()); - assert!(matches!(result, Err(Error::InvalidIndex(3)))); - } - - // Confirms tuple byte sizes add up for mixed content. - #[test] - fn test_tuple_byte_size() { - let tuple = Value::Tuple(Tuple::from((1u64, "hi"))); - assert_eq!(tuple.byte_size(), core::mem::size_of::() + 2); - } - - // Verifies page read/write behavior and length. - #[test] - fn test_page_read_write_len() { - let mut page = Page::new(1); - assert_eq!(page.len(), 1 << 12); - - let data = [1u8, 2, 3, 4]; - let written = page.write(0, &data); - assert_eq!(written, data.len()); - - let mut buf = [0u8; 4]; - let read = page.read(0, &mut buf); - assert_eq!(read, data.len()); - assert_eq!(buf, data); - } - - // Ensures page read/write with offsets work correctly. - #[test] - fn test_page_read_write_with_offset() { - let mut page = Page::new(1); - let data = [9u8, 8, 7]; - let written = page.write(4, &data); - assert_eq!(written, data.len()); - - let mut buf = [0u8; 3]; - let read = page.read(4, &mut buf); - assert_eq!(read, data.len()); - assert_eq!(buf, data); - } - - // Confirms page size tier selection at thresholds. - #[test] - fn test_page_size_tiers() { - let small = Page::new(1); - assert_eq!(small.len(), 1 << 12); - - let mid = Page::new((1 << 12) + 1); - assert_eq!(mid.len(), 1 << 21); - - let large = Page::new((1 << 21) + 1); - assert_eq!(large.len(), 1 << 30); - } - - // Verifies default table entry sizes for small and mid tables. - #[test] - fn test_table_default_entry_sizes() { - let table_small = Table::new(1); - let entry = table_small.get(0).unwrap(); - assert_eq!(entry, Entry::Null(1 << 12)); - assert_eq!(table_small.len(), 1 << 21); - - let table_mid = Table::new((1 << 21) + 1); - let entry = table_mid.get(0).unwrap(); - assert_eq!(entry, Entry::Null(1 << 21)); - assert_eq!(table_mid.len(), 1 << 30); - } - - // Ensures tables grow when mapping beyond current range. - #[test] - fn test_table_map_growth() { - let mut table = Table::new(1); - let entry = Entry::RWPage(Page::new(1)); - let address = 1 << 21; - let _ = table.map(address, entry).unwrap(); - assert_eq!(table.len(), 1 << 30); - } - - // Verifies unmap returns None for addresses beyond the table range. - #[test] - fn test_table_unmap_out_of_range() { - let mut table = Table::new(1); - let missing = table.unmap(table.len() + 1); - assert!(missing.is_none()); - } - - // Ensures table map and unmap round-trip a page entry. 
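The size-tier assertions in these page and table tests follow one rounding rule: a request lands in the smallest of the 4KiB, 2MiB, and 1GiB tiers that can hold it, mirroring x86-64 page sizes. A standalone sketch of that rule (illustrative only, not the kernel's code):

    // Round a requested length up to the smallest tier that holds it.
    fn tier(len: usize) -> usize {
        [1usize << 12, 1 << 21, 1 << 30]
            .into_iter()
            .find(|&t| len <= t)
            .expect("request larger than the biggest tier")
    }

    fn main() {
        assert_eq!(tier(1), 1 << 12);
        assert_eq!(tier((1 << 12) + 1), 1 << 21);
        assert_eq!(tier((1 << 21) + 1), 1 << 30);
    }
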
- #[test] - fn test_table_map_unmap_roundtrip() { - let mut table = Table::new(1); - let entry = Entry::RWPage(Page::new(1)); - let old = table.map(0, entry.clone()).unwrap(); - assert_eq!(old, Entry::Null(1 << 12)); - - let unmapped = table.unmap(0); - assert_eq!(unmapped, Some(entry)); - } - - // Validates symbolic function apply and read behavior. - #[test] - fn test_function_symbolic_apply_and_read() { - let func = Function::symbolic(Word::new(7)); - assert!(func.is_symbolic()); - - let func = func.apply(1u64).apply("arg"); - let read_back = func.read_cloned(); - - let expected_args = Tuple::from((1u64, "arg")); - let expected = Value::Tuple(Tuple::from(( - Blob::from("Symbolic"), - Value::Word(Word::new(7)), - Value::Tuple(expected_args), - ))); - - assert_eq!(read_back, expected); - } - - // Ensures function argument order and force semantics. - #[test] - fn test_function_apply_order_and_force() { - let func = Function::symbolic(Word::new(9)); - let func = func.apply(1u64).apply(2u64).apply("three"); - let read_back = func.read_cloned(); - - let expected_args = Tuple::from((1u64, 2u64, "three")); - let expected = Value::Tuple(Tuple::from(( - Blob::from("Symbolic"), - Value::Word(Word::new(9)), - Value::Tuple(expected_args), - ))); - assert_eq!(read_back, expected); - - let forced = func.force(); - match forced { - Value::Function(f) => assert!(f.is_symbolic()), - _ => panic!("expected a symbolic function value"), - } - } - - // Confirms invalid function construction is rejected. - #[test] - fn test_function_new_rejects_invalid_value() { - let invalid = Value::Word(Word::new(1)); - let result = Function::new(invalid); - assert!(matches!(result, Err(Error::InvalidValue))); - } -} +// This file was created because I was trying to make tests for kernel types, but didn't realize that +// I was calling all arca types from the kernel types module. I re-made a different file for kernel types, +// but cleaned this up and left it here. Currently commenting this out because while they are passing locally, +// they are failing the clippy check and I dont want to change anything in /arca/ in order to fix it. +// Leaving everything here for now. + +// // runs with command: cargo test -p kernel --target=x86_64-unknown-none +// #[cfg(test)] +// mod tests { +// extern crate alloc; + +// use crate::prelude::*; +// use crate::types::Error; + +// // Verifies serialize and deserialize for the null value. +// #[test] +// fn test_serde_null() { +// let null = Value::Null(Null::new()); +// let bytes_vec = postcard::to_allocvec(&null).unwrap(); +// let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_null, null); +// } + +// // Verifies serialize and deserialize for a word value. +// #[test] +// fn test_serde_word() { +// let word = Value::Word(1.into()); +// let bytes_vec = postcard::to_allocvec(&word).unwrap(); +// let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_word, word); +// } + +// // Verifies serialize and deserialize for a blob value. +// #[test] +// fn test_serde_blob() { +// let blob = Value::Blob("hello, world!".into()); +// let bytes_vec = postcard::to_allocvec(&blob).unwrap(); +// let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_blob, blob); +// } + +// // Verifies serialize and deserialize for a tuple value. 
+// #[test] +// fn test_serde_tuple() { +// let tuple = Value::Tuple((1, 2, 3).into()); +// let bytes_vec = postcard::to_allocvec(&tuple).unwrap(); +// let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_tuple, tuple); +// } + +// // Verifies serialize and deserialize for a page value. +// #[test] +// fn test_serde_page() { +// let page = Value::Page(Page::new(1)); +// let bytes_vec = postcard::to_allocvec(&page).unwrap(); +// let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_page, page); +// } + +// // Verifies serialize and deserialize for a table value. +// #[test] +// fn test_serde_table() { +// let table = Value::Table(Table::new(1)); +// let bytes_vec = postcard::to_allocvec(&table).unwrap(); +// let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_table, table); +// } + +// // Verifies serialize and deserialize for a read-only page entry. +// #[test] +// fn test_serde_ropage() { +// let ropage = Entry::ROPage(Page::new(1)); +// let bytes_vec = postcard::to_allocvec(&ropage).unwrap(); +// let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_ropage, ropage); +// } + +// // Verifies serialize and deserialize for a read-write page entry. +// #[test] +// fn test_serde_rwpage() { +// let rwpage = Entry::RWPage(Page::new(1)); +// let bytes_vec = postcard::to_allocvec(&rwpage).unwrap(); +// let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_rwpage, rwpage); +// } + +// // Verifies serialize and deserialize for a read-only table entry. +// #[test] +// fn test_serde_rotable() { +// let rotable = Entry::ROTable(Table::new(1)); +// let bytes_vec = postcard::to_allocvec(&rotable).unwrap(); +// let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_rotable, rotable); +// } + +// // Verifies serialize and deserialize for a read-write table entry. +// #[test] +// fn test_serde_rwtable() { +// let rwtable = Entry::RWTable(Table::new(1)); +// let bytes_vec = postcard::to_allocvec(&rwtable).unwrap(); +// let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); +// assert_eq!(deserialized_rwtable, rwtable); +// } + +// // Ensures unknown Value variants cause the expected serde error. +// #[test] +// fn test_value_error() { +// let unknown_variant = [7, 0]; +// let deserialized: Result = postcard::from_bytes(&unknown_variant); +// let deserialized_error = deserialized.expect_err("should have been err"); +// let error = serde::de::Error::unknown_variant( +// "7", +// &["Null", "Word", "Blob", "Tuple", "Page", "Table"], +// ); +// assert_eq!(deserialized_error, error); +// } + +// // Ensures unknown Entry variants cause the expected serde error. +// #[test] +// fn test_entry_error() { +// let unknown_variant = [5, 0]; +// let deserialized: Result = postcard::from_bytes(&unknown_variant); +// let deserialized_error = deserialized.expect_err("should have been err"); +// let error = serde::de::Error::unknown_variant( +// "5", +// &["Null", "ROPage", "RWPage", "ROTable", "RWTable"], +// ); +// assert_eq!(deserialized_error, error); +// } + +// // Confirms datatype tagging and default value behavior. 
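The unknown-variant tests above work because postcard encodes an enum's variant as a leading varint index, so an index past the last variant must fail to decode. A freestanding demonstration with a two-variant stand-in enum (assumes serde's derive feature; the kernel's Value has more variants but the failure mode is the same):

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    enum Value {
        Null,
        Word(u64),
    }

    fn main() {
        let bogus = [7u8, 0]; // variant index 7, but only 0 and 1 exist
        let result: Result<Value, postcard::Error> = postcard::from_bytes(&bogus);
        assert!(result.is_err()); // deserialization must reject it
    }
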
+// #[test] +// fn test_value_datatype_and_defaults() { +// let null = Value::Null(Null::new()); +// let word = Value::Word(Word::new(42)); +// let blob = Value::Blob(Blob::from("hi")); +// let tuple = Value::Tuple(Tuple::from((1u64, "x"))); +// let page = Value::Page(Page::new(1)); +// let table = Value::Table(Table::new(1)); + +// assert_eq!(null.datatype(), DataType::Null); +// assert_eq!(word.datatype(), DataType::Word); +// assert_eq!(blob.datatype(), DataType::Blob); +// assert_eq!(tuple.datatype(), DataType::Tuple); +// assert_eq!(page.datatype(), DataType::Page); +// assert_eq!(table.datatype(), DataType::Table); +// assert_eq!(Value::default().datatype(), DataType::Null); +// } + +// // Checks word read semantics and byte size. +// #[test] +// fn test_word_read_and_byte_size() { +// let word = Word::new(0xdeadbeef); +// assert_eq!(word.read(), 0xdeadbeef); + +// let value = Value::Word(word); +// assert_eq!(value.byte_size(), core::mem::size_of::()); +// } + +// // Confirms blob length and read behavior. +// #[test] +// fn test_blob_read_and_len() { +// let blob = Blob::from("hello"); +// assert_eq!(blob.len(), 5); + +// let mut buf = [0u8; 8]; +// let read = blob.read(0, &mut buf); +// assert_eq!(read, 5); +// assert_eq!(&buf[..5], b"hello"); +// } + +// // Verifies blob reads with an offset return the expected suffix. +// #[test] +// fn test_blob_read_with_offset() { +// let blob = Blob::from("offset"); +// let mut buf = [0u8; 8]; +// let read = blob.read(3, &mut buf); +// assert_eq!(read, 3); +// assert_eq!(&buf[..3], b"set"); +// } + +// // Ensures invalid UTF-8 blobs preserve raw bytes on read. +// #[test] +// fn test_blob_invalid_utf8_roundtrip() { +// let bytes = [0xffu8, 0xfeu8, 0xfdu8]; +// let blob = Blob::from(bytes.as_slice()); +// let mut buf = [0u8; 4]; +// let read = blob.read(0, &mut buf); +// assert_eq!(read, bytes.len()); +// assert_eq!(&buf[..bytes.len()], &bytes); +// } + +// // Validates tuple set/get/take and iteration order. +// #[test] +// fn test_tuple_set_get_take_and_iter() { +// let mut tuple = Tuple::new(3); +// tuple.set(0, 1u64); +// tuple.set(1, "two"); +// tuple.set(2, Value::Null(Null::new())); + +// assert_eq!(tuple.get(0), Value::Word(Word::new(1))); +// assert_eq!(tuple.get(1), Value::Blob(Blob::from("two"))); +// assert_eq!(tuple.get(2), Value::Null(Null::new())); + +// let taken = tuple.take(1); +// assert_eq!(taken, Value::Blob(Blob::from("two"))); +// assert_eq!(tuple.get(1), Value::Null(Null::new())); + +// let items: Vec = tuple.iter().collect(); +// assert_eq!(items.len(), 3); +// } + +// // Checks tuple swap semantics and out-of-bounds errors. +// #[test] +// fn test_tuple_swap_and_bounds_errors() { +// let mut tuple = Tuple::new(2); +// tuple.set(0, 1u64); +// tuple.set(1, 2u64); + +// let mut replacement = Value::Blob(Blob::from("swap")); +// tuple.swap(0, &mut replacement); +// assert_eq!(replacement, Value::Word(Word::new(1))); +// assert_eq!(tuple.get(0), Value::Blob(Blob::from("swap"))); + +// let result = ::get_tuple(&tuple, 3); +// assert!(matches!(result, Err(Error::InvalidIndex(3)))); +// let result = +// ::set_tuple(&mut tuple, 3, 5u64.into()); +// assert!(matches!(result, Err(Error::InvalidIndex(3)))); +// } + +// // Confirms tuple byte sizes add up for mixed content. +// #[test] +// fn test_tuple_byte_size() { +// let tuple = Value::Tuple(Tuple::from((1u64, "hi"))); +// assert_eq!(tuple.byte_size(), core::mem::size_of::() + 2); +// } + +// // Verifies page read/write behavior and length. 
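The byte-size test above treats a tuple's size as the sum of its parts: eight bytes for a word plus one byte per blob byte. A stand-in model of that accounting (the real Value type differs; only the arithmetic is the point here):

    enum Value {
        Word(u64),
        Blob(Vec<u8>),
    }

    // A tuple's byte size is the sum of its components' sizes.
    fn byte_size(values: &[Value]) -> usize {
        values
            .iter()
            .map(|v| match v {
                Value::Word(_) => core::mem::size_of::<u64>(),
                Value::Blob(b) => b.len(),
            })
            .sum()
    }

    fn main() {
        let tuple = [Value::Word(1), Value::Blob(b"hi".to_vec())];
        assert_eq!(byte_size(&tuple), 8 + 2);
    }
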
+// #[test] +// fn test_page_read_write_len() { +// let mut page = Page::new(1); +// assert_eq!(page.len(), 1 << 12); + +// let data = [1u8, 2, 3, 4]; +// let written = page.write(0, &data); +// assert_eq!(written, data.len()); + +// let mut buf = [0u8; 4]; +// let read = page.read(0, &mut buf); +// assert_eq!(read, data.len()); +// assert_eq!(buf, data); +// } + +// // Ensures page read/write with offsets work correctly. +// #[test] +// fn test_page_read_write_with_offset() { +// let mut page = Page::new(1); +// let data = [9u8, 8, 7]; +// let written = page.write(4, &data); +// assert_eq!(written, data.len()); + +// let mut buf = [0u8; 3]; +// let read = page.read(4, &mut buf); +// assert_eq!(read, data.len()); +// assert_eq!(buf, data); +// } + +// // Confirms page size tier selection at thresholds. +// #[test] +// fn test_page_size_tiers() { +// let small = Page::new(1); +// assert_eq!(small.len(), 1 << 12); + +// let mid = Page::new((1 << 12) + 1); +// assert_eq!(mid.len(), 1 << 21); + +// let large = Page::new((1 << 21) + 1); +// assert_eq!(large.len(), 1 << 30); +// } + +// // Verifies default table entry sizes for small and mid tables. +// #[test] +// fn test_table_default_entry_sizes() { +// let table_small = Table::new(1); +// let entry = table_small.get(0).unwrap(); +// assert_eq!(entry, Entry::Null(1 << 12)); +// assert_eq!(table_small.len(), 1 << 21); + +// let table_mid = Table::new((1 << 21) + 1); +// let entry = table_mid.get(0).unwrap(); +// assert_eq!(entry, Entry::Null(1 << 21)); +// assert_eq!(table_mid.len(), 1 << 30); +// } + +// // Ensures tables grow when mapping beyond current range. +// #[test] +// fn test_table_map_growth() { +// let mut table = Table::new(1); +// let entry = Entry::RWPage(Page::new(1)); +// let address = 1 << 21; +// let _ = table.map(address, entry).unwrap(); +// assert_eq!(table.len(), 1 << 30); +// } + +// // Verifies unmap returns None for addresses beyond the table range. +// #[test] +// fn test_table_unmap_out_of_range() { +// let mut table = Table::new(1); +// let missing = table.unmap(table.len() + 1); +// assert!(missing.is_none()); +// } + +// // Ensures table map and unmap round-trip a page entry. +// #[test] +// fn test_table_map_unmap_roundtrip() { +// let mut table = Table::new(1); +// let entry = Entry::RWPage(Page::new(1)); +// let old = table.map(0, entry.clone()).unwrap(); +// assert_eq!(old, Entry::Null(1 << 12)); + +// let unmapped = table.unmap(0); +// assert_eq!(unmapped, Some(entry)); +// } + +// // Validates symbolic function apply and read behavior. +// #[test] +// fn test_function_symbolic_apply_and_read() { +// let func = Function::symbolic(Word::new(7)); +// assert!(func.is_symbolic()); + +// let func = func.apply(1u64).apply("arg"); +// let read_back = func.read_cloned(); + +// let expected_args = Tuple::from((1u64, "arg")); +// let expected = Value::Tuple(Tuple::from(( +// Blob::from("Symbolic"), +// Value::Word(Word::new(7)), +// Value::Tuple(expected_args), +// ))); + +// assert_eq!(read_back, expected); +// } + +// // Ensures function argument order and force semantics. 
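The apply-order test below relies on each apply appending exactly one argument, so reading the function back sees arguments in application order. A toy model of that accumulation (the kernel's Function applies Values, not strings; only the ordering behavior is modeled):

    #[derive(Debug)]
    struct Symbolic {
        head: u64,
        args: Vec<String>,
    }

    impl Symbolic {
        // Each apply consumes the function and appends one argument.
        fn apply(mut self, arg: impl ToString) -> Self {
            self.args.push(arg.to_string());
            self
        }
    }

    fn main() {
        let f = Symbolic { head: 9, args: Vec::new() }
            .apply(1u64)
            .apply(2u64)
            .apply("three");
        assert_eq!(f.head, 9);
        assert_eq!(f.args, ["1", "2", "three"]); // application order is preserved
    }
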
+// #[test]
+// fn test_function_apply_order_and_force() {
+// let func = Function::symbolic(Word::new(9));
+// let func = func.apply(1u64).apply(2u64).apply("three");
+// let read_back = func.read_cloned();
+
+// let expected_args = Tuple::from((1u64, 2u64, "three"));
+// let expected = Value::Tuple(Tuple::from((
+// Blob::from("Symbolic"),
+// Value::Word(Word::new(9)),
+// Value::Tuple(expected_args),
+// )));
+// assert_eq!(read_back, expected);
+
+// let forced = func.force();
+// match forced {
+// Value::Function(f) => assert!(f.is_symbolic()),
+// _ => panic!("expected a symbolic function value"),
+// }
+// }
+
+// // Confirms invalid function construction is rejected.
+// #[test]
+// fn test_function_new_rejects_invalid_value() {
+// let invalid = Value::Word(Word::new(1));
+// let result = Function::new(invalid);
+// assert!(matches!(result, Err(Error::InvalidValue)));
+// }
+// }

From 30cc5fb3679b138b3434e61fc27c1feba3aa1614 Mon Sep 17 00:00:00 2001
From: Niam Shah
Date: Mon, 16 Feb 2026 19:51:51 -0800
Subject: [PATCH 05/15] Fixed 4 bugs: two where sizes too small for the
 allocator or indexes too big were being accepted, and two more where the
 allocate_many functions were not releasing the lock when they ran out of
 memory mid-function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 common/src/buddy.rs | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/common/src/buddy.rs b/common/src/buddy.rs
index c994fc6..52d4664 100644
--- a/common/src/buddy.rs
+++ b/common/src/buddy.rs
@@ -413,6 +413,13 @@ impl AllocatorInner {
 size: 1 << size_log2,
 });
 }
+ // Check if index is within valid range for this level
+ if index >= self.size_of_level_bits(size_log2) {
+ return Err(AllocationError::InvalidReservation {
+ index,
+ size: 1 << size_log2,
+ });
+ }
 self.with_level(base, size_log2, |level: &mut AllocatorLevel<'_>| {
 if level.reserve(index) {
 Ok(index)
@@ -553,12 +560,15 @@ impl BuddyAllocatorImpl {
 // prevent physical zero page from being allocated
 assert_eq!(temp.to_offset(temp.reserve_raw(0, 4096)), 0);

- // reserve kernel pages
+ // reserve kernel pages (only if within range)
 let mut pages = alloc::vec![];
 for i in 0..8 {
- let p = temp.reserve_raw(0x100000 * (i + 1), 0x100000);
- assert!(!p.is_null());
- pages.push(p);
+ let addr = 0x100000 * (i + 1);
+ if addr + 0x100000 <= size {
+ let p = temp.reserve_raw(addr, 0x100000);
+ assert!(!p.is_null());
+ pages.push(p);
+ }
 }

 let new_inner = AllocatorInner::new_in(slice, &temp);
@@ -681,6 +691,7 @@ impl BuddyAllocatorImpl {
 for (i, item) in ptrs.iter_mut().enumerate() {
 let result = self.allocate_raw_unchecked(size);
 if result.is_null() {
+ self.inner.unlock();
 return Some(i);
 }
 *item = result;
@@ -696,6 +707,7 @@ impl BuddyAllocatorImpl {
 for (i, item) in ptrs.iter_mut().enumerate() {
 let result = self.allocate_raw_unchecked(size);
 if result.is_null() {
+ self.inner.unlock();
 return i;
 }
 *item = result;
@@ -1584,7 +1596,6 @@ mod tests {
 }

 #[test]
- #[ignore]
 // Testing allocating more pointers than space available, making sure bulk alloc stops cleanly when space out,
 // and partial success allowed, reported successes are valid.
Currently hanging fn test_allocate_many_partial_success() { @@ -1930,7 +1941,6 @@ mod tests { } #[test] - #[ignore] // Testing allocate_many_raw where partial failure does not poison the lock // Currently hanging because partial failures is not working, test after that is fixed fn allocate_many_partial_failure_does_not_poison_lock() { From af2f21bf81433973d38f2b50e5e73627938144a1 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Fri, 10 Apr 2026 16:34:32 -0700 Subject: [PATCH 06/15] rebased with rust 1.95 changes and fixed warnings --- common/src/buddy.rs | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) diff --git a/common/src/buddy.rs b/common/src/buddy.rs index 52d4664..602630e 100644 --- a/common/src/buddy.rs +++ b/common/src/buddy.rs @@ -1155,8 +1155,6 @@ mod tests { extern crate test; use super::*; - use test::Bencher; - #[test] // Setting/clearing individual bits in u64 words to check that the bit manipulation works fn test_bitref() { @@ -1218,8 +1216,8 @@ mod tests { fn test_too_small_allocation() { let allocator = BuddyAllocatorImpl::new(1 << 20); let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); - let ptr = allocator.allocate_raw(size); + let _used_before = allocator.used_size(); + let _ptr = allocator.allocate_raw(size); } #[test] @@ -1495,23 +1493,6 @@ mod tests { assert!(allocator.used_size() < used_peak); } - #[test] - #[should_panic(expected = "assertion failed")] - #[ignore] - // After looking more closely after writing this, I realized that panicking here is not an expected behavior. - // Also, free raw could merge into a larger block, complicating this test. - // Keeping this test for now, in case we decide to support this behavior later. - fn test_double_free_panics() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - - allocator.free_raw(ptr, size); - allocator.free_raw(ptr, size); // Should panic - } - #[test] // Test allocation size rounding. 
Test after exhausting, allocator should still be usable -- no lock leak fn allocation_rounds_up_to_pow2_and_min() { @@ -1553,7 +1534,7 @@ mod tests { ranges.sort_by_key(|(start, _, _)| *start); for w in ranges.windows(2) { - let (s1, e1, l1) = w[0]; + let (_s1, e1, l1) = w[0]; let (s2, _e2, l2) = w[1]; assert!(e1 <= s2, "overlap between level {} and level {}", l1, l2); } @@ -2067,7 +2048,7 @@ mod tests { } let obstacle = leaves[7]; // arbitrary leaf to hold - for (i, q) in leaves.iter().enumerate() { + for (_i, q) in leaves.iter().enumerate() { if *q == obstacle { continue; } From de4db72d883cb276707db33c3cd5e70b0411d268 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Fri, 10 Apr 2026 16:53:12 -0700 Subject: [PATCH 07/15] upgrading to rust 1.96 --- common/src/lib.rs | 8 -------- vmm/src/lib.rs | 3 --- vmm/src/main.rs | 3 --- 3 files changed, 14 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 5af14d8..8b526b5 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,18 +1,10 @@ #![cfg_attr(not(feature = "std"), no_std)] #![feature(allocator_api)] -#![feature(box_as_ptr)] #![feature(fn_traits)] #![feature(layout_for_ptr)] -#![feature(maybe_uninit_as_bytes)] #![feature(negative_impls)] -#![feature(new_range_api)] #![feature(ptr_metadata)] -#![feature(slice_from_ptr_range)] -#![feature(sync_unsafe_cell)] -#![feature(try_trait_v2)] -#![feature(test)] #![feature(unboxed_closures)] -#![cfg_attr(feature = "std", feature(thread_id_value))] #![cfg_attr(feature = "thread_local_cache", feature(thread_local))] pub mod buddy; diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index a76cd09..e0413c8 100644 --- a/vmm/src/lib.rs +++ b/vmm/src/lib.rs @@ -1,11 +1,8 @@ #![feature(allocator_api)] #![feature(ptr_metadata)] -#![feature(box_into_inner)] #![feature(str_from_raw_parts)] -#![feature(negative_impls)] #![feature(exitcode_exit_method)] #![feature(cstr_display)] -#![feature(test)] pub mod runtime; pub mod vhost; diff --git a/vmm/src/main.rs b/vmm/src/main.rs index c0219cc..6fc19e6 100644 --- a/vmm/src/main.rs +++ b/vmm/src/main.rs @@ -1,6 +1,3 @@ -#![feature(allocator_api)] -#![feature(thread_sleep_until)] -#![feature(future_join)] use std::path::PathBuf; From 5bac46f33c6bd2c960a7b11e102c6209008049e1 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Fri, 10 Apr 2026 17:00:18 -0700 Subject: [PATCH 08/15] added test harness --- common/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/src/lib.rs b/common/src/lib.rs index 8b526b5..22d83fa 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -4,6 +4,7 @@ #![feature(layout_for_ptr)] #![feature(negative_impls)] #![feature(ptr_metadata)] +#![feature(test)] #![feature(unboxed_closures)] #![cfg_attr(feature = "thread_local_cache", feature(thread_local))] From b78a22cc1649a8dc453ff41af8c83150b234f838 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Thu, 16 Apr 2026 01:11:45 -0700 Subject: [PATCH 09/15] testing 1 --- common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 22d83fa..be7a188 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -4,7 +4,7 @@ #![feature(layout_for_ptr)] #![feature(negative_impls)] #![feature(ptr_metadata)] -#![feature(test)] +#![cfg_attr(test, feature(test))] #![feature(unboxed_closures)] #![cfg_attr(feature = "thread_local_cache", feature(thread_local))] From 786bb6b9115feb361d407f19facf21293466700f Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Thu, 16 Apr 2026 01:16:49 -0700 Subject: [PATCH 10/15] fix 2 --- vmm/src/main.rs 
| 1 -
 1 file changed, 1 deletion(-)

diff --git a/vmm/src/main.rs b/vmm/src/main.rs
index 6fc19e6..245b201 100644
--- a/vmm/src/main.rs
+++ b/vmm/src/main.rs
@@ -1,4 +1,3 @@
-
 use std::path::PathBuf;

 use clap::Parser;

From df18a55ade00fceb7e0c354412adbc1b1256805b Mon Sep 17 00:00:00 2001
From: Niam Shah
Date: Thu, 16 Apr 2026 11:16:00 -0700
Subject: [PATCH 11/15] moved tests into individual files, removed all
 unnecessary commented-out text

---
 common/src/lib.rs | 7 +
 kernel/src/lib.rs | 2 +-
 kernel/src/tests.rs | 1 -
 kernel/src/tests/test_kernel_types.rs | 175 ---------
 kernel/src/tests/test_serde.rs | 499 +++++++-------------------
 kernel/src/types/blob.rs | 37 ++
 kernel/src/types/function.rs | 48 +++
 kernel/src/types/null.rs | 11 +
 kernel/src/types/page.rs | 35 ++
 kernel/src/types/runtime.rs | 1 -
 kernel/src/types/table.rs | 33 ++
 kernel/src/types/tuple.rs | 25 ++
 kernel/src/types/value.rs | 30 ++
 kernel/src/types/word.rs | 28 ++
 14 files changed, 380 insertions(+), 552 deletions(-)
 delete mode 100644 kernel/src/tests/test_kernel_types.rs

diff --git a/common/src/lib.rs b/common/src/lib.rs
index be7a188..0cba9e9 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -1,11 +1,18 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 #![feature(allocator_api)]
+#![feature(box_as_ptr)]
 #![feature(fn_traits)]
 #![feature(layout_for_ptr)]
+#![feature(maybe_uninit_as_bytes)]
 #![feature(negative_impls)]
+#![feature(new_range_api)]
 #![feature(ptr_metadata)]
+#![feature(slice_from_ptr_range)]
+#![feature(sync_unsafe_cell)]
+#![feature(try_trait_v2)]
 #![cfg_attr(test, feature(test))]
 #![feature(unboxed_closures)]
+#![cfg_attr(feature = "std", feature(thread_id_value))]
 #![cfg_attr(feature = "thread_local_cache", feature(thread_local))]

 pub mod buddy;
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index eae2409..991048b 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -1,7 +1,7 @@
 #![no_main]
 #![no_std]
 #![feature(allocator_api)]
-#![feature(widening_mul)]
+#![feature(bigint_helper_methods)]
 #![feature(box_as_ptr)]
 #![feature(box_into_inner)]
 #![feature(maybe_uninit_array_assume_init)]
diff --git a/kernel/src/tests.rs b/kernel/src/tests.rs
index 7681ebc..0339d5c 100644
--- a/kernel/src/tests.rs
+++ b/kernel/src/tests.rs
@@ -1,2 +1 @@
-pub mod test_kernel_types;
 pub mod test_serde;
diff --git a/kernel/src/tests/test_kernel_types.rs b/kernel/src/tests/test_kernel_types.rs
deleted file mode 100644
index 14468e6..0000000
--- a/kernel/src/tests/test_kernel_types.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-// runs with command: cargo test -p kernel --target=x86_64-unknown-none
-#[cfg(test)]
-mod tests {
- extern crate alloc;
-
- use crate::types::internal as ktypes;
- use crate::types::{
- Blob as ArcaBlob, Entry as ArcaEntry, Null as ArcaNull, Table as ArcaTable,
- Tuple as ArcaTuple, Value as ArcaValue, Word as ArcaWord,
- };
- use alloc::vec;
-
- // Verifies internal word read/write semantics.
- #[test]
- fn test_internal_word_read() {
- let word = ktypes::Word::new(123);
- assert_eq!(word.read(), 123);
- }
-
- // Ensures internal null construction is consistent.
- #[test]
- fn test_internal_null_default() {
- let null = ktypes::Null::new();
- let default = ktypes::Null::default();
- assert_eq!(null, default);
- }
-
- // Confirms internal blob mutability converts to raw bytes.
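The blob test removed below exercised byte indexing, in-place mutation, and conversion back to raw bytes. A stand-in type sketching that interface shape (illustrative only, not the kernel's Blob):

    use core::ops::{Index, IndexMut};

    struct Blob(Vec<u8>);

    impl Blob {
        fn into_inner(self) -> Vec<u8> {
            self.0
        }
    }

    impl Index<usize> for Blob {
        type Output = u8;
        fn index(&self, i: usize) -> &u8 {
            &self.0[i]
        }
    }

    impl IndexMut<usize> for Blob {
        fn index_mut(&mut self, i: usize) -> &mut u8 {
            &mut self.0[i]
        }
    }

    fn main() {
        let mut blob = Blob(b"hello".to_vec());
        blob[0] = b'j'; // mutate one byte through IndexMut
        assert_eq!(blob.into_inner(), b"jello".to_vec()); // back to raw bytes
    }
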
- #[test] - fn test_internal_blob_mutation() { - let mut blob = ktypes::Blob::new(b"hello".to_vec()); - assert_eq!(blob.len(), 5); - blob[0] = b'j'; - let bytes = blob.into_inner(); - assert_eq!(&bytes[..], b"jello"); - } - - // Ensures invalid UTF-8 stays as raw bytes internally. - #[test] - fn test_internal_blob_invalid_utf8() { - let bytes = vec![0xffu8, 0xfeu8, 0xfdu8]; - let blob = ktypes::Blob::new(bytes.clone()); - let out = blob.into_inner(); - assert_eq!(&out[..], &bytes); - } - - // Validates internal tuple defaults and indexing. - #[test] - fn test_internal_tuple_defaults() { - let tuple = ktypes::Tuple::new_with_len(2); - assert_eq!(tuple.len(), 2); - assert!(matches!(tuple[0], ArcaValue::Null(_))); - assert!(matches!(tuple[1], ArcaValue::Null(_))); - } - - // Verifies internal tuple construction from iterators. - #[test] - fn test_internal_tuple_from_iter() { - let values = vec![ - ArcaValue::Word(ArcaWord::new(1)), - ArcaValue::Blob(ArcaBlob::from("x")), - ]; - let tuple: ktypes::Tuple = values.clone().into_iter().collect(); - assert_eq!(tuple.len(), values.len()); - assert_eq!(tuple[0], values[0]); - assert_eq!(tuple[1], values[1]); - } - - // Confirms internal page size tiers and shared content. - #[test] - fn test_internal_page_size_and_shared() { - let mut page = ktypes::Page::new(1); - assert_eq!(page.size(), 1 << 12); - page[0] = 7; - let shared = page.clone().shared(); - assert_eq!(shared[0], 7); - - let mid = ktypes::Page::new((1 << 12) + 1); - assert_eq!(mid.size(), 1 << 21); - } - - // Verifies internal table size tiers and set/get behavior. - #[test] - fn test_internal_table_get_set() { - let mut table = ktypes::Table::new(1); - assert_eq!(table.size(), 1 << 21); - - let entry = ArcaEntry::RWPage(crate::types::Page::new(1)); - let old = table.set(0, entry.clone()).unwrap(); - assert_eq!(old, ArcaEntry::Null(1 << 12)); - - let fetched = table.get(0); - assert_eq!(fetched, entry); - - let large = ktypes::Table::new((1 << 21) + 1); - assert_eq!(large.size(), 1 << 30); - } - - // Ensures internal table returns default null entries for empty slots. - #[test] - fn test_internal_table_default_entry() { - let table = ktypes::Table::new(1); - let entry = table.get(10); - assert_eq!(entry, ArcaEntry::Null(1 << 12)); - } - - // Ensures internal value conversions work as expected. - #[test] - fn test_internal_value_conversions() { - let word = ktypes::Word::new(99); - let value: ktypes::Value = word.clone().into(); - let roundtrip = ktypes::Word::try_from(value).unwrap(); - assert_eq!(roundtrip, word); - - let blob = ktypes::Blob::new(b"data".to_vec()); - let value: ktypes::Value = blob.clone().into(); - let roundtrip = ktypes::Blob::try_from(value).unwrap(); - assert_eq!(roundtrip, blob); - } - - // Verifies mismatched internal value conversions return an error. - #[test] - fn test_internal_value_conversion_error() { - let value: ktypes::Value = ktypes::Word::new(1).into(); - let result = ktypes::Blob::try_from(value); - assert!(result.is_err()); - } - - // Validates symbolic function parsing and read round-trip. - #[test] - fn test_internal_function_symbolic_parse() { - let args = ArcaTuple::from((1u64, "two")); - let value = ArcaValue::Tuple(ArcaTuple::from(( - ArcaBlob::from("Symbolic"), - ArcaValue::Word(ArcaWord::new(5)), - ArcaValue::Tuple(args), - ))); - let func = ktypes::Function::new(value.clone()).expect("symbolic parse failed"); - assert!(!func.is_arcane()); - assert_eq!(func.read(), value); - } - - // Ensures invalid function tags are rejected. 
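The value-conversion tests above depend on fallible downcasts: converting a Value back into a concrete type succeeds only when the variant matches. A sketch with stand-in types and a placeholder error type:

    #[derive(Debug, PartialEq)]
    enum Value {
        Word(u64),
        Blob(Vec<u8>),
    }

    impl TryFrom<Value> for u64 {
        type Error = &'static str; // placeholder for the kernel's Error type
        fn try_from(value: Value) -> Result<u64, Self::Error> {
            match value {
                Value::Word(w) => Ok(w),
                _ => Err("not a Word"),
            }
        }
    }

    fn main() {
        assert_eq!(u64::try_from(Value::Word(99)), Ok(99));
        assert!(u64::try_from(Value::Blob(b"data".to_vec())).is_err());
    }
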
- #[test] - fn test_internal_function_invalid_tag() { - let value = ArcaValue::Tuple(ArcaTuple::from(( - ArcaBlob::from("Other"), - ArcaValue::Null(ArcaNull::new()), - ))); - let func = ktypes::Function::new(value); - assert!(func.is_none()); - } - - // Verifies arcane function parsing accepts valid layouts. - #[test] - fn test_internal_function_arcane_parse() { - let mut registers = ArcaTuple::new(18); - for i in 0..18 { - registers.set(i, ArcaValue::Null(ArcaNull::new())); - } - let mut data = ArcaTuple::new(4); - data.set(0, ArcaValue::Tuple(registers)); - data.set(1, ArcaValue::Table(ArcaTable::new(1))); - data.set(2, ArcaValue::Tuple(ArcaTuple::new(0))); - data.set(3, ArcaValue::Tuple(ArcaTuple::new(0))); - - let value = ArcaValue::Tuple(ArcaTuple::from(( - ArcaBlob::from("Arcane"), - ArcaValue::Tuple(data), - ArcaValue::Tuple(ArcaTuple::new(0)), - ))); - let func = ktypes::Function::new(value).expect("arcane parse failed"); - assert!(func.is_arcane()); - } -} diff --git a/kernel/src/tests/test_serde.rs b/kernel/src/tests/test_serde.rs index ac76810..102de14 100644 --- a/kernel/src/tests/test_serde.rs +++ b/kernel/src/tests/test_serde.rs @@ -1,374 +1,125 @@ -// This file was created because I was trying to make tests for kernel types, but didn't realize that -// I was calling all arca types from the kernel types module. I re-made a different file for kernel types, -// but cleaned this up and left it here. Currently commenting this out because while they are passing locally, -// they are failing the clippy check and I dont want to change anything in /arca/ in order to fix it. -// Leaving everything here for now. - -// // runs with command: cargo test -p kernel --target=x86_64-unknown-none -// #[cfg(test)] -// mod tests { -// extern crate alloc; - -// use crate::prelude::*; -// use crate::types::Error; - -// // Verifies serialize and deserialize for the null value. -// #[test] -// fn test_serde_null() { -// let null = Value::Null(Null::new()); -// let bytes_vec = postcard::to_allocvec(&null).unwrap(); -// let deserialized_null: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_null, null); -// } - -// // Verifies serialize and deserialize for a word value. -// #[test] -// fn test_serde_word() { -// let word = Value::Word(1.into()); -// let bytes_vec = postcard::to_allocvec(&word).unwrap(); -// let deserialized_word: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_word, word); -// } - -// // Verifies serialize and deserialize for a blob value. -// #[test] -// fn test_serde_blob() { -// let blob = Value::Blob("hello, world!".into()); -// let bytes_vec = postcard::to_allocvec(&blob).unwrap(); -// let deserialized_blob: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_blob, blob); -// } - -// // Verifies serialize and deserialize for a tuple value. -// #[test] -// fn test_serde_tuple() { -// let tuple = Value::Tuple((1, 2, 3).into()); -// let bytes_vec = postcard::to_allocvec(&tuple).unwrap(); -// let deserialized_tuple: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_tuple, tuple); -// } - -// // Verifies serialize and deserialize for a page value. -// #[test] -// fn test_serde_page() { -// let page = Value::Page(Page::new(1)); -// let bytes_vec = postcard::to_allocvec(&page).unwrap(); -// let deserialized_page: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_page, page); -// } - -// // Verifies serialize and deserialize for a table value. 
-// #[test] -// fn test_serde_table() { -// let table = Value::Table(Table::new(1)); -// let bytes_vec = postcard::to_allocvec(&table).unwrap(); -// let deserialized_table: Value = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_table, table); -// } - -// // Verifies serialize and deserialize for a read-only page entry. -// #[test] -// fn test_serde_ropage() { -// let ropage = Entry::ROPage(Page::new(1)); -// let bytes_vec = postcard::to_allocvec(&ropage).unwrap(); -// let deserialized_ropage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_ropage, ropage); -// } - -// // Verifies serialize and deserialize for a read-write page entry. -// #[test] -// fn test_serde_rwpage() { -// let rwpage = Entry::RWPage(Page::new(1)); -// let bytes_vec = postcard::to_allocvec(&rwpage).unwrap(); -// let deserialized_rwpage: Entry = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_rwpage, rwpage); -// } - -// // Verifies serialize and deserialize for a read-only table entry. -// #[test] -// fn test_serde_rotable() { -// let rotable = Entry::ROTable(Table::new(1)); -// let bytes_vec = postcard::to_allocvec(&rotable).unwrap(); -// let deserialized_rotable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_rotable, rotable); -// } - -// // Verifies serialize and deserialize for a read-write table entry. -// #[test] -// fn test_serde_rwtable() { -// let rwtable = Entry::RWTable(Table::new(1)); -// let bytes_vec = postcard::to_allocvec(&rwtable).unwrap(); -// let deserialized_rwtable: Entry = postcard::from_bytes(&bytes_vec).unwrap(); -// assert_eq!(deserialized_rwtable, rwtable); -// } - -// // Ensures unknown Value variants cause the expected serde error. -// #[test] -// fn test_value_error() { -// let unknown_variant = [7, 0]; -// let deserialized: Result = postcard::from_bytes(&unknown_variant); -// let deserialized_error = deserialized.expect_err("should have been err"); -// let error = serde::de::Error::unknown_variant( -// "7", -// &["Null", "Word", "Blob", "Tuple", "Page", "Table"], -// ); -// assert_eq!(deserialized_error, error); -// } - -// // Ensures unknown Entry variants cause the expected serde error. -// #[test] -// fn test_entry_error() { -// let unknown_variant = [5, 0]; -// let deserialized: Result = postcard::from_bytes(&unknown_variant); -// let deserialized_error = deserialized.expect_err("should have been err"); -// let error = serde::de::Error::unknown_variant( -// "5", -// &["Null", "ROPage", "RWPage", "ROTable", "RWTable"], -// ); -// assert_eq!(deserialized_error, error); -// } - -// // Confirms datatype tagging and default value behavior. -// #[test] -// fn test_value_datatype_and_defaults() { -// let null = Value::Null(Null::new()); -// let word = Value::Word(Word::new(42)); -// let blob = Value::Blob(Blob::from("hi")); -// let tuple = Value::Tuple(Tuple::from((1u64, "x"))); -// let page = Value::Page(Page::new(1)); -// let table = Value::Table(Table::new(1)); - -// assert_eq!(null.datatype(), DataType::Null); -// assert_eq!(word.datatype(), DataType::Word); -// assert_eq!(blob.datatype(), DataType::Blob); -// assert_eq!(tuple.datatype(), DataType::Tuple); -// assert_eq!(page.datatype(), DataType::Page); -// assert_eq!(table.datatype(), DataType::Table); -// assert_eq!(Value::default().datatype(), DataType::Null); -// } - -// // Checks word read semantics and byte size. 
-// #[test] -// fn test_word_read_and_byte_size() { -// let word = Word::new(0xdeadbeef); -// assert_eq!(word.read(), 0xdeadbeef); - -// let value = Value::Word(word); -// assert_eq!(value.byte_size(), core::mem::size_of::()); -// } - -// // Confirms blob length and read behavior. -// #[test] -// fn test_blob_read_and_len() { -// let blob = Blob::from("hello"); -// assert_eq!(blob.len(), 5); - -// let mut buf = [0u8; 8]; -// let read = blob.read(0, &mut buf); -// assert_eq!(read, 5); -// assert_eq!(&buf[..5], b"hello"); -// } - -// // Verifies blob reads with an offset return the expected suffix. -// #[test] -// fn test_blob_read_with_offset() { -// let blob = Blob::from("offset"); -// let mut buf = [0u8; 8]; -// let read = blob.read(3, &mut buf); -// assert_eq!(read, 3); -// assert_eq!(&buf[..3], b"set"); -// } - -// // Ensures invalid UTF-8 blobs preserve raw bytes on read. -// #[test] -// fn test_blob_invalid_utf8_roundtrip() { -// let bytes = [0xffu8, 0xfeu8, 0xfdu8]; -// let blob = Blob::from(bytes.as_slice()); -// let mut buf = [0u8; 4]; -// let read = blob.read(0, &mut buf); -// assert_eq!(read, bytes.len()); -// assert_eq!(&buf[..bytes.len()], &bytes); -// } - -// // Validates tuple set/get/take and iteration order. -// #[test] -// fn test_tuple_set_get_take_and_iter() { -// let mut tuple = Tuple::new(3); -// tuple.set(0, 1u64); -// tuple.set(1, "two"); -// tuple.set(2, Value::Null(Null::new())); - -// assert_eq!(tuple.get(0), Value::Word(Word::new(1))); -// assert_eq!(tuple.get(1), Value::Blob(Blob::from("two"))); -// assert_eq!(tuple.get(2), Value::Null(Null::new())); - -// let taken = tuple.take(1); -// assert_eq!(taken, Value::Blob(Blob::from("two"))); -// assert_eq!(tuple.get(1), Value::Null(Null::new())); - -// let items: Vec = tuple.iter().collect(); -// assert_eq!(items.len(), 3); -// } - -// // Checks tuple swap semantics and out-of-bounds errors. -// #[test] -// fn test_tuple_swap_and_bounds_errors() { -// let mut tuple = Tuple::new(2); -// tuple.set(0, 1u64); -// tuple.set(1, 2u64); - -// let mut replacement = Value::Blob(Blob::from("swap")); -// tuple.swap(0, &mut replacement); -// assert_eq!(replacement, Value::Word(Word::new(1))); -// assert_eq!(tuple.get(0), Value::Blob(Blob::from("swap"))); - -// let result = ::get_tuple(&tuple, 3); -// assert!(matches!(result, Err(Error::InvalidIndex(3)))); -// let result = -// ::set_tuple(&mut tuple, 3, 5u64.into()); -// assert!(matches!(result, Err(Error::InvalidIndex(3)))); -// } - -// // Confirms tuple byte sizes add up for mixed content. -// #[test] -// fn test_tuple_byte_size() { -// let tuple = Value::Tuple(Tuple::from((1u64, "hi"))); -// assert_eq!(tuple.byte_size(), core::mem::size_of::() + 2); -// } - -// // Verifies page read/write behavior and length. -// #[test] -// fn test_page_read_write_len() { -// let mut page = Page::new(1); -// assert_eq!(page.len(), 1 << 12); - -// let data = [1u8, 2, 3, 4]; -// let written = page.write(0, &data); -// assert_eq!(written, data.len()); - -// let mut buf = [0u8; 4]; -// let read = page.read(0, &mut buf); -// assert_eq!(read, data.len()); -// assert_eq!(buf, data); -// } - -// // Ensures page read/write with offsets work correctly. 
-// #[test] -// fn test_page_read_write_with_offset() { -// let mut page = Page::new(1); -// let data = [9u8, 8, 7]; -// let written = page.write(4, &data); -// assert_eq!(written, data.len()); - -// let mut buf = [0u8; 3]; -// let read = page.read(4, &mut buf); -// assert_eq!(read, data.len()); -// assert_eq!(buf, data); -// } - -// // Confirms page size tier selection at thresholds. -// #[test] -// fn test_page_size_tiers() { -// let small = Page::new(1); -// assert_eq!(small.len(), 1 << 12); - -// let mid = Page::new((1 << 12) + 1); -// assert_eq!(mid.len(), 1 << 21); - -// let large = Page::new((1 << 21) + 1); -// assert_eq!(large.len(), 1 << 30); -// } - -// // Verifies default table entry sizes for small and mid tables. -// #[test] -// fn test_table_default_entry_sizes() { -// let table_small = Table::new(1); -// let entry = table_small.get(0).unwrap(); -// assert_eq!(entry, Entry::Null(1 << 12)); -// assert_eq!(table_small.len(), 1 << 21); - -// let table_mid = Table::new((1 << 21) + 1); -// let entry = table_mid.get(0).unwrap(); -// assert_eq!(entry, Entry::Null(1 << 21)); -// assert_eq!(table_mid.len(), 1 << 30); -// } - -// // Ensures tables grow when mapping beyond current range. -// #[test] -// fn test_table_map_growth() { -// let mut table = Table::new(1); -// let entry = Entry::RWPage(Page::new(1)); -// let address = 1 << 21; -// let _ = table.map(address, entry).unwrap(); -// assert_eq!(table.len(), 1 << 30); -// } - -// // Verifies unmap returns None for addresses beyond the table range. -// #[test] -// fn test_table_unmap_out_of_range() { -// let mut table = Table::new(1); -// let missing = table.unmap(table.len() + 1); -// assert!(missing.is_none()); -// } - -// // Ensures table map and unmap round-trip a page entry. -// #[test] -// fn test_table_map_unmap_roundtrip() { -// let mut table = Table::new(1); -// let entry = Entry::RWPage(Page::new(1)); -// let old = table.map(0, entry.clone()).unwrap(); -// assert_eq!(old, Entry::Null(1 << 12)); - -// let unmapped = table.unmap(0); -// assert_eq!(unmapped, Some(entry)); -// } - -// // Validates symbolic function apply and read behavior. -// #[test] -// fn test_function_symbolic_apply_and_read() { -// let func = Function::symbolic(Word::new(7)); -// assert!(func.is_symbolic()); - -// let func = func.apply(1u64).apply("arg"); -// let read_back = func.read_cloned(); - -// let expected_args = Tuple::from((1u64, "arg")); -// let expected = Value::Tuple(Tuple::from(( -// Blob::from("Symbolic"), -// Value::Word(Word::new(7)), -// Value::Tuple(expected_args), -// ))); - -// assert_eq!(read_back, expected); -// } - -// // Ensures function argument order and force semantics. -// #[test] -// fn test_function_apply_order_and_force() { -// let func = Function::symbolic(Word::new(9)); -// let func = func.apply(1u64).apply(2u64).apply("three"); -// let read_back = func.read_cloned(); - -// let expected_args = Tuple::from((1u64, 2u64, "three")); -// let expected = Value::Tuple(Tuple::from(( -// Blob::from("Symbolic"), -// Value::Word(Word::new(9)), -// Value::Tuple(expected_args), -// ))); -// assert_eq!(read_back, expected); - -// let forced = func.force(); -// match forced { -// Value::Function(f) => assert!(f.is_symbolic()), -// _ => panic!("expected a symbolic function value"), -// } -// } - -// // Confirms invalid function construction is rejected. 
-// #[test] -// fn test_function_new_rejects_invalid_value() { -// let invalid = Value::Word(Word::new(1)); -// let result = Function::new(invalid); -// assert!(matches!(result, Err(Error::InvalidValue))); -// } -// } +// Serialization round-trip tests using postcard. +// Runs with: cargo test -p kernel --target=x86_64-unknown-none + +#[cfg(test)] +mod tests { + extern crate alloc; + + use crate::prelude::*; + + /// Verifies Null serializes and deserializes back to an equal value. + #[test] + fn test_serde_null() { + let null = Value::Null(Null::new()); + let bytes_vec = postcard::to_allocvec(&null).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, null); + } + + /// Verifies Word serializes and deserializes back to an equal value. + #[test] + fn test_serde_word() { + let word = Value::Word(1.into()); + let bytes_vec = postcard::to_allocvec(&word).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, word); + } + + /// Verifies Blob serializes and deserializes back to an equal value. + #[test] + fn test_serde_blob() { + let blob = Value::Blob("hello, world!".into()); + let bytes_vec = postcard::to_allocvec(&blob).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, blob); + } + + /// Verifies Tuple serializes and deserializes back to an equal value. + #[test] + fn test_serde_tuple() { + let tuple = Value::Tuple((1, 2, 3).into()); + let bytes_vec = postcard::to_allocvec(&tuple).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, tuple); + } + + /// Verifies Page serializes and deserializes back to an equal value. + #[test] + fn test_serde_page() { + let page = Value::Page(Page::new(1)); + let bytes_vec = postcard::to_allocvec(&page).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, page); + } + + /// Verifies Table serializes and deserializes back to an equal value. + #[test] + fn test_serde_table() { + let table = Value::Table(Table::new(1)); + let bytes_vec = postcard::to_allocvec(&table).unwrap(); + let deserialized: Value = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, table); + } + + /// Verifies a read-only page Entry round-trips through serde. + #[test] + fn test_serde_ropage() { + let ropage = Entry::ROPage(Page::new(1)); + let bytes_vec = postcard::to_allocvec(&ropage).unwrap(); + let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, ropage); + } + + /// Verifies a read-write page Entry round-trips through serde. + #[test] + fn test_serde_rwpage() { + let rwpage = Entry::RWPage(Page::new(1)); + let bytes_vec = postcard::to_allocvec(&rwpage).unwrap(); + let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, rwpage); + } + + /// Verifies a read-only table Entry round-trips through serde. + #[test] + fn test_serde_rotable() { + let rotable = Entry::ROTable(Table::new(1)); + let bytes_vec = postcard::to_allocvec(&rotable).unwrap(); + let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap(); + assert_eq!(deserialized, rotable); + } + + /// Verifies a read-write table Entry round-trips through serde. 
+    #[test]
+    fn test_serde_rwtable() {
+        let rwtable = Entry::RWTable(Table::new(1));
+        let bytes_vec = postcard::to_allocvec(&rwtable).unwrap();
+        let deserialized: Entry = postcard::from_bytes(&bytes_vec).unwrap();
+        assert_eq!(deserialized, rwtable);
+    }
+
+    /// Ensures deserializing an unknown Value variant produces the expected error.
+    #[test]
+    fn test_value_unknown_variant_error() {
+        let unknown_variant = [7, 0];
+        let deserialized: Result<Value, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error = serde::de::Error::unknown_variant(
+            "7",
+            &["Null", "Word", "Blob", "Tuple", "Page", "Table"],
+        );
+        assert_eq!(deserialized_error, error);
+    }
+
+    /// Ensures deserializing an unknown Entry variant produces the expected error.
+    #[test]
+    fn test_entry_unknown_variant_error() {
+        let unknown_variant = [5, 0];
+        let deserialized: Result<Entry, postcard::Error> = postcard::from_bytes(&unknown_variant);
+        let deserialized_error = deserialized.expect_err("should have been err");
+        let error = serde::de::Error::unknown_variant(
+            "5",
+            &["Null", "ROPage", "RWPage", "ROTable", "RWTable"],
+        );
+        assert_eq!(deserialized_error, error);
+    }
+}
diff --git a/kernel/src/types/blob.rs b/kernel/src/types/blob.rs
index 5b7780e..d29c502 100644
--- a/kernel/src/types/blob.rs
+++ b/kernel/src/types/blob.rs
@@ -87,3 +87,40 @@ impl From<&str> for Blob {
         Blob::from(value.to_string())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies len() and into_inner() return correct content.
+    #[test]
+    fn test_len_and_into_inner() {
+        let blob = Blob::new(b"hello".to_vec());
+        assert_eq!(blob.len(), 5);
+        assert_eq!(&*blob.into_inner(), b"hello");
+    }
+
+    /// Verifies DerefMut allows in-place byte mutation.
+    #[test]
+    fn test_mutation() {
+        let mut blob = Blob::new(b"hello".to_vec());
+        blob[0] = b'j';
+        assert_eq!(&*blob.into_inner(), b"jello");
+    }
+
+    /// Ensures invalid UTF-8 bytes are preserved as raw data.
+    #[test]
+    fn test_invalid_utf8_preserved() {
+        let bytes = vec![0xffu8, 0xfeu8, 0xfdu8];
+        let blob = Blob::new(bytes.clone());
+        assert_eq!(&*blob.into_inner(), &bytes[..]);
+    }
+
+    /// Verifies From<&str> constructs a blob with matching content.
+    #[test]
+    fn test_from_str() {
+        let blob = Blob::from("test");
+        assert_eq!(blob.len(), 4);
+        assert_eq!(&*blob, b"test");
+    }
+}
diff --git a/kernel/src/types/function.rs b/kernel/src/types/function.rs
index 29d539c..94c1aa2 100644
--- a/kernel/src/types/function.rs
+++ b/kernel/src/types/function.rs
@@ -166,3 +166,51 @@ impl Function {
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies symbolic function parsing and read round-trip.
+    #[test]
+    fn test_symbolic_parse_and_read() {
+        let args = Tuple::from((1u64, "two"));
+        let value = Value::Tuple(Tuple::from((
+            Blob::from("Symbolic"),
+            Value::Word(Word::new(5)),
+            Value::Tuple(args),
+        )));
+        let func = Function::new(value.clone()).expect("symbolic parse failed");
+        assert!(!func.is_arcane());
+        assert_eq!(func.read(), value);
+    }
+
+    /// Ensures unrecognized function tags are rejected.
+    #[test]
+    fn test_invalid_tag_rejected() {
+        let value = Value::Tuple(Tuple::from((Blob::from("Other"), Value::Null(Null::new()))));
+        assert!(Function::new(value).is_none());
+    }
+
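+    /// Illustrative sketch, not part of the original patch: it assumes the tag
+    /// blob is matched by exact content, so a differently cased tag such as
+    /// "symbolic" should be rejected the same way "Other" is above.
+    #[test]
+    fn test_lowercase_tag_rejected_sketch() {
+        let value = Value::Tuple(Tuple::from((Blob::from("symbolic"), Value::Null(Null::new()))));
+        assert!(Function::new(value).is_none());
+    }
+
+    /// Verifies arcane function parsing accepts a valid register/memory layout.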
+    #[test]
+    fn test_arcane_parse_valid_layout() {
+        let mut registers = Tuple::new(18);
+        for i in 0..18 {
+            registers.set(i, Value::Null(Null::new()));
+        }
+        let mut data = Tuple::new(4);
+        data.set(0, Value::Tuple(registers));
+        data.set(1, Value::Table(Table::new(1)));
+        data.set(2, Value::Tuple(Tuple::new(0)));
+        data.set(3, Value::Tuple(Tuple::new(0)));
+
+        let value = Value::Tuple(Tuple::from((
+            Blob::from("Arcane"),
+            Value::Tuple(data),
+            Value::Tuple(Tuple::new(0)),
+        )));
+        let func = Function::new(value).expect("arcane parse failed");
+        assert!(func.is_arcane());
+    }
+}
diff --git a/kernel/src/types/null.rs b/kernel/src/types/null.rs
index 79cfa92..ed84126 100644
--- a/kernel/src/types/null.rs
+++ b/kernel/src/types/null.rs
@@ -12,3 +12,14 @@ impl Default for Null {
         Self::new()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Ensures Null::new() and Null::default() produce identical values.
+    #[test]
+    fn test_new_equals_default() {
+        assert_eq!(Null::new(), Null::default());
+    }
+}
diff --git a/kernel/src/types/page.rs b/kernel/src/types/page.rs
index a666c7e..1b77b73 100644
--- a/kernel/src/types/page.rs
+++ b/kernel/src/types/page.rs
@@ -120,3 +120,38 @@ impl DerefMut for Page {
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies page size selection at tier boundaries (4KB, 2MB, 1GB).
+    #[test]
+    fn test_size_tiers() {
+        let small = Page::new(1);
+        assert_eq!(small.size(), 1 << 12);
+
+        let mid = Page::new((1 << 12) + 1);
+        assert_eq!(mid.size(), 1 << 21);
+
+        let large = Page::new((1 << 21) + 1);
+        assert_eq!(large.size(), 1 << 30);
+    }
+
+    /// Verifies DerefMut write and Deref read on page bytes.
+    #[test]
+    fn test_write_and_read_back() {
+        let mut page = Page::new(1);
+        page[0] = 7;
+        assert_eq!(page[0], 7);
+    }
+
+    /// Ensures shared() preserves written content.
+    #[test]
+    fn test_shared_preserves_content() {
+        let mut page = Page::new(1);
+        page[0] = 42;
+        let shared = page.shared();
+        assert_eq!(shared[0], 42);
+    }
+}
diff --git a/kernel/src/types/runtime.rs b/kernel/src/types/runtime.rs
index 691a297..6d19783 100644
--- a/kernel/src/types/runtime.rs
+++ b/kernel/src/types/runtime.rs
@@ -68,7 +68,6 @@ impl arca::Runtime for Runtime {
     }
 
     fn read_blob(blob: &arca::Blob, offset: usize, buf: &mut [u8]) -> usize {
-        log::error!("read_blob: offset={}, buf_len={}", offset, buf.len());
         let len = core::cmp::min(buf.len(), blob.len() - offset);
         buf[..len].copy_from_slice(&blob[offset..offset + len]);
         len
diff --git a/kernel/src/types/table.rs b/kernel/src/types/table.rs
index 2108dc7..6bc2570 100644
--- a/kernel/src/types/table.rs
+++ b/kernel/src/types/table.rs
@@ -206,3 +206,36 @@ impl TryFrom for CowPage {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies table size selection at tier boundaries (2MB, 1GB).
+    #[test]
+    fn test_size_tiers() {
+        let small = Table::new(1);
+        assert_eq!(small.size(), 1 << 21);
+
+        let large = Table::new((1 << 21) + 1);
+        assert_eq!(large.size(), 1 << 30);
+    }
+
+    /// Ensures empty table slots return the correct default Null entry.
+    #[test]
+    fn test_get_returns_default_null() {
+        let table = Table::new(1);
+        let entry = table.get(10);
+        assert_eq!(entry, arca::Entry::Null(1 << 12));
+    }
+
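+    /// Illustrative sketch, not part of the original patch: it assumes every
+    /// untouched slot of a fresh table holds the same default Null entry, not
+    /// just the slot checked above.
+    #[test]
+    fn test_default_null_consistent_across_slots_sketch() {
+        let table = Table::new(1);
+        assert_eq!(table.get(0), table.get(10));
+    }
+
+    /// Verifies set replaces the default entry and get retrieves it back.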
+    #[test]
+    fn test_set_and_get_roundtrip() {
+        let mut table = Table::new(1);
+        let entry = arca::Entry::RWPage(arca::Page::from_inner(Page::new(1)));
+        let old = table.set(0, entry.clone()).unwrap();
+        assert_eq!(old, arca::Entry::Null(1 << 12));
+        assert_eq!(table.get(0), entry);
+    }
+}
diff --git a/kernel/src/types/tuple.rs b/kernel/src/types/tuple.rs
index 8be3307..3257371 100644
--- a/kernel/src/types/tuple.rs
+++ b/kernel/src/types/tuple.rs
@@ -42,3 +42,28 @@ impl FromIterator<Value> for Tuple {
         Tuple::new(v)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies new_with_len creates a tuple filled with Null values.
+    #[test]
+    fn test_new_with_len_defaults_to_null() {
+        let tuple = Tuple::new_with_len(2);
+        assert_eq!(tuple.len(), 2);
+        assert!(matches!(tuple[0], Value::Null(_)));
+        assert!(matches!(tuple[1], Value::Null(_)));
+    }
+
+    /// Verifies FromIterator collects values into a correctly sized tuple.
+    #[test]
+    fn test_from_iter() {
+        let values: alloc::vec::Vec<Value> =
+            alloc::vec![Value::Word(1u64.into()), Value::Blob("x".into())];
+        let tuple: Tuple = values.clone().into_iter().collect();
+        assert_eq!(tuple.len(), 2);
+        assert_eq!(tuple[0], values[0]);
+        assert_eq!(tuple[1], values[1]);
+    }
+}
diff --git a/kernel/src/types/value.rs b/kernel/src/types/value.rs
index b4f9f3e..7fbd2c4 100644
--- a/kernel/src/types/value.rs
+++ b/kernel/src/types/value.rs
@@ -62,3 +62,33 @@ macro_rules! impl_value_from {
 
 foreach_type_item! {impl_tryfrom_value}
 foreach_type_item! {impl_value_from}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies Word -> Value -> Word conversion round-trips correctly.
+    #[test]
+    fn test_word_roundtrip() {
+        let word = Word::new(99);
+        let value: Value = word.clone().into();
+        let roundtrip = Word::try_from(value).unwrap();
+        assert_eq!(roundtrip, word);
+    }
+
+    /// Verifies Blob -> Value -> Blob conversion round-trips correctly.
+    #[test]
+    fn test_blob_roundtrip() {
+        let blob = Blob::new(b"data".to_vec());
+        let value: Value = blob.clone().into();
+        let roundtrip = Blob::try_from(value).unwrap();
+        assert_eq!(roundtrip, blob);
+    }
+
+    /// Ensures TryFrom fails when converting to the wrong variant type.
+    #[test]
+    fn test_mismatched_conversion_fails() {
+        let value: Value = Word::new(1).into();
+        assert!(Blob::try_from(value).is_err());
+    }
+}
diff --git a/kernel/src/types/word.rs b/kernel/src/types/word.rs
index 749877f..291d4b5 100644
--- a/kernel/src/types/word.rs
+++ b/kernel/src/types/word.rs
@@ -36,3 +36,31 @@ impl AsMut<u64> for Word {
         &mut self.value
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Verifies Word::read returns the value passed to new.
+    #[test]
+    fn test_read() {
+        let word = Word::new(123);
+        assert_eq!(word.read(), 123);
+    }
+
+    /// Verifies From<u64> and Into<u64> round-trip correctly.
+    #[test]
+    fn test_from_u64_roundtrip() {
+        let word = Word::from(0xdeadbeef_u64);
+        assert_eq!(u64::from(word), 0xdeadbeef);
+    }
+
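+    /// Illustrative sketch, not part of the original patch: assuming Word is a
+    /// plain wrapper around u64, its conversions should compose with ordinary
+    /// arithmetic on the inner value.
+    #[test]
+    fn test_conversion_composes_with_arithmetic_sketch() {
+        let word = Word::from(21u64);
+        let doubled = Word::from(u64::from(word) * 2);
+        assert_eq!(doubled.read(), 42);
+    }
+
+    /// Verifies AsRef and AsMut provide access to the inner value.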
+ #[test] + fn test_as_ref_as_mut() { + let mut word = Word::new(42); + assert_eq!(*word.as_ref(), 42); + *word.as_mut() = 99; + assert_eq!(word.read(), 99); + } +} From 6ce6ccdd4ef74b47623055770fe24b66a2da8772 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Thu, 16 Apr 2026 11:25:57 -0700 Subject: [PATCH 12/15] fixing rust version differences for package support --- common/src/lib.rs | 1 + kernel/src/lib.rs | 1 + vmm/src/lib.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/common/src/lib.rs b/common/src/lib.rs index 0cba9e9..40f0bc6 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,4 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] +#![allow(stable_features)] #![feature(allocator_api)] #![feature(box_as_ptr)] #![feature(fn_traits)] diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 991048b..c2fba10 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -1,5 +1,6 @@ #![no_main] #![no_std] +#![allow(stable_features)] #![feature(allocator_api)] #![feature(bigint_helper_methods)] #![feature(box_as_ptr)] diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index e0413c8..040802e 100644 --- a/vmm/src/lib.rs +++ b/vmm/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(stable_features, unused_features)] #![feature(allocator_api)] #![feature(ptr_metadata)] #![feature(str_from_raw_parts)] From 737a1d46db2e569f50a443dfb145fca8de875593 Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Thu, 16 Apr 2026 11:38:40 -0700 Subject: [PATCH 13/15] allowing unused features --- common/src/lib.rs | 2 +- kernel/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 40f0bc6..606a8df 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,5 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -#![allow(stable_features)] +#![allow(stable_features, unused_features)] #![feature(allocator_api)] #![feature(box_as_ptr)] #![feature(fn_traits)] diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index c2fba10..2c093f8 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -1,6 +1,6 @@ #![no_main] #![no_std] -#![allow(stable_features)] +#![allow(stable_features, unused_features)] #![feature(allocator_api)] #![feature(bigint_helper_methods)] #![feature(box_as_ptr)] From e6ccbd7ae8b35c214d53f4e29a5aa8c99f9ad97c Mon Sep 17 00:00:00 2001 From: Niam Shah Date: Thu, 16 Apr 2026 11:46:43 -0700 Subject: [PATCH 14/15] different modules based on rust versions --- kernel/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 2c093f8..7aec78f 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -1,8 +1,10 @@ #![no_main] #![no_std] #![allow(stable_features, unused_features)] +#![feature(cfg_version)] #![feature(allocator_api)] -#![feature(bigint_helper_methods)] +#![cfg_attr(not(version("1.96")), feature(bigint_helper_methods))] +#![cfg_attr(version("1.96"), feature(widening_mul))] #![feature(box_as_ptr)] #![feature(box_into_inner)] #![feature(maybe_uninit_array_assume_init)] From 9f6bf9520e37a6eba44be1c07a078f212e31a524 Mon Sep 17 00:00:00 2001 From: Akshay Srivatsan Date: Mon, 20 Apr 2026 11:42:24 -0700 Subject: [PATCH 15/15] refactor: move buddy allocator tests to separate file --- common/src/buddy.rs | 955 +------------------------------------- common/src/buddy/tests.rs | 951 +++++++++++++++++++++++++++++++++++++ 2 files changed, 952 insertions(+), 954 deletions(-) create mode 100644 common/src/buddy/tests.rs diff --git a/common/src/buddy.rs b/common/src/buddy.rs index 602630e..cec5b17 
100644 --- a/common/src/buddy.rs +++ b/common/src/buddy.rs @@ -1151,957 +1151,4 @@ unsafe impl Allocator for BuddyAllocator { } #[cfg(test)] -mod tests { - extern crate test; - - use super::*; - #[test] - // Setting/clearing individual bits in u64 words to check that the bit manipulation works - fn test_bitref() { - let mut word = 10; - - let mut r0 = BitRef::new(&mut word, 0); - r0.set(); - - let mut r1 = BitRef::new(&mut word, 1); - r1.clear(); - - let mut r2 = BitRef::new(&mut word, 2); - r2.write(false); - - let mut r3 = BitRef::new(&mut word, 3); - r3.write(true); - - assert_eq!(word, 9); - } - - #[test] - // Setting/clearing individual bits in a BitSlice to check that the bit manipulation works - fn test_bitslice() { - let mut words = [0; 2]; - let mut slice = BitSlice::new(128, &mut words); - let mut r0 = slice.bit(0); - r0.set(); - - let mut r1 = slice.bit(1); - r1.set(); - - let mut r127 = slice.bit(127); - r127.set(); - - assert_eq!(words[0], 3); - assert_eq!( - words[127 / (core::mem::size_of::() * 8)], - 1 << (127 % (core::mem::size_of::() * 8)) - ); - } - - #[test] - // Basic setup + continuous element pushing to check that the allocator grows and adjusts properly - fn test_buddy_allocator() { - let allocator = BuddyAllocatorImpl::new(0x10000000); - - let test = Box::new_in(10, allocator.clone()); - assert_eq!(*test, 10); - - let mut v = Vec::new_in(allocator.clone()); - for i in 0..10000 { - v.push(i); - } - } - - #[test] - // Verifying that too small allocations of allocator do not panic - // Potential issue: reserve_unchecked does not validate that the requested index is within the number of blocks at that level - fn test_too_small_allocation() { - let allocator = BuddyAllocatorImpl::new(1 << 20); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let _used_before = allocator.used_size(); - let _ptr = allocator.allocate_raw(size); - } - - #[test] - // Verifying allocate_raw adds size to used_size, and free_raw subtracts it back, returning usage to the original amount. - fn test_allocate_raw_and_used_size() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - assert_eq!(allocator.used_size(), used_before + size); - allocator.free_raw(ptr, size); - assert_eq!(allocator.used_size(), used_before); - } - - #[test] - // Verifying allocate_many_raw adds size to used_size, and free_many_raw subtracts it back, returning usage to the original amount. 
- fn test_allocate_many_and_free_many() { - use std::collections::BTreeSet; - - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); - let mut ptrs = [core::ptr::null_mut(); 4]; - let count = allocator.allocate_many_raw(size, &mut ptrs); - assert_eq!(count, ptrs.len()); - assert!(ptrs.iter().all(|ptr| !ptr.is_null())); - - let unique: BTreeSet = ptrs.iter().map(|ptr| *ptr as usize).collect(); - assert_eq!(unique.len(), ptrs.len()); - - allocator.free_many_raw(size, &ptrs); - assert_eq!(allocator.used_size(), used_before); - } - - #[test] - // Verifying that to_offset and from_offset roundtrip correctly - fn test_offset_roundtrip() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - - let offset = allocator.to_offset(ptr); - let roundtrip = allocator.from_offset::(offset); - assert_eq!(roundtrip as usize, ptr as usize); - - allocator.free_raw(ptr, size); - } - - #[test] - // Verifying that reserving at zero returns a null pointer and does not add to used_size - fn test_reserve_raw_at_zero() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let used_before = allocator.used_size(); - let ptr = allocator.reserve_raw(0, size); - assert!(ptr.is_null()); - assert_eq!(allocator.used_size(), used_before); - } - - #[test] - // Verifying that allocating too large returns a null pointer and does not add to used_size - fn test_allocate_raw_too_large_returns_null() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let used_before = allocator.used_size(); - let ptr = allocator.allocate_raw(allocator.total_size() * 2); - assert!(ptr.is_null()); - assert_eq!(allocator.used_size(), used_before); - } - - #[test] - // Verifying that refcnt is zero on allocate - fn test_refcnt_zero_on_allocate() { - use core::sync::atomic::Ordering; - - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - - let refcnt = allocator.refcnt(ptr); - assert!(!refcnt.is_null()); - let value = unsafe { (*refcnt).load(Ordering::SeqCst) }; - assert_eq!(value, 0); - - allocator.free_raw(ptr, size); - } - - #[test] - // Stress testing the allocator with random allocations and frees - fn stress_test() { - use std::hash::{BuildHasher, Hasher, RandomState}; - let allocator = BuddyAllocatorImpl::new(0x10000000); - allocator.set_caching(false); - let mut v = vec![]; - let random = |limit: usize| { - let x: u64 = RandomState::new().build_hasher().finish(); - x as usize % limit - }; - for _ in 0..100000 { - let used_before = allocator.used_size(); - let remaining = allocator.total_size() - used_before; - let size = random(core::cmp::min(1 << 21, remaining / 2)); - let alloc = - Box::<[u8], BuddyAllocatorImpl>::new_uninit_slice_in(size, allocator.clone()); - let used_after = allocator.used_size(); - assert!(used_after >= used_before + size); - if !v.is_empty() && size % 3 == 0 { - let number = random(v.len()); - for _ in 0..number { - let index = random(v.len()); - v.remove(index); - } - } - v.push(alloc); - } - } - - #[test] - // Test splitting large blocks into smaller ones - fn test_block_splitting() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let small_size = BuddyAllocatorImpl::MIN_ALLOCATION; - let large_size = small_size * 4; - - // Allocate and 
free a large block - let large_ptr = allocator.allocate_raw(large_size); - assert!(!large_ptr.is_null()); - allocator.free_raw(large_ptr, large_size); - - // Now allocate multiple small blocks - should split the large one - let mut small_ptrs = vec![]; - for _ in 0..4 { - let ptr = allocator.allocate_raw(small_size); - assert!(!ptr.is_null()); - small_ptrs.push(ptr); - } - - // Clean up - for ptr in small_ptrs { - allocator.free_raw(ptr, small_size); - } - } - - #[test] - fn test_reserve_specific_addresses() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - // Get a definitely-available block - let p = allocator.allocate_raw(size); - assert!(!p.is_null()); - let address = allocator.to_offset(p); - allocator.free_raw(p, size); - - // Now we should be able to reserve that exact address - let ptr1 = allocator.reserve_raw(address, size); - assert!(!ptr1.is_null()); - assert_eq!(allocator.to_offset(ptr1), address); - - // Reserving again should fail - let ptr2 = allocator.reserve_raw(address, size); - assert!(ptr2.is_null()); - - allocator.free_raw(ptr1, size); - } - - #[test] - // Test reserving overlapping regions - fn test_reserve_overlapping_regions() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - // Reserve a block - let ptr1 = allocator.reserve_raw(size * 5, size); - assert!(!ptr1.is_null()); - - // Try to reserve a larger block that would overlap - let ptr2 = allocator.reserve_raw(size * 4, size * 4); - assert!(ptr2.is_null()); // Should fail because it overlaps with ptr1 - - allocator.free_raw(ptr1, size); - } - - #[test] - // Test allocating all available memory - fn test_exhaust_memory() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - let mut ptrs = vec![]; - - // Allocate until we can't anymore - loop { - let ptr = allocator.allocate_raw(size); - if ptr.is_null() { - break; - } - ptrs.push(ptr); - } - - // Verify we actually allocated something - assert!(!ptrs.is_empty()); - - // Try one more allocation - should fail - let ptr = allocator.allocate_raw(size); - assert!(ptr.is_null()); - - // Free everything - for ptr in ptrs { - allocator.free_raw(ptr, size); - } - - // Should be able to allocate again - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - allocator.free_raw(ptr, size); - } - - #[test] - // Test mixed allocation sizes - fn test_mixed_allocation_sizes() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - - let small = BuddyAllocatorImpl::MIN_ALLOCATION; - let medium = small * 4; - let large = small * 16; - - let ptr1 = allocator.allocate_raw(small); - let ptr2 = allocator.allocate_raw(large); - let ptr3 = allocator.allocate_raw(medium); - let ptr4 = allocator.allocate_raw(small); - - assert!(!ptr1.is_null()); - assert!(!ptr2.is_null()); - assert!(!ptr3.is_null()); - assert!(!ptr4.is_null()); - - // Verify they're all different - let ptrs = [ptr1, ptr2, ptr3, ptr4]; - for i in 0..ptrs.len() { - for j in (i + 1)..ptrs.len() { - assert_ne!(ptrs[i], ptrs[j]); - } - } - - allocator.free_raw(ptr2, large); - allocator.free_raw(ptr1, small); - allocator.free_raw(ptr4, small); - allocator.free_raw(ptr3, medium); - } - - #[test] - // Test freeing in different order than allocation - fn test_free_reverse_order() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let mut ptrs = vec![]; - for _ in 0..10 { - let ptr = 
allocator.allocate_raw(size); - assert!(!ptr.is_null()); - ptrs.push(ptr); - } - - let used_peak = allocator.used_size(); - - // Free in reverse order - for ptr in ptrs.iter().rev() { - allocator.free_raw(*ptr, size); - } - - assert!(allocator.used_size() < used_peak); - } - - #[test] - // Test allocation size rounding. Test after exhausting, allocator should still be usable -- no lock leak - fn allocation_rounds_up_to_pow2_and_min() { - let a = BuddyAllocatorImpl::new(1 << 24); - - // Request sizes that aren't powers of 2 - let ptr1 = a.allocate_raw(5000); // Should round to 8192 - let ptr2 = a.allocate_raw(1000); // Should round to 4096 - let ptr3 = a.allocate_raw(10000); // Should round to 16384 - - assert!(!ptr1.is_null()); - assert!(!ptr2.is_null()); - assert!(!ptr3.is_null()); - - a.free_raw(ptr1, 5000); - a.free_raw(ptr2, 1000); - a.free_raw(ptr3, 10000); - - let used0 = a.used_size(); - let p = a.allocate_raw(5000); // rounds to 8192 (and >= 4096) - assert!(!p.is_null()); - assert_eq!(a.used_size(), used0 + 8192); - - a.free_raw(p, 5000); // free uses same rounding path - assert_eq!(a.used_size(), used0); - } - - #[test] - // Confirm there is no overlap between levels - fn test_offset_calculation() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - - let mut ranges = vec![]; - for level in allocator.inner.meta.level_range.clone() { - let offset = allocator.inner.offset_of_level_words(level); - let size = allocator.inner.size_of_level_words(level); - ranges.push((offset, offset + size, level)); - } - ranges.sort_by_key(|(start, _, _)| *start); - - for w in ranges.windows(2) { - let (_s1, e1, l1) = w[0]; - let (s2, _e2, l2) = w[1]; - assert!(e1 <= s2, "overlap between level {} and level {}", l1, l2); - } - } - - #[test] - // Test bitmap boundaries - fn test_bitmap_boundaries() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - - for level in allocator.inner.meta.level_range.clone() { - let bits = allocator.inner.size_of_level_bits(level); - let words = allocator.inner.size_of_level_words(level); - - // Verify words is enough to hold bits - assert!( - words * 64 >= bits, - "Level {} needs {} bits but only has {} words ({} bits)", - level, - bits, - words, - words * 64 - ); - } - } - - #[test] - // Test try_allocate_many_raw where everything should succeed easily - fn test_try_allocate_many_no_contention() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let mut ptrs = [core::ptr::null_mut(); 10]; - let result = allocator.try_allocate_many_raw(size, &mut ptrs); - - assert_eq!(result, Some(10)); - assert!(ptrs.iter().all(|p| !p.is_null())); - - allocator.free_many_raw(size, &ptrs); - } - - #[test] - // Testing allocating more pointers than space available, making sure bulk alloc stops cleanly when space out, - // and partial success allowed, reported successes are valid. 
Currently hanging - fn test_allocate_many_partial_success() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - // Request more blocks than available - let mut ptrs = [core::ptr::null_mut(); 10000]; - let count = allocator.allocate_many_raw(size, &mut ptrs); - - // Should have allocated some but not all - assert!(count > 0); - assert!(count < ptrs.len()); - - // All allocated pointers should be non-null - for i in 0..count { - assert!(!ptrs[i].is_null()); - } - - // Remaining should be null - for i in count..ptrs.len() { - assert!(ptrs[i].is_null()); - } - - // Clean up - allocator.free_many_raw(size, &ptrs[0..count]); - } - - #[test] - // Test that refcnt works for different allocation addresses - fn test_refcnt_different_addresses() { - use core::sync::atomic::Ordering; - - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let ptr1 = allocator.allocate_raw(size); - let ptr2 = allocator.allocate_raw(size); - - let refcnt1 = allocator.refcnt(ptr1); - let refcnt2 = allocator.refcnt(ptr2); - - // Should be different refcnt locations - assert_ne!(refcnt1, refcnt2); - - // Both should be 0 - assert_eq!(unsafe { (*refcnt1).load(Ordering::SeqCst) }, 0); - assert_eq!(unsafe { (*refcnt2).load(Ordering::SeqCst) }, 0); - - allocator.free_raw(ptr1, size); - allocator.free_raw(ptr2, size); - } - - #[test] - // Test null pointer refcnt - fn test_refcnt_null_pointer() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let refcnt = allocator.refcnt(core::ptr::null::()); - assert!(refcnt.is_null()); - } - - #[test] - // Test usage calculation - fn test_usage_calculation() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let initial_usage = allocator.usage(); - - let ptr = allocator.allocate_raw(size); - let usage_after = allocator.usage(); - - assert!(usage_after > initial_usage); - assert!(usage_after <= 1.0); - assert!(usage_after >= 0.0); - - allocator.free_raw(ptr, size); - } - - #[test] - // Test request counting - fn test_request_counting() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let mut before = [0; 64]; - let mut after = [0; 64]; - - allocator.requests(&mut before); - - let ptr = allocator.allocate_raw(size); - allocator.free_raw(ptr, size); - - allocator.requests(&mut after); - - // Should have incremented request count for the size level - let level = size.next_power_of_two().ilog2() as usize; - assert!(after[level] > before[level]); - } - - #[test] - fn test_alignment_requirements() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - let base = allocator.base() as usize; - - for power in 12..20 { - let size = 1 << power; - let ptr = allocator.allocate_raw(size); - assert!(!ptr.is_null()); - - let addr = ptr as usize; - assert_eq!( - (addr - base) % size, - 0, - "Allocation of size {} not aligned within arena", - size - ); - - allocator.free_raw(ptr, size); - } - } - - #[test] - // Test clone and drop behavior - fn test_clone_and_drop() { - let allocator = BuddyAllocatorImpl::new(1 << 24); - - let ptr1 = allocator.allocate_raw(4096); - assert!(!ptr1.is_null()); - - { - let clone = allocator.clone(); - let ptr2 = clone.allocate_raw(4096); - assert!(!ptr2.is_null()); - clone.free_raw(ptr2, 4096); - // clone drops here - } - - // Original should still work - let ptr3 = allocator.allocate_raw(4096); - assert!(!ptr3.is_null()); - - allocator.free_raw(ptr1, 4096); 
- allocator.free_raw(ptr3, 4096); - } - - #[test] - // Allocate one block, compute its buddy address, and verify that reserving the buddy returns that address (if it’s free). - fn buddy_address_math_matches_reserve() { - let a = BuddyAllocatorImpl::new(1 << 24); - let base = a.base() as usize; - - for power in 12..18 { - let size = 1usize << power; - let p = a.allocate_raw(size); - assert!(!p.is_null()); - - let off = a.to_offset(p); - let idx = off / size; - let buddy_idx = idx ^ 1; - let buddy_off = buddy_idx * size; - - // If the buddy is free, reserve_raw must return exactly that address. - let b = a.reserve_raw(buddy_off, size); - if !b.is_null() { - assert_eq!(a.to_offset(b), buddy_off); - a.free_raw(b, size); - } - - a.free_raw(p, size); - - // (optional) base-relative alignment property - assert_eq!(((p as usize) - base) % size, 0); - } - } - - #[test] - // 'Create' a known free block at a known offset by allocating a large block, then freeing it, then reserving/allocating inside it. - fn split_large_block_into_smaller_blocks() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let big = 1usize << 16; // 64KiB - let small = 1usize << 12; // 4KiB - let factor = big / small; - - let p = a.allocate_raw(big); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, big); - - // Now reserve all 4KiB blocks inside that 64KiB region. - let mut blocks = Vec::new(); - for i in 0..factor { - let q = a.reserve_raw(off + i * small, small); - assert!(!q.is_null(), "failed to reserve sub-block {}", i); - blocks.push(q); - } - - // Free them back - for q in blocks { - a.free_raw(q, small); - } - } - - #[test] - // Reserve two buddy halves, free them, verify you can reserve the parent block at the exact parent address. - fn coalesce_two_buddies_into_parent() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB - - // Create a known free parent block at a known offset. - let p = a.allocate_raw(parent); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, parent); - - // Reserve both children (buddies). - let c0 = a.reserve_raw(off, child); - let c1 = a.reserve_raw(off + child, child); - assert!(!c0.is_null() && !c1.is_null()); - - // Free both; this should coalesce into the parent. - a.free_raw(c0, child); - a.free_raw(c1, child); - - // Now reserving the parent at 'off' should succeed. - let p2 = a.reserve_raw(off, parent); - assert!( - !p2.is_null(), - "parent block did not reappear after coalescing" - ); - assert_eq!(a.to_offset(p2), off); - - a.free_raw(p2, parent); - } - - #[test] - // Hold one child, free the other, ensure parent reservation fails at that exact parent address. - fn no_coalesce_if_only_one_buddy_free() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB - - let p = a.allocate_raw(parent); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, parent); - - let c0 = a.reserve_raw(off, child); - let c1 = a.reserve_raw(off + child, child); - assert!(!c0.is_null() && !c1.is_null()); - - // Free only one child - a.free_raw(c0, child); - - // Parent must NOT be reservable while the other buddy is still held. 
- let parent_try = a.reserve_raw(off, parent); - assert!( - parent_try.is_null(), - "parent became available with one buddy still reserved" - ); - - // Cleanup - a.free_raw(c1, child); - - // Now parent should be available (coalesced) - let parent_ok = a.reserve_raw(off, parent); - assert!(!parent_ok.is_null()); - a.free_raw(parent_ok, parent); - } - - #[test] - // Free child1 then child0; ensure parent becomes available. - fn coalesce_is_order_independent() { - let a = BuddyAllocatorImpl::new(1 << 24); - let parent = 1usize << 15; // 32KiB - let child = 1usize << 14; // 16KiB - - let p = a.allocate_raw(parent); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, parent); - - let c0 = a.reserve_raw(off, child); - let c1 = a.reserve_raw(off + child, child); - assert!(!c0.is_null() && !c1.is_null()); - - a.free_raw(c1, child); - a.free_raw(c0, child); - - let p2 = a.reserve_raw(off, parent); - assert!(!p2.is_null()); - a.free_raw(p2, parent); - } - - #[test] - // Free 4 children → coalesce to 2 parents → coalesce to 1 grandparent. - fn multi_level_coalesce_cascades() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let grand = 1usize << 15; // 32KiB - let child = 1usize << 13; // 8KiB - let n = grand / child; // 4 - - let p = a.allocate_raw(grand); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, grand); - - let mut kids = Vec::new(); - for i in 0..n { - let k = a.reserve_raw(off + i * child, child); - assert!(!k.is_null()); - kids.push(k); - } - - // Free all kids -> should coalesce up to grand - for k in kids { - a.free_raw(k, child); - } - - let g = a.reserve_raw(off, grand); - assert!( - !g.is_null(), - "expected full cascade coalesce to grand block" - ); - a.free_raw(g, grand); - } - - #[test] - // Size rounding edge cases: allocate_raw rounds up to power-of-two and MIN_ALLOCATION, ensuring allocations don’t fail just because size isn’t a power of two. - // Currently failing; needs to be fixed? - fn reserve_out_of_range_returns_null() { - let a = BuddyAllocatorImpl::new(1 << 24); - let size = 1usize << 12; - - // definitely beyond arena - let ptr = a.reserve_raw(a.len() + size, size); - assert!(ptr.is_null()); - } - - #[test] - // Testing allocate_many_raw where partial failure does not poison the lock - // Currently hanging because partial failures is not working, test after that is fixed - fn allocate_many_partial_failure_does_not_poison_lock() { - let a = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - let mut ptrs = [core::ptr::null_mut(); 10000]; - let n = a.allocate_many_raw(size, &mut ptrs); - - assert!(n > 0); - assert!(n < ptrs.len()); - - a.free_many_raw(size, &ptrs[..n]); - - // If the lock leaked, this would hang. - let p = a.allocate_raw(size); - assert!(!p.is_null()); - a.free_raw(p, size); - } - - #[test] - // ensures try_* returns None when lock is held. - fn try_allocate_many_returns_none_when_locked() { - let a = BuddyAllocatorImpl::new(1 << 24); - let size = BuddyAllocatorImpl::MIN_ALLOCATION; - - // Manually lock allocator and ensure try_* fails. - unsafe { - a.inner.lock(); - } - let mut ptrs = [core::ptr::null_mut(); 4]; - let r = a.try_allocate_many_raw(size, &mut ptrs); - assert_eq!(r, None); - unsafe { - a.inner.unlock(); - } - - // Now it should work - let r2 = a.try_allocate_many_raw(size, &mut ptrs); - assert_eq!(r2, Some(4)); - a.free_many_raw(size, &ptrs); - } - - #[test] - // Interleaved patterns: A,B,C,D where (A,B) and (C,D) are buddy pairs. 
- // Freeing B and C alone should NOT make either parent available; - // freeing A then enables AB coalesce; freeing D then enables CD coalesce; then both parents can coalesce further. - fn interleaved_buddy_pairs_coalesce_independently_then_merge() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let grand = 1usize << 15; // 32KiB - let parent = 1usize << 14; // 16KiB - let child = 1usize << 13; // 8KiB - - // Known free 32KiB region - let g = a.allocate_raw(grand); - assert!(!g.is_null()); - let off = a.to_offset(g); - a.free_raw(g, grand); - - // Reserve A,B,C,D as 8KiB blocks at offsets 0,1,2,3 within the 32KiB region - let a0 = a.reserve_raw(off + 0 * child, child); // A - let b0 = a.reserve_raw(off + 1 * child, child); // B (buddy of A) - let c0 = a.reserve_raw(off + 2 * child, child); // C - let d0 = a.reserve_raw(off + 3 * child, child); // D (buddy of C) - assert!(!a0.is_null() && !b0.is_null() && !c0.is_null() && !d0.is_null()); - - // Free B and C only -> neither 16KiB parent should be reservable yet. - a.free_raw(b0, child); - a.free_raw(c0, child); - - assert!( - a.reserve_raw(off + 0 * parent, parent).is_null(), - "AB parent should not exist yet" - ); - assert!( - a.reserve_raw(off + 1 * parent, parent).is_null(), - "CD parent should not exist yet" - ); - - // Free A -> AB should coalesce to first 16KiB parent at off - a.free_raw(a0, child); - let p0 = a.reserve_raw(off + 0 * parent, parent); - assert!(!p0.is_null(), "AB should coalesce to 16KiB"); - a.free_raw(p0, parent); - - // Free D -> CD should coalesce to second 16KiB parent at off + 16KiB - a.free_raw(d0, child); - let p1 = a.reserve_raw(off + 1 * parent, parent); - assert!(!p1.is_null(), "CD should coalesce to 16KiB"); - a.free_raw(p1, parent); - - // Now both 16KiB parents are free -> should coalesce into 32KiB grandparent at off - let g2 = a.reserve_raw(off, grand); - assert!( - !g2.is_null(), - "two free 16KiB parents should coalesce to 32KiB" - ); - a.free_raw(g2, grand); - } - - #[test] - // Fragmentation scenario: partial coalescing with an obstacle. - // If one leaf remains reserved, upper levels must not fully coalesce; once obstacle freed, full coalesce should happen. - fn fragmentation_blocks_full_coalesce_until_obstacle_removed() { - let a = BuddyAllocatorImpl::new(1 << 24); - - let big = 1usize << 16; // 64KiB region we control - let leaf = 1usize << 12; // 4KiB - let n = big / leaf; // 16 leaves - - // Known free 64KiB region - let p = a.allocate_raw(big); - assert!(!p.is_null()); - let off = a.to_offset(p); - a.free_raw(p, big); - - // Reserve all leaves, keep one as "obstacle", free the rest. - let mut leaves = Vec::new(); - for i in 0..n { - let q = a.reserve_raw(off + i * leaf, leaf); - assert!(!q.is_null()); - leaves.push(q); - } - - let obstacle = leaves[7]; // arbitrary leaf to hold - for (_i, q) in leaves.iter().enumerate() { - if *q == obstacle { - continue; - } - a.free_raw(*q, leaf); - } - - // With one 4KiB still reserved, the full 64KiB block must NOT be available. - assert!( - a.reserve_raw(off, big).is_null(), - "should not fully coalesce with an obstacle leaf reserved" - ); - - // Now free the obstacle leaf -> full coalesce should become possible. 
-        a.free_raw(obstacle, leaf);
-        let big2 = a.reserve_raw(off, big);
-        assert!(
-            !big2.is_null(),
-            "after removing obstacle, should fully coalesce back to 64KiB"
-        );
-        a.free_raw(big2, big);
-    }
-
-    #[test]
-    // Reserved blocks shouldn't participate in coalescing:
-    // if one buddy is permanently reserved (held), the parent must not become available.
-    fn reserved_block_prevents_coalescing() {
-        let a = BuddyAllocatorImpl::new(1 << 24);
-
-        let parent = 1usize << 14; // 16KiB
-        let child = 1usize << 13; // 8KiB
-
-        // Known free parent region
-        let p = a.allocate_raw(parent);
-        assert!(!p.is_null());
-        let off = a.to_offset(p);
-        a.free_raw(p, parent);
-
-        // Reserve both children, but "reserve" one as a held block (simulate reservation that shouldn't coalesce).
-        let held = a.reserve_raw(off, child);
-        let other = a.reserve_raw(off + child, child);
-        assert!(!held.is_null() && !other.is_null());
-
-        // Free only the other -> parent must not appear
-        a.free_raw(other, child);
-        assert!(
-            a.reserve_raw(off, parent).is_null(),
-            "parent should not coalesce while one child is held/reserved"
-        );
-
-        // Once held is freed too, parent should become available
-        a.free_raw(held, child);
-        let p2 = a.reserve_raw(off, parent);
-        assert!(!p2.is_null());
-        a.free_raw(p2, parent);
-    }
-}
+mod tests;
diff --git a/common/src/buddy/tests.rs b/common/src/buddy/tests.rs
new file mode 100644
index 0000000..3b579e9
--- /dev/null
+++ b/common/src/buddy/tests.rs
@@ -0,0 +1,951 @@
+extern crate test;
+
+use super::*;
+#[test]
+// Setting/clearing individual bits in u64 words to check that the bit manipulation works
+fn test_bitref() {
+    let mut word = 10;
+
+    let mut r0 = BitRef::new(&mut word, 0);
+    r0.set();
+
+    let mut r1 = BitRef::new(&mut word, 1);
+    r1.clear();
+
+    let mut r2 = BitRef::new(&mut word, 2);
+    r2.write(false);
+
+    let mut r3 = BitRef::new(&mut word, 3);
+    r3.write(true);
+
+    assert_eq!(word, 9);
+}
+
+#[test]
+// Setting/clearing individual bits in a BitSlice to check that the bit manipulation works
+fn test_bitslice() {
+    let mut words = [0; 2];
+    let mut slice = BitSlice::new(128, &mut words);
+    let mut r0 = slice.bit(0);
+    r0.set();
+
+    let mut r1 = slice.bit(1);
+    r1.set();
+
+    let mut r127 = slice.bit(127);
+    r127.set();
+
+    assert_eq!(words[0], 3);
+    assert_eq!(
+        words[127 / (core::mem::size_of::<u64>() * 8)],
+        1 << (127 % (core::mem::size_of::<u64>() * 8))
+    );
+}
+
+#[test]
+// Basic setup + continuous element pushing to check that the allocator grows and adjusts properly
+fn test_buddy_allocator() {
+    let allocator = BuddyAllocatorImpl::new(0x10000000);
+
+    let test = Box::new_in(10, allocator.clone());
+    assert_eq!(*test, 10);
+
+    let mut v = Vec::new_in(allocator.clone());
+    for i in 0..10000 {
+        v.push(i);
+    }
+}
+
+#[test]
+// Verifying that too-small allocations of the allocator do not panic
+// Potential issue: reserve_unchecked does not validate that the requested index is within the number of blocks at that level
+fn test_too_small_allocation() {
+    let allocator = BuddyAllocatorImpl::new(1 << 20);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let _used_before = allocator.used_size();
+    let _ptr = allocator.allocate_raw(size);
+}
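+
+#[test]
+// Illustrative sketch, not part of the original patch: assuming requests below
+// MIN_ALLOCATION round up, even a 1-byte request should consume at least
+// MIN_ALLOCATION bytes of the arena, and freeing with the original size should
+// take the same rounding path.
+fn test_one_byte_request_rounds_up_sketch() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let used_before = allocator.used_size();
+    let ptr = allocator.allocate_raw(1);
+    assert!(!ptr.is_null());
+    assert!(allocator.used_size() >= used_before + BuddyAllocatorImpl::MIN_ALLOCATION);
+    allocator.free_raw(ptr, 1);
+}
+
+#[test]
+// Verifying allocate_raw adds size to used_size, and free_raw subtracts it back, returning usage to the original amount.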
+fn test_allocate_raw_and_used_size() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before + size);
+    allocator.free_raw(ptr, size);
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying allocate_many_raw adds size to used_size, and free_many_raw subtracts it back, returning usage to the original amount.
+fn test_allocate_many_and_free_many() {
+    use std::collections::BTreeSet;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let mut ptrs = [core::ptr::null_mut(); 4];
+    let count = allocator.allocate_many_raw(size, &mut ptrs);
+    assert_eq!(count, ptrs.len());
+    assert!(ptrs.iter().all(|ptr| !ptr.is_null()));
+
+    let unique: BTreeSet<usize> = ptrs.iter().map(|ptr| *ptr as usize).collect();
+    assert_eq!(unique.len(), ptrs.len());
+
+    allocator.free_many_raw(size, &ptrs);
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying that to_offset and from_offset roundtrip correctly
+fn test_offset_roundtrip() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+
+    let offset = allocator.to_offset(ptr);
+    let roundtrip = allocator.from_offset::<u8>(offset);
+    assert_eq!(roundtrip as usize, ptr as usize);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Verifying that reserving at zero returns a null pointer and does not add to used_size
+fn test_reserve_raw_at_zero() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let used_before = allocator.used_size();
+    let ptr = allocator.reserve_raw(0, size);
+    assert!(ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying that allocating too large returns a null pointer and does not add to used_size
+fn test_allocate_raw_too_large_returns_null() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let used_before = allocator.used_size();
+    let ptr = allocator.allocate_raw(allocator.total_size() * 2);
+    assert!(ptr.is_null());
+    assert_eq!(allocator.used_size(), used_before);
+}
+
+#[test]
+// Verifying that refcnt is zero on allocate
+fn test_refcnt_zero_on_allocate() {
+    use core::sync::atomic::Ordering;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+    let ptr = allocator.allocate_raw(size);
+    assert!(!ptr.is_null());
+
+    let refcnt = allocator.refcnt(ptr);
+    assert!(!refcnt.is_null());
+    let value = unsafe { (*refcnt).load(Ordering::SeqCst) };
+    assert_eq!(value, 0);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Stress testing the allocator with random allocations and frees
+fn stress_test() {
+    use std::hash::{BuildHasher, Hasher, RandomState};
+    let allocator = BuddyAllocatorImpl::new(0x10000000);
+    allocator.set_caching(false);
+    let mut v = vec![];
+    let random = |limit: usize| {
+        let x: u64 = RandomState::new().build_hasher().finish();
+        x as usize % limit
+    };
+    for _ in 0..100000 {
+        let used_before = allocator.used_size();
+        let remaining = allocator.total_size() - used_before;
+        let size = random(core::cmp::min(1 << 21, remaining / 2));
+        let alloc = Box::<[u8], BuddyAllocatorImpl>::new_uninit_slice_in(size, allocator.clone());
+
let used_after = allocator.used_size(); + assert!(used_after >= used_before + size); + if !v.is_empty() && size % 3 == 0 { + let number = random(v.len()); + for _ in 0..number { + let index = random(v.len()); + v.remove(index); + } + } + v.push(alloc); + } +} + +#[test] +// Test splitting large blocks into smaller ones +fn test_block_splitting() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let small_size = BuddyAllocatorImpl::MIN_ALLOCATION; + let large_size = small_size * 4; + + // Allocate and free a large block + let large_ptr = allocator.allocate_raw(large_size); + assert!(!large_ptr.is_null()); + allocator.free_raw(large_ptr, large_size); + + // Now allocate multiple small blocks - should split the large one + let mut small_ptrs = vec![]; + for _ in 0..4 { + let ptr = allocator.allocate_raw(small_size); + assert!(!ptr.is_null()); + small_ptrs.push(ptr); + } + + // Clean up + for ptr in small_ptrs { + allocator.free_raw(ptr, small_size); + } +} + +#[test] +fn test_reserve_specific_addresses() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + // Get a definitely-available block + let p = allocator.allocate_raw(size); + assert!(!p.is_null()); + let address = allocator.to_offset(p); + allocator.free_raw(p, size); + + // Now we should be able to reserve that exact address + let ptr1 = allocator.reserve_raw(address, size); + assert!(!ptr1.is_null()); + assert_eq!(allocator.to_offset(ptr1), address); + + // Reserving again should fail + let ptr2 = allocator.reserve_raw(address, size); + assert!(ptr2.is_null()); + + allocator.free_raw(ptr1, size); +} + +#[test] +// Test reserving overlapping regions +fn test_reserve_overlapping_regions() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + // Reserve a block + let ptr1 = allocator.reserve_raw(size * 5, size); + assert!(!ptr1.is_null()); + + // Try to reserve a larger block that would overlap + let ptr2 = allocator.reserve_raw(size * 4, size * 4); + assert!(ptr2.is_null()); // Should fail because it overlaps with ptr1 + + allocator.free_raw(ptr1, size); +} + +#[test] +// Test allocating all available memory +fn test_exhaust_memory() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + let mut ptrs = vec![]; + + // Allocate until we can't anymore + loop { + let ptr = allocator.allocate_raw(size); + if ptr.is_null() { + break; + } + ptrs.push(ptr); + } + + // Verify we actually allocated something + assert!(!ptrs.is_empty()); + + // Try one more allocation - should fail + let ptr = allocator.allocate_raw(size); + assert!(ptr.is_null()); + + // Free everything + for ptr in ptrs { + allocator.free_raw(ptr, size); + } + + // Should be able to allocate again + let ptr = allocator.allocate_raw(size); + assert!(!ptr.is_null()); + allocator.free_raw(ptr, size); +} + +#[test] +// Test mixed allocation sizes +fn test_mixed_allocation_sizes() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + let small = BuddyAllocatorImpl::MIN_ALLOCATION; + let medium = small * 4; + let large = small * 16; + + let ptr1 = allocator.allocate_raw(small); + let ptr2 = allocator.allocate_raw(large); + let ptr3 = allocator.allocate_raw(medium); + let ptr4 = allocator.allocate_raw(small); + + assert!(!ptr1.is_null()); + assert!(!ptr2.is_null()); + assert!(!ptr3.is_null()); + assert!(!ptr4.is_null()); + + // Verify they're all different + let ptrs = [ptr1, ptr2, ptr3, ptr4]; + for i in 0..ptrs.len() { 
+ for j in (i + 1)..ptrs.len() { + assert_ne!(ptrs[i], ptrs[j]); + } + } + + allocator.free_raw(ptr2, large); + allocator.free_raw(ptr1, small); + allocator.free_raw(ptr4, small); + allocator.free_raw(ptr3, medium); +} + +#[test] +// Test freeing in different order than allocation +fn test_free_reverse_order() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + let mut ptrs = vec![]; + for _ in 0..10 { + let ptr = allocator.allocate_raw(size); + assert!(!ptr.is_null()); + ptrs.push(ptr); + } + + let used_peak = allocator.used_size(); + + // Free in reverse order + for ptr in ptrs.iter().rev() { + allocator.free_raw(*ptr, size); + } + + assert!(allocator.used_size() < used_peak); +} + +#[test] +// Test allocation size rounding. Test after exhausting, allocator should still be usable -- no lock leak +fn allocation_rounds_up_to_pow2_and_min() { + let a = BuddyAllocatorImpl::new(1 << 24); + + // Request sizes that aren't powers of 2 + let ptr1 = a.allocate_raw(5000); // Should round to 8192 + let ptr2 = a.allocate_raw(1000); // Should round to 4096 + let ptr3 = a.allocate_raw(10000); // Should round to 16384 + + assert!(!ptr1.is_null()); + assert!(!ptr2.is_null()); + assert!(!ptr3.is_null()); + + a.free_raw(ptr1, 5000); + a.free_raw(ptr2, 1000); + a.free_raw(ptr3, 10000); + + let used0 = a.used_size(); + let p = a.allocate_raw(5000); // rounds to 8192 (and >= 4096) + assert!(!p.is_null()); + assert_eq!(a.used_size(), used0 + 8192); + + a.free_raw(p, 5000); // free uses same rounding path + assert_eq!(a.used_size(), used0); +} + +#[test] +// Confirm there is no overlap between levels +fn test_offset_calculation() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + let mut ranges = vec![]; + for level in allocator.inner.meta.level_range.clone() { + let offset = allocator.inner.offset_of_level_words(level); + let size = allocator.inner.size_of_level_words(level); + ranges.push((offset, offset + size, level)); + } + ranges.sort_by_key(|(start, _, _)| *start); + + for w in ranges.windows(2) { + let (_s1, e1, l1) = w[0]; + let (s2, _e2, l2) = w[1]; + assert!(e1 <= s2, "overlap between level {} and level {}", l1, l2); + } +} + +#[test] +// Test bitmap boundaries +fn test_bitmap_boundaries() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + + for level in allocator.inner.meta.level_range.clone() { + let bits = allocator.inner.size_of_level_bits(level); + let words = allocator.inner.size_of_level_words(level); + + // Verify words is enough to hold bits + assert!( + words * 64 >= bits, + "Level {} needs {} bits but only has {} words ({} bits)", + level, + bits, + words, + words * 64 + ); + } +} + +#[test] +// Test try_allocate_many_raw where everything should succeed easily +fn test_try_allocate_many_no_contention() { + let allocator = BuddyAllocatorImpl::new(1 << 24); + let size = BuddyAllocatorImpl::MIN_ALLOCATION; + + let mut ptrs = [core::ptr::null_mut(); 10]; + let result = allocator.try_allocate_many_raw(size, &mut ptrs); + + assert_eq!(result, Some(10)); + assert!(ptrs.iter().all(|p| !p.is_null())); + + allocator.free_many_raw(size, &ptrs); +} + +#[test] +// Testing allocating more pointers than space available, making sure bulk alloc stops cleanly when space out, +// and partial success allowed, reported successes are valid. 
+fn test_allocate_many_partial_success() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Request more blocks than available
+    let mut ptrs = [core::ptr::null_mut(); 10000];
+    let count = allocator.allocate_many_raw(size, &mut ptrs);
+
+    // Should have allocated some but not all
+    assert!(count > 0);
+    assert!(count < ptrs.len());
+
+    // All allocated pointers should be non-null
+    for i in 0..count {
+        assert!(!ptrs[i].is_null());
+    }
+
+    // Remaining should be null
+    for i in count..ptrs.len() {
+        assert!(ptrs[i].is_null());
+    }
+
+    // Clean up
+    allocator.free_many_raw(size, &ptrs[0..count]);
+}
+
+#[test]
+// Test that refcnt works for different allocation addresses
+fn test_refcnt_different_addresses() {
+    use core::sync::atomic::Ordering;
+
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let ptr1 = allocator.allocate_raw(size);
+    let ptr2 = allocator.allocate_raw(size);
+
+    let refcnt1 = allocator.refcnt(ptr1);
+    let refcnt2 = allocator.refcnt(ptr2);
+
+    // Should be different refcnt locations
+    assert_ne!(refcnt1, refcnt2);
+
+    // Both should be 0
+    assert_eq!(unsafe { (*refcnt1).load(Ordering::SeqCst) }, 0);
+    assert_eq!(unsafe { (*refcnt2).load(Ordering::SeqCst) }, 0);
+
+    allocator.free_raw(ptr1, size);
+    allocator.free_raw(ptr2, size);
+}
+
+#[test]
+// Test null pointer refcnt
+fn test_refcnt_null_pointer() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let refcnt = allocator.refcnt(core::ptr::null::<u8>());
+    assert!(refcnt.is_null());
+}
+
+#[test]
+// Test usage calculation
+fn test_usage_calculation() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let initial_usage = allocator.usage();
+
+    let ptr = allocator.allocate_raw(size);
+    let usage_after = allocator.usage();
+
+    assert!(usage_after > initial_usage);
+    assert!(usage_after <= 1.0);
+    assert!(usage_after >= 0.0);
+
+    allocator.free_raw(ptr, size);
+}
+
+#[test]
+// Test request counting
+fn test_request_counting() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut before = [0; 64];
+    let mut after = [0; 64];
+
+    allocator.requests(&mut before);
+
+    let ptr = allocator.allocate_raw(size);
+    allocator.free_raw(ptr, size);
+
+    allocator.requests(&mut after);
+
+    // Should have incremented request count for the size level
+    let level = size.next_power_of_two().ilog2() as usize;
+    assert!(after[level] > before[level]);
+}
+
+#[test]
+fn test_alignment_requirements() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+    let base = allocator.base() as usize;
+
+    for power in 12..20 {
+        let size = 1 << power;
+        let ptr = allocator.allocate_raw(size);
+        assert!(!ptr.is_null());
+
+        let addr = ptr as usize;
+        assert_eq!(
+            (addr - base) % size,
+            0,
+            "Allocation of size {} not aligned within arena",
+            size
+        );
+
+        allocator.free_raw(ptr, size);
+    }
+}
+
+#[test]
+// Test clone and drop behavior
+fn test_clone_and_drop() {
+    let allocator = BuddyAllocatorImpl::new(1 << 24);
+
+    let ptr1 = allocator.allocate_raw(4096);
+    assert!(!ptr1.is_null());
+
+    {
+        let clone = allocator.clone();
+        let ptr2 = clone.allocate_raw(4096);
+        assert!(!ptr2.is_null());
+        clone.free_raw(ptr2, 4096);
+        // clone drops here
+    }
+
+    // Original should still work
+    let ptr3 = allocator.allocate_raw(4096);
+    assert!(!ptr3.is_null());
+
+    allocator.free_raw(ptr1, 4096);
+ 
allocator.free_raw(ptr3, 4096); +} + +#[test] +// Allocate one block, compute its buddy address, and verify that reserving the buddy returns that address (if it’s free). +fn buddy_address_math_matches_reserve() { + let a = BuddyAllocatorImpl::new(1 << 24); + let base = a.base() as usize; + + for power in 12..18 { + let size = 1usize << power; + let p = a.allocate_raw(size); + assert!(!p.is_null()); + + let off = a.to_offset(p); + let idx = off / size; + let buddy_idx = idx ^ 1; + let buddy_off = buddy_idx * size; + + // If the buddy is free, reserve_raw must return exactly that address. + let b = a.reserve_raw(buddy_off, size); + if !b.is_null() { + assert_eq!(a.to_offset(b), buddy_off); + a.free_raw(b, size); + } + + a.free_raw(p, size); + + // (optional) base-relative alignment property + assert_eq!(((p as usize) - base) % size, 0); + } +} + +#[test] +// 'Create' a known free block at a known offset by allocating a large block, then freeing it, then reserving/allocating inside it. +fn split_large_block_into_smaller_blocks() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let big = 1usize << 16; // 64KiB + let small = 1usize << 12; // 4KiB + let factor = big / small; + + let p = a.allocate_raw(big); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, big); + + // Now reserve all 4KiB blocks inside that 64KiB region. + let mut blocks = Vec::new(); + for i in 0..factor { + let q = a.reserve_raw(off + i * small, small); + assert!(!q.is_null(), "failed to reserve sub-block {}", i); + blocks.push(q); + } + + // Free them back + for q in blocks { + a.free_raw(q, small); + } +} + +#[test] +// Reserve two buddy halves, free them, verify you can reserve the parent block at the exact parent address. +fn coalesce_two_buddies_into_parent() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + // Create a known free parent block at a known offset. + let p = a.allocate_raw(parent); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, parent); + + // Reserve both children (buddies). + let c0 = a.reserve_raw(off, child); + let c1 = a.reserve_raw(off + child, child); + assert!(!c0.is_null() && !c1.is_null()); + + // Free both; this should coalesce into the parent. + a.free_raw(c0, child); + a.free_raw(c1, child); + + // Now reserving the parent at 'off' should succeed. + let p2 = a.reserve_raw(off, parent); + assert!( + !p2.is_null(), + "parent block did not reappear after coalescing" + ); + assert_eq!(a.to_offset(p2), off); + + a.free_raw(p2, parent); +} + +#[test] +// Hold one child, free the other, ensure parent reservation fails at that exact parent address. +fn no_coalesce_if_only_one_buddy_free() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + let p = a.allocate_raw(parent); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, parent); + + let c0 = a.reserve_raw(off, child); + let c1 = a.reserve_raw(off + child, child); + assert!(!c0.is_null() && !c1.is_null()); + + // Free only one child + a.free_raw(c0, child); + + // Parent must NOT be reservable while the other buddy is still held. 
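+    // Buddy invariant: a parent block only exists once both of its children are free,
+    // and c1 is still held here.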
+    let parent_try = a.reserve_raw(off, parent);
+    assert!(
+        parent_try.is_null(),
+        "parent became available with one buddy still reserved"
+    );
+
+    // Cleanup
+    a.free_raw(c1, child);
+
+    // Now parent should be available (coalesced)
+    let parent_ok = a.reserve_raw(off, parent);
+    assert!(!parent_ok.is_null());
+    a.free_raw(parent_ok, parent);
+}
+
+#[test]
+// Free child1 then child0; ensure parent becomes available.
+fn coalesce_is_order_independent() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let parent = 1usize << 15; // 32KiB
+    let child = 1usize << 14; // 16KiB
+
+    let p = a.allocate_raw(parent);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, parent);
+
+    let c0 = a.reserve_raw(off, child);
+    let c1 = a.reserve_raw(off + child, child);
+    assert!(!c0.is_null() && !c1.is_null());
+
+    a.free_raw(c1, child);
+    a.free_raw(c0, child);
+
+    let p2 = a.reserve_raw(off, parent);
+    assert!(!p2.is_null());
+    a.free_raw(p2, parent);
+}
+
+#[test]
+// Free 4 children → coalesce to 2 parents → coalesce to 1 grandparent.
+fn multi_level_coalesce_cascades() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let grand = 1usize << 15; // 32KiB
+    let child = 1usize << 13; // 8KiB
+    let n = grand / child; // 4
+
+    let p = a.allocate_raw(grand);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, grand);
+
+    let mut kids = Vec::new();
+    for i in 0..n {
+        let k = a.reserve_raw(off + i * child, child);
+        assert!(!k.is_null());
+        kids.push(k);
+    }
+
+    // Free all kids -> should coalesce up to grand
+    for k in kids {
+        a.free_raw(k, child);
+    }
+
+    let g = a.reserve_raw(off, grand);
+    assert!(
+        !g.is_null(),
+        "expected full cascade coalesce to grand block"
+    );
+    a.free_raw(g, grand);
+}
+
+#[test]
+// Reserving an address beyond the end of the arena must fail and return null.
+// Currently failing; needs to be fixed.
+fn reserve_out_of_range_returns_null() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = 1usize << 12;
+
+    // definitely beyond arena
+    let ptr = a.reserve_raw(a.len() + size, size);
+    assert!(ptr.is_null());
+}
+
+#[test]
+// Test that a partial failure in allocate_many_raw does not poison the lock.
+// Currently hanging because partial failure handling is not working; re-test once that is fixed.
+fn allocate_many_partial_failure_does_not_poison_lock() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    let mut ptrs = [core::ptr::null_mut(); 10000];
+    let n = a.allocate_many_raw(size, &mut ptrs);
+
+    assert!(n > 0);
+    assert!(n < ptrs.len());
+
+    a.free_many_raw(size, &ptrs[..n]);
+
+    // If the lock leaked, this would hang.
+    let p = a.allocate_raw(size);
+    assert!(!p.is_null());
+    a.free_raw(p, size);
+}
+
+#[test]
+// Ensures try_allocate_many_raw returns None while the lock is already held.
+fn try_allocate_many_returns_none_when_locked() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let size = BuddyAllocatorImpl::MIN_ALLOCATION;
+
+    // Manually lock the allocator and ensure the try_ variant fails instead of blocking.
+    unsafe {
+        a.inner.lock();
+    }
+    let mut ptrs = [core::ptr::null_mut(); 4];
+    let r = a.try_allocate_many_raw(size, &mut ptrs);
+    assert_eq!(r, None);
+    unsafe {
+        a.inner.unlock();
+    }
+
+    // Now it should work
+    let r2 = a.try_allocate_many_raw(size, &mut ptrs);
+    assert_eq!(r2, Some(4));
+    a.free_many_raw(size, &ptrs);
+}
+
+#[test]
+// Interleaved patterns: A,B,C,D where (A,B) and (C,D) are buddy pairs.
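+// Layout within the 32KiB region: [ A | B | C | D ], each 8KiB; the candidate parents are [A B] and [C D].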
+// Freeing B and C alone should NOT make either parent available;
+// freeing A then enables AB coalesce; freeing D then enables CD coalesce; then both parents can coalesce further.
+fn interleaved_buddy_pairs_coalesce_independently_then_merge() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let grand = 1usize << 15; // 32KiB
+    let parent = 1usize << 14; // 16KiB
+    let child = 1usize << 13; // 8KiB
+
+    // Known free 32KiB region
+    let g = a.allocate_raw(grand);
+    assert!(!g.is_null());
+    let off = a.to_offset(g);
+    a.free_raw(g, grand);
+
+    // Reserve A,B,C,D as 8KiB blocks at offsets 0,1,2,3 within the 32KiB region
+    let a0 = a.reserve_raw(off + 0 * child, child); // A
+    let b0 = a.reserve_raw(off + 1 * child, child); // B (buddy of A)
+    let c0 = a.reserve_raw(off + 2 * child, child); // C
+    let d0 = a.reserve_raw(off + 3 * child, child); // D (buddy of C)
+    assert!(!a0.is_null() && !b0.is_null() && !c0.is_null() && !d0.is_null());
+
+    // Free B and C only -> neither 16KiB parent should be reservable yet.
+    a.free_raw(b0, child);
+    a.free_raw(c0, child);
+
+    assert!(
+        a.reserve_raw(off + 0 * parent, parent).is_null(),
+        "AB parent should not exist yet"
+    );
+    assert!(
+        a.reserve_raw(off + 1 * parent, parent).is_null(),
+        "CD parent should not exist yet"
+    );
+
+    // Free A -> AB should coalesce to first 16KiB parent at off
+    a.free_raw(a0, child);
+    let p0 = a.reserve_raw(off + 0 * parent, parent);
+    assert!(!p0.is_null(), "AB should coalesce to 16KiB");
+    a.free_raw(p0, parent);
+
+    // Free D -> CD should coalesce to second 16KiB parent at off + 16KiB
+    a.free_raw(d0, child);
+    let p1 = a.reserve_raw(off + 1 * parent, parent);
+    assert!(!p1.is_null(), "CD should coalesce to 16KiB");
+    a.free_raw(p1, parent);
+
+    // Now both 16KiB parents are free -> should coalesce into 32KiB grandparent at off
+    let g2 = a.reserve_raw(off, grand);
+    assert!(
+        !g2.is_null(),
+        "two free 16KiB parents should coalesce to 32KiB"
+    );
+    a.free_raw(g2, grand);
+}
+
+#[test]
+// Fragmentation scenario: partial coalescing with an obstacle.
+// If one leaf remains reserved, upper levels must not fully coalesce; once the obstacle is freed, a full coalesce should happen.
+fn fragmentation_blocks_full_coalesce_until_obstacle_removed() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+
+    let big = 1usize << 16; // 64KiB region we control
+    let leaf = 1usize << 12; // 4KiB
+    let n = big / leaf; // 16 leaves
+
+    // Known free 64KiB region
+    let p = a.allocate_raw(big);
+    assert!(!p.is_null());
+    let off = a.to_offset(p);
+    a.free_raw(p, big);
+
+    // Reserve all leaves, keep one as "obstacle", free the rest.
+    let mut leaves = Vec::new();
+    for i in 0..n {
+        let q = a.reserve_raw(off + i * leaf, leaf);
+        assert!(!q.is_null());
+        leaves.push(q);
+    }
+
+    let obstacle = leaves[7]; // arbitrary leaf to hold
+    for &q in leaves.iter() {
+        if q == obstacle {
+            continue;
+        }
+        a.free_raw(q, leaf);
+    }
+
+    // With one 4KiB still reserved, the full 64KiB block must NOT be available.
+    assert!(
+        a.reserve_raw(off, big).is_null(),
+        "should not fully coalesce with an obstacle leaf reserved"
+    );
+
+    // Now free the obstacle leaf -> full coalesce should become possible.
+    a.free_raw(obstacle, leaf);
+    let big2 = a.reserve_raw(off, big);
+    assert!(
+        !big2.is_null(),
+        "after removing obstacle, should fully coalesce back to 64KiB"
+    );
+    a.free_raw(big2, big);
+}
+
+#[test]
+// Reserved blocks shouldn't participate in coalescing:
+// if one buddy is permanently reserved (held), the parent must not become available.
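+// This mirrors no_coalesce_if_only_one_buddy_free, with the held child standing in for a
+// long-lived reservation taken through reserve_raw.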
+fn reserved_block_prevents_coalescing() { + let a = BuddyAllocatorImpl::new(1 << 24); + + let parent = 1usize << 14; // 16KiB + let child = 1usize << 13; // 8KiB + + // Known free parent region + let p = a.allocate_raw(parent); + assert!(!p.is_null()); + let off = a.to_offset(p); + a.free_raw(p, parent); + + // Reserve both children, but "reserve" one as a held block (simulate reservation that shouldn't coalesce). + let held = a.reserve_raw(off, child); + let other = a.reserve_raw(off + child, child); + assert!(!held.is_null() && !other.is_null()); + + // Free only the other -> parent must not appear + a.free_raw(other, child); + assert!( + a.reserve_raw(off, parent).is_null(), + "parent should not coalesce while one child is held/reserved" + ); + + // Once held is freed too, parent should become available + a.free_raw(held, child); + let p2 = a.reserve_raw(off, parent); + assert!(!p2.is_null()); + a.free_raw(p2, parent); +}
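+
+#[test]
+// Sketch of a possible follow-up case, not exercised elsewhere in this suite: used_size
+// should return exactly to its baseline once a burst of mixed-size allocations is fully
+// freed. Assumes free_raw applies the same size rounding as allocate_raw, which
+// allocation_rounds_up_to_pow2_and_min already checks.
+fn used_size_returns_to_baseline_after_mixed_burst() {
+    let a = BuddyAllocatorImpl::new(1 << 24);
+    let baseline = a.used_size();
+
+    // Powers of two at or above the assumed 4KiB MIN_ALLOCATION, so no rounding occurs.
+    let sizes = [1usize << 12, 1 << 13, 1 << 14, 1 << 12];
+    let mut live = Vec::new();
+    for &s in &sizes {
+        let p = a.allocate_raw(s);
+        assert!(!p.is_null());
+        live.push((p, s));
+    }
+
+    // Usage must have grown by at least the sum of the requested sizes.
+    assert!(a.used_size() >= baseline + sizes.iter().sum::<usize>());
+
+    for (p, s) in live {
+        a.free_raw(p, s);
+    }
+    assert_eq!(a.used_size(), baseline);
+}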