@@ -351,6 +351,29 @@ pub(crate) trait VirtualMachine: Debug + Send {
351351
352352#[ cfg( test) ]
353353mod tests {
354+ use super :: * ;
355+ use crate :: hypervisor:: regs:: { CommonSegmentRegister , CommonTableRegister } ;
356+
357+ fn boxed_vm ( ) -> Box < dyn VirtualMachine > {
358+ let available_vm = get_available_hypervisor ( ) . as_ref ( ) . unwrap ( ) ;
359+ match available_vm {
360+ #[ cfg( kvm) ]
361+ HypervisorType :: Kvm => {
362+ use crate :: hypervisor:: virtual_machine:: kvm:: KvmVm ;
363+ Box :: new ( KvmVm :: new ( ) . unwrap ( ) )
364+ }
365+ #[ cfg( mshv3) ]
366+ HypervisorType :: Mshv => {
367+ use crate :: hypervisor:: virtual_machine:: mshv:: MshvVm ;
368+ Box :: new ( MshvVm :: new ( ) . unwrap ( ) )
369+ }
370+ #[ cfg( target_os = "windows" ) ]
371+ HypervisorType :: Whp => {
372+ use crate :: hypervisor:: virtual_machine:: whp:: WhpVm ;
373+ Box :: new ( WhpVm :: new ( ) . unwrap ( ) )
374+ }
375+ }
376+ }
354377
355378 #[ test]
356379 // TODO: add support for testing on WHP
@@ -370,4 +393,210 @@ mod tests {
370393 }
371394 }
372395 }
396+
#[test]
fn regs() {
    let vm = boxed_vm();

    // Give every register a distinct value so a swapped field anywhere in
    // the get/set plumbing is caught by the equality check below.
    let regs = CommonRegisters {
        rax: 1,
        rbx: 2,
        rcx: 3,
        rdx: 4,
        rsi: 5,
        rdi: 6,
        rsp: 7,
        rbp: 8,
        r8: 9,
        r9: 10,
        r10: 11,
        r11: 12,
        r12: 13,
        r13: 14,
        r14: 15,
        r15: 16,
        rip: 17,
        // Only the always-set reserved bit (bit 1), so the RFLAGS value is
        // architecturally valid.
        rflags: 0x2,
    };

    vm.set_regs(&regs).unwrap();
    assert_eq!(vm.regs().unwrap(), regs);
}
426+
#[test]
fn fpu() {
    let vm = boxed_vm();

    // x87 FPU registers are 80-bit (10 bytes), stored in 16-byte slots for
    // alignment. Only the first 10 bytes are preserved; the remaining 6
    // bytes are reserved/zeroed.
    // See Intel® 64 and IA-32 Architectures SDM, Vol. 1, Sec. 10.5.1.1 (x87 State)
    let mut fpr_entry = [0u8; 16];
    fpr_entry[..10].fill(1);

    let fpu = CommonFpu {
        fpr: [fpr_entry; 8],
        fcw: 2,
        fsw: 3,
        ftwx: 4,
        last_opcode: 5,
        last_ip: 6,
        last_dp: 7,
        xmm: [[8; 16]; 16],
        mxcsr: 9,
    };
    vm.set_fpu(&fpu).unwrap();

    #[cfg_attr(not(kvm), allow(unused_mut))]
    let mut read_fpu = vm.fpu().unwrap();
    // KVM get/set fpu does not preserve mxcsr, so exclude it from the
    // round-trip comparison on that backend.
    #[cfg(kvm)]
    {
        read_fpu.mxcsr = fpu.mxcsr;
    }
    assert_eq!(fpu, read_fpu);
}
455+
#[test]
fn sregs() {
    let vm = boxed_vm();

    // Generic segment used for every selector except CS.
    let segment = CommonSegmentRegister {
        base: 1,
        limit: 2,
        selector: 3,
        type_: 3,
        present: 1,
        dpl: 1,
        db: 0,
        s: 1,
        l: 1,
        g: 0,
        avl: 1,
        unusable: 0,
        padding: 0,
    };

    // CS needs architecturally consistent bits for a 64-bit code segment.
    let cs_segment = CommonSegmentRegister {
        base: 1,
        limit: 0xFFFF,
        selector: 0x08,
        type_: 0b1011, // code segment, execute/read, accessed
        present: 1,
        dpl: 1,
        db: 0, // must be 0 in 64-bit mode
        s: 1,
        l: 1, // 64-bit mode
        g: 0, // KVM normalizes g to 0 for segments with small limits
        avl: 1,
        unusable: 0,
        padding: 0,
    };

    let table = CommonTableRegister { base: 12, limit: 13 };

    let sregs = CommonSpecialRegisters {
        cs: cs_segment,
        ds: segment,
        es: segment,
        fs: segment,
        gs: segment,
        ss: segment,
        tr: segment,
        ldt: segment,
        gdt: table,
        idt: table,
        cr0: 0x80000011, // bit 0 (PE) + bit 4 (ET) + bit 31 (PG)
        cr2: 2,
        cr3: 3,
        cr4: 0x20,
        cr8: 5,
        efer: 0x500,
        apic_base: 0xFEE00900,
        interrupt_bitmap: [0; 4],
    };

    vm.set_sregs(&sregs).unwrap();
    assert_eq!(vm.sregs().unwrap(), sregs);
}
519+
/// Allocates an `ExclusiveSharedMemory` of `size` bytes rounded up to a
/// whole number of pages, as guest memory mappings must be page-aligned.
#[cfg(any(kvm, mshv3))]
fn create_test_memory(size: usize) -> crate::mem::shared_mem::ExclusiveSharedMemory {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    let pages = size.div_ceil(PAGE_SIZE_USIZE);
    crate::mem::shared_mem::ExclusiveSharedMemory::new(pages * PAGE_SIZE_USIZE).unwrap()
}
527+
/// Builds a `MemoryRegion` describing `mem` mapped at `guest_base` with the
/// given access `flags`. The region type (`Heap`) is arbitrary for tests.
#[cfg(any(kvm, mshv3))]
fn region_for_test_memory(
    mem: &crate::mem::shared_mem::ExclusiveSharedMemory,
    guest_base: usize,
    flags: crate::mem::memory_region::MemoryRegionFlags,
) -> MemoryRegion {
    use crate::mem::memory_region::MemoryRegionType;
    use crate::mem::shared_mem::SharedMemory;
    let host_base = mem.base_addr();
    let size = mem.mem_size();
    MemoryRegion {
        host_region: host_base..host_base + size,
        guest_region: guest_base..guest_base + size,
        flags,
        region_type: MemoryRegionType::Heap,
    }
}
546+
#[test]
#[cfg(any(kvm, mshv3))] // Requires memory mapping support (TODO on WHP)
fn map_memory() {
    use crate::mem::memory_region::MemoryRegionFlags;

    let mut vm = boxed_vm();

    let rw_flags = MemoryRegionFlags::READ | MemoryRegionFlags::WRITE;

    let mem1 = create_test_memory(4096);
    let region = region_for_test_memory(&mem1, 0x1000, rw_flags);

    // SAFETY: the region points at valid memory owned by `mem1`, which
    // outlives every mapping below, and slot 0 is not already mapped.
    unsafe {
        vm.map_memory((0, &region)).unwrap();
    }

    // A mapped region can be unmapped exactly once...
    vm.unmap_memory((0, &region)).unwrap();
    vm.unmap_memory((0, &region)).unwrap_err();

    // ...and unmapping a slot that was never mapped must also fail.
    vm.unmap_memory((99, &region)).unwrap_err();

    // SAFETY: the memory is still valid and slot 1 is not mapped, so the
    // same region can be re-mapped to a different slot.
    unsafe {
        vm.map_memory((1, &region)).unwrap();
    }

    // A second, independent region can be mapped in parallel.
    let mem2 = create_test_memory(4096);
    let region2 = region_for_test_memory(&mem2, 0x2000, rw_flags);

    // SAFETY: memory owned by `mem2` is valid and slot 2 is not mapped.
    unsafe {
        vm.map_memory((2, &region2)).unwrap();
    }

    // Clean up: unmap both regions.
    vm.unmap_memory((1, &region)).unwrap();
    vm.unmap_memory((2, &region2)).unwrap();
}
373602}
0 commit comments