Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -11,21 +11,24 @@ use crate::error::Error;
/// Provides the confidential VM with a secret that was decoded from the attestation payload during the promotion of the VM to confidential
/// VM. Secret is written to the buffer allocated by the confidential VM and passed as arguments to the call.
pub struct RetrieveSecretRequest {
output_buffer_address: ConfidentialVmPhysicalAddress,
output_buffer_address: usize,
output_buffer_size: usize,
}

impl RetrieveSecretRequest {
pub const ADDRESS_ALIGNMENT: usize = core::mem::size_of::<usize>();

pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
Self {
output_buffer_address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
output_buffer_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
output_buffer_size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
}
}

pub fn handle(self, confidential_flow: ConfidentialFlow) -> ! {
let transformation = ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |ref confidential_vm| {
// ensure!(self.output_buffer_address.is_aligned_to(PageSize::Size4KiB.in_bytes()), Error::AddressNotAligned())?;
let output_buffer_address = ConfidentialVmPhysicalAddress::new(self.output_buffer_address)?;
ensure!(output_buffer_address.is_aligned_to(Self::ADDRESS_ALIGNMENT), Error::AddressNotAligned())?;
ensure!(self.output_buffer_size <= PageSize::Size4KiB.in_bytes(), Error::AddressNotAligned())?;
let secret = confidential_vm.secret(0)?;
ensure!(secret.len() <= self.output_buffer_size, Error::AddressNotAligned())?;
Expand All @@ -35,7 +38,7 @@ impl RetrieveSecretRequest {
(0..end_boundary).for_each(|i| buffer[i] = secret[offset + i]);
(end_boundary..8).for_each(|i| buffer[i] = 0u8);
let confidential_memory_address =
confidential_vm.memory_protector().translate_address(&self.output_buffer_address.add(offset))?;
confidential_vm.memory_protector().translate_address(&output_buffer_address.add(offset))?;
unsafe { confidential_memory_address.write_volatile(usize::from_le_bytes(buffer)) };
}
Ok(SbiResponse::success_with_code(secret.len()))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ impl AddMmioRegion {
match ControlDataStorage::try_confidential_vm_mut(confidential_flow.confidential_vm_id(), |mut confidential_vm| {
ensure!(self.region_start_address % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
ensure!(self.region_length % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
Ok(confidential_vm.add_mmio_region(ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length))?)
Ok(confidential_vm.add_mmio_region(ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)?)?)
}) {
Ok(_) => confidential_flow
.set_resumable_operation(ResumableOperation::SbiRequest())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,6 @@ use crate::core::control_data::{
};
use crate::core::memory_layout::ConfidentialVmPhysicalAddress;

use core::mem;

pub struct MmioAccessFault {
cause: usize,
mtval: usize,
Expand All @@ -19,15 +17,15 @@ pub struct MmioAccessFault {
}

impl MmioAccessFault {
pub const ADDRESS_ALIGNMENT: usize = mem::size_of::<usize>();
pub const ADDRESS_ALIGNMENT: usize = core::mem::size_of::<usize>();

/// Builds an MMIO access fault descriptor from the trap parameters captured
/// when the confidential hart faulted on a guest-physical access.
pub fn new(cause: usize, mtval: usize, mtinst: usize, fault_address: usize) -> Self {
    Self { fault_address, mtinst, mtval, cause }
}

pub fn handle(self, mut confidential_flow: ConfidentialFlow) -> ! {
match ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |confidential_vm| {
let confidential_vm_physical_address = ConfidentialVmPhysicalAddress::new(self.fault_address);
let confidential_vm_physical_address = ConfidentialVmPhysicalAddress::new(self.fault_address)?;
let page_size = confidential_vm.memory_protector_mut().map_empty_page(confidential_vm_physical_address, PageSize::Size4KiB)?;
let request = RemoteHfenceGvmaVmid::all_harts(None, page_size, confidential_flow.confidential_vm_id());
confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
Expand All @@ -51,7 +49,7 @@ impl MmioAccessFault {

pub fn tried_to_access_valid_mmio_region(confidential_vm_id: ConfidentialVmId, fault_address: usize) -> bool {
ControlDataStorage::try_confidential_vm(confidential_vm_id, |confidential_vm| {
Ok(confidential_vm.is_mmio_region_defined(&ConfidentialVmMmioRegion::new(fault_address, Self::ADDRESS_ALIGNMENT)))
Ok(confidential_vm.is_mmio_region_defined(&ConfidentialVmMmioRegion::new(fault_address, Self::ADDRESS_ALIGNMENT)?))
})
.unwrap_or(false)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ impl RemoveMmioRegion {
match ControlDataStorage::try_confidential_vm_mut(confidential_flow.confidential_vm_id(), |mut confidential_vm| {
ensure!(self.region_start_address % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
ensure!(self.region_length % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
Ok(confidential_vm.remove_mmio_region(&ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)))
Ok(confidential_vm.remove_mmio_region(&ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)?))
}) {
Ok(_) => confidential_flow
.set_resumable_operation(ResumableOperation::SbiRequest())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@ use crate::confidential_flow::handlers::sbi::SbiResponse;
use crate::confidential_flow::handlers::shared_page::SharePageRequest;
use crate::confidential_flow::handlers::symmetrical_multiprocessing::RemoteHfenceGvmaVmid;
use crate::confidential_flow::{ApplyToConfidentialHart, ConfidentialFlow};
use crate::core::architecture::GeneralPurposeRegister;
use crate::core::architecture::{GeneralPurposeRegister, PageSize};
use crate::core::control_data::{ConfidentialHartRemoteCommand, ControlDataStorage, HypervisorHart};
use crate::core::memory_layout::NonConfidentialMemoryAddress;
use crate::core::memory_layout::{ConfidentialVmPhysicalAddress, NonConfidentialMemoryAddress};
use crate::error::Error;

/// Finishes the pending request of sharing a page between the confidential VM and the hypervisor. The hypervisor should provide information
Expand Down Expand Up @@ -40,11 +40,12 @@ impl SharePageComplete {

fn map_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<(), Error> {
ensure!(self.response_code == 0, Error::Failed())?;
// Security: check that the start address is located in the non-confidential memory
// Security: check that the start address is located in the non-confidential memory and is properly aligned
let hypervisor_address = NonConfidentialMemoryAddress::new(self.hypervisor_page_address as *mut usize)?;

ensure!(hypervisor_address.usize() % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
let address = ConfidentialVmPhysicalAddress::new(self.request.confidential_vm_physical_address)?;
ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |confidential_vm| {
let page_size = confidential_vm.memory_protector_mut().map_shared_page(hypervisor_address, self.request.address)?;
let page_size = confidential_vm.memory_protector_mut().map_shared_page(hypervisor_address, &address)?;
let request = RemoteHfenceGvmaVmid::all_harts(None, page_size, confidential_flow.confidential_vm_id());
confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
Ok(())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,14 @@ use crate::non_confidential_flow::DeclassifyToHypervisor;
/// allocate a page of non-confidential memory and return back the `host physical address` of this page. Control flows back to the
/// confidential hart if the request was invalid, e.g., the `guest physical address` was not correct.
pub struct SharePageRequest {
pub address: ConfidentialVmPhysicalAddress,
pub confidential_vm_physical_address: usize,
pub size: usize,
}

impl SharePageRequest {
pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
Self {
address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
confidential_vm_physical_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
}
}
Expand All @@ -41,8 +41,9 @@ impl SharePageRequest {
}

fn share_page_sbi_request(&self) -> Result<SbiRequest, Error> {
ensure!(self.address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
let address = ConfidentialVmPhysicalAddress::new(self.confidential_vm_physical_address)?;
ensure!(address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
ensure!(self.size == SharedPage::SIZE.in_bytes(), Error::InvalidParameter())?;
Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_SHARE_MEMORY, self.address.usize(), self.size))
Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_SHARE_MEMORY, address.usize(), self.size))
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -13,44 +13,41 @@ use crate::non_confidential_flow::DeclassifyToHypervisor;

/// Unshared memory that has been previously shared with the hypervisor.
pub struct UnsharePageRequest {
address: ConfidentialVmPhysicalAddress,
confidential_vm_physical_address: usize,
size: usize,
}

impl UnsharePageRequest {
pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
Self {
address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
confidential_vm_physical_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
}
}

pub fn handle(self, mut confidential_flow: ConfidentialFlow) -> ! {
match self.unmap_shared_page(&mut confidential_flow) {
Ok(_) => confidential_flow
Ok(sbi_request) => confidential_flow
.set_resumable_operation(ResumableOperation::SbiRequest())
.into_non_confidential_flow()
.declassify_and_exit_to_hypervisor(DeclassifyToHypervisor::SbiRequest(self.unshare_page_sbi_request())),
.declassify_and_exit_to_hypervisor(DeclassifyToHypervisor::SbiRequest(sbi_request)),
Err(error) => {
confidential_flow.apply_and_exit_to_confidential_hart(ApplyToConfidentialHart::SbiResponse(SbiResponse::error(error)))
}
}
}

fn unshare_page_sbi_request(&self) -> SbiRequest {
SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_UNSHARE_MEMORY, self.address.usize(), self.size)
}

fn unmap_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<(), Error> {
ensure!(self.address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
fn unmap_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<SbiRequest, Error> {
let address = ConfidentialVmPhysicalAddress::new(self.confidential_vm_physical_address)?;
ensure!(address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
ensure!(self.size == SharedPage::SIZE.in_bytes(), Error::InvalidParameter())?;

let confidential_vm_id = confidential_flow.confidential_vm_id();
ControlDataStorage::try_confidential_vm(confidential_vm_id, |confidential_vm| {
let unmapped_page_size = confidential_vm.memory_protector_mut().unmap_shared_page(&self.address)?;
let unmapped_page_size = confidential_vm.memory_protector_mut().unmap_shared_page(&address)?;
let request = RemoteHfenceGvmaVmid::all_harts(None, unmapped_page_size, confidential_vm_id);
confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
Ok(())
Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_UNSHARE_MEMORY, address.usize(), self.size))
})
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -177,9 +177,9 @@ impl PageTable {
/// The caller of this function must ensure that he synchronizes changes to page table configuration, i.e., by clearing address
/// translation caches.
pub fn map_shared_page(
&mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: ConfidentialVmPhysicalAddress,
&mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: &ConfidentialVmPhysicalAddress,
) -> Result<PageSize, Error> {
let shared_page = SharedPage::new(hypervisor_address, confidential_vm_physical_address)?;
let shared_page = SharedPage::new(hypervisor_address, confidential_vm_physical_address.clone())?;
let shared_page_size = shared_page.page_size();
self.map_page(&confidential_vm_physical_address, &shared_page_size, LogicalPageTableEntry::PageSharedWithHypervisor(shared_page))?;
Ok(shared_page_size)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ impl ConfidentialHart {

/// Constructs a confidential hart with the state of the non-confidential hart that made a call to promote the VM to confidential VM
pub fn from_vm_hart(
id: usize, program_counter: usize, fdt_address: ConfidentialVmPhysicalAddress, htimedelta: usize, shared_memory: &NaclSharedMemory,
id: usize, program_counter: usize, fdt_address: &ConfidentialVmPhysicalAddress, htimedelta: usize, shared_memory: &NaclSharedMemory,
) -> Self {
// We first create a confidential hart in the reset state and then fill this state with the runtime state of the hart that made a
// call to promote to confidential VM. This state consists of GPRs and VS-level CSRs.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0
use crate::core::memory_layout::ConfidentialVmPhysicalAddress;
use crate::error::Error;

/// Defines a range of guest physical addresses that the security monitor interprets as MMIO address and expose load/store operations to the
/// hypervisor. The hypervisor can then emulate them to provide access to virtual devices.
Expand All @@ -13,10 +14,10 @@ pub struct ConfidentialVmMmioRegion {
}

impl ConfidentialVmMmioRegion {
pub fn new(start_address: usize, size_in_bytes: usize) -> Self {
let base_address = ConfidentialVmPhysicalAddress::new(start_address);
/// Constructs an MMIO region spanning `size_in_bytes` bytes starting at `start_address`
/// in the confidential VM's guest-physical address space.
///
/// # Errors
/// Propagates the error from `ConfidentialVmPhysicalAddress::new` when `start_address`
/// is not accepted as a valid guest-physical address.
pub fn new(start_address: usize, size_in_bytes: usize) -> Result<Self, Error> {
    let base_address = ConfidentialVmPhysicalAddress::new(start_address)?;
    // `add` takes `&self`, so `base_address` remains usable for the second field.
    Ok(Self { one_past_the_end_address: base_address.add(size_in_bytes), base_address })
}

pub fn overlaps(&self, other: &Self) -> bool {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,18 @@
// SPDX-FileCopyrightText: 2023 IBM Corporation
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0
use crate::error::Error;

#[derive(PartialEq, PartialOrd, Clone, Copy)]
pub struct ConfidentialVmPhysicalAddress(usize);

impl ConfidentialVmPhysicalAddress {
pub fn new(confidential_vm_physical_address: usize) -> Self {
Self(confidential_vm_physical_address)
pub fn new(confidential_vm_physical_address: usize) -> Result<Self, Error> {
if confidential_vm_physical_address == 0 {
Err(Error::InvalidParameter())
} else {
Ok(Self(confidential_vm_physical_address))
}
}

pub fn add(&self, offset: usize) -> Self {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ impl ConfidentialVmMemoryProtector {
/// non-confidential memory. To guarantee confidential VM's correctness, the caller must ensure that he will perform `TLB shutdown` on
/// all confidential harts, so that all confidential harts see the newly mapped shared page.
pub fn map_shared_page(
&mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: ConfidentialVmPhysicalAddress,
&mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: &ConfidentialVmPhysicalAddress,
) -> Result<PageSize, Error> {
Ok(self.root_page_table.map_shared_page(hypervisor_address, confidential_vm_physical_address)?)
}
Expand Down
3 changes: 2 additions & 1 deletion security-monitor/src/core/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,13 @@
// SPDX-License-Identifier: Apache-2.0
pub mod architecture;
pub mod control_data;
pub mod interrupt_controller;
pub mod memory_layout;
pub mod memory_protector;
pub mod page_allocator;
pub mod time_controller;

mod hardware_setup;
mod heap_allocator;
mod initialization;
pub mod interrupt_controller;
mod panic;
13 changes: 13 additions & 0 deletions security-monitor/src/core/time_controller/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// SPDX-FileCopyrightText: 2023 IBM Corporation
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0

/// Provides read access to the platform's memory-mapped timer.
pub struct TimeController {}

impl TimeController {
    // NOTE(review): 0x200BFF8 matches the CLINT `mtime` register offset used by
    // common RISC-V platforms (e.g. QEMU virt) — confirm against the target
    // platform's memory map.
    const TIME_MMIO_ADDRESS: usize = 0x200BFF8;

    /// Returns the current value of the memory-mapped timer counter.
    pub fn read_time() -> usize {
        let mtime_register = Self::TIME_MMIO_ADDRESS as *const usize;
        // SAFETY: assumes TIME_MMIO_ADDRESS is a valid, readable MMIO location for
        // the lifetime of the security monitor. The volatile read prevents the
        // compiler from caching or eliding the hardware access.
        unsafe { mtime_register.read_volatile() }
    }
}
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
// SPDX-FileCopyrightText: 2023 IBM Corporation
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0
use crate::non_confidential_flow::handlers::cove_host_extension::PromoteToConfidentialVm;
use crate::non_confidential_flow::handlers::nested_acceleration_extension::NaclSetupSharedMemory;
use crate::non_confidential_flow::handlers::supervisor_binary_interface::SbiResponse;

/// Transformation of the hypervisor state created as a result of processing an SBI request from the hypervisor.
pub enum ApplyToHypervisorHart {
    /// An SBI response to be written into the hypervisor hart's state.
    SbiResponse(SbiResponse),
    /// Result of setting up the NACL shared memory between the security monitor and the hypervisor.
    SetSharedMemory(NaclSetupSharedMemory),
    /// Response to a request to promote a VM to a confidential VM; the promotion outcome is applied
    /// to the hypervisor hart together with the accompanying SBI response.
    PromoteResponse((PromoteToConfidentialVm, SbiResponse)),
}
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,7 @@ impl<'a> NonConfidentialFlow<'a> {
match transformation {
ApplyToHypervisorHart::SbiResponse(v) => v.apply_to_hypervisor_hart(self.hypervisor_hart_mut()),
ApplyToHypervisorHart::SetSharedMemory(v) => v.apply_to_hypervisor_hart(self.hypervisor_hart_mut()),
ApplyToHypervisorHart::PromoteResponse((v, r)) => v.apply_to_hypervisor_hart(self.hypervisor_hart_mut(), r),
}
unsafe { exit_to_hypervisor_asm() }
}
Expand Down
Loading
Loading