Skip to content

Commit a45708d

Browse files
committed
xen/arch/x86: add TPR (TXT Protected Range) DMA protection support
Recent Intel platforms advertise the TPR (TXT Protected Range) capability (bit 14 of the SINIT capabilities), an alternative to VT-d PMR for DMA protection. SINIT programs the chipset's TPR range registers from a HEAP_EXTDATA_TYPE_TPR_REQ element in the OS-to-SINIT extended data. Wire it up:

- Set capability bit 14 in the MLE header and bump the MLE version to 2.3.
- Add the TPR error codes, range/element types, and capability define.
- Rename is_in_pmr() to is_in_dma_prot() and txt_verify_pmr_ranges() to txt_verify_dma_protection() to reflect that they cover either PMR or TPR. Update the early/EFI call sites accordingly.
- In txt_verify_dma_protection(), when SINIT advertises TPR support, locate the TPR_REQ element via txt_find_ext_data_element(), validate the element size, the range count (1 or 2), that the low range starts at 0 and both ranges have nonzero, non-overflowing sizes, and that the ranges don't overlap; only enable high-range checks when the high range lies entirely below 4G.
- Fall through to the existing PMR validation otherwise.

Assisted-by: Claude:claude-opus-4-7
1 parent 6c6cb79 commit a45708d

4 files changed

Lines changed: 153 additions & 49 deletions

File tree

xen/arch/x86/boot/head.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -136,12 +136,12 @@ SYM(mle_header, DATA, LOCAL, 16)
136136
.long 0xa2555c0f /* UUID2 */
137137
.long 0x42b651cb /* UUID3 */
138138
.long (.Lmle_header_end - mle_header) /* MLE header size */
139-
.long 0x00020002 /* MLE version 2.2 */
139+
.long 0x00020003 /* MLE version 2.3 */
140140
.long (slaunch_stub_entry - start) /* Linear entry point of MLE (SINIT virt. address) */
141141
.long 0x00000000 /* First valid page of MLE */
142142
.long 0x00000000 /* Offset within binary of first byte of MLE */
143143
.long (_end - start) /* Offset within binary of last byte + 1 of MLE */
144-
.long 0x00000723 /* Bit vector of MLE-supported capabilities */
144+
.long 0x00004723 /* Bit vector of MLE-supported capabilities */
145145
.long 0x00000000 /* Starting linear address of command line (unused) */
146146
.long 0x00000000 /* Ending linear address of command line (unused) */
147147
.Lmle_header_end:

xen/arch/x86/boot/slaunch-early.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,6 @@ void asmlinkage slaunch_early_init(uint32_t load_base_addr,
113113

114114
result->mbi_pa = intel_info->boot_params_base;
115115

116-
txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
117-
load_base_addr, tgt_base_addr, size);
116+
txt_verify_dma_protection(os_mle, os_sinit, intel_info,
117+
load_base_addr, tgt_base_addr, size);
118118
}

xen/arch/x86/efi/efi-boot.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -264,8 +264,9 @@ void __init asmlinkage noreturn start_xen_from_efi(void)
264264
const struct txt_os_sinit_data *os_sinit =
265265
txt_start(txt_heap, TXT_OS2SINIT);
266266

267-
txt_verify_pmr_ranges(os_mle, os_sinit, intel_info, xen_phys_start,
268-
xen_phys_start, xen_image_size);
267+
txt_verify_dma_protection(os_mle, os_sinit, intel_info,
268+
xen_phys_start, xen_phys_start,
269+
xen_image_size);
269270
}
270271
}
271272

xen/arch/x86/include/asm/intel-txt.h

Lines changed: 146 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,9 @@
6565
#define SLAUNCH_ERROR_NO_VENDOR_INFO 0xc0008009U
6666
#define SLAUNCH_ERROR_BAD_VENDOR_INFO 0xc000800AU
6767
#define SLAUNCH_ERROR_BAD_SLRT_ADDRESS 0xc000800BU
68+
#define SLAUNCH_ERROR_TPR_INVALID 0xc000800CU
69+
#define SLAUNCH_ERROR_TPR_UNSUPPORTED 0xc000800DU
70+
#define SLAUNCH_ERROR_TPR_NOT_FOUND 0xc000800EU
6871

6972
#define TXT_AP_BOOT_CS 0x0030
7073
#define TXT_AP_BOOT_DS 0x0038
@@ -74,6 +77,9 @@
7477
/* Intel SDM: GETSEC Capability Result Encoding */
7578
#define GETSEC_CAP_TXT_CHIPSET 1
7679

80+
/* SINIT/MLE capability bit for TPR (TXT Protected Range) DMA protection. */
81+
#define TXT_SINIT_MLE_CAP_TPR_SUPPORT 14
82+
7783
#ifndef __ASSEMBLER__
7884

7985
#include <xen/multiboot2.h>
@@ -259,6 +265,19 @@ struct heap_event_log_pointer_element2_1 {
259265
uint32_t next_record_offset;
260266
} __packed;
261267

268+
/*
269+
* Extended data describing TPR (TXT Protected Range) DMA protection ranges.
270+
*/
271+
struct txt_heap_tpr_range {
272+
uint64_t base;
273+
uint64_t size;
274+
} __packed;
275+
276+
struct txt_heap_tpr_req_element {
277+
uint32_t count;
278+
struct txt_heap_tpr_range ranges[0];
279+
} __packed;
280+
262281
/*
263282
* Functions to extract data from the Intel TXT Heap Memory.
264283
*
@@ -350,67 +369,151 @@ txt_find_ext_data_element(const struct txt_os_sinit_data *os_sinit, uint32_t typ
350369
return NULL;
351370
}
352371

353-
static inline bool is_in_pmr(const struct txt_os_sinit_data *os_sinit,
354-
uint64_t base, uint32_t size, bool check_high)
372+
static inline bool is_in_dma_prot(const struct txt_os_sinit_data *os_sinit,
373+
uint64_t base, uint32_t size, bool check_high)
355374
{
375+
uint64_t lo_size, hi_base, hi_size;
376+
356377
/* Check for size overflow. */
357378
if ( base + size < base )
358379
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
359380

381+
if ( os_sinit->capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT) )
382+
{
383+
384+
/*
385+
* txt_verify_dma_protection() has already validated presence and contents
386+
* of the TPR_REQ element.
387+
*/
388+
const struct txt_heap_tpr_req_element *tpr_req = (const struct txt_heap_tpr_req_element *)
389+
txt_find_ext_data_element(os_sinit, TXT_HEAP_EXTDATA_TYPE_TPR_REQ)->data;
390+
391+
lo_size = tpr_req->ranges[0].size;
392+
if ( tpr_req->count > 1 )
393+
{
394+
hi_base = tpr_req->ranges[1].base;
395+
hi_size = tpr_req->ranges[1].size;
396+
}
397+
else
398+
{
399+
hi_base = 0;
400+
hi_size = 0;
401+
}
402+
}
403+
else
404+
{
405+
lo_size = os_sinit->vtd_pmr_lo_size;
406+
hi_base = os_sinit->vtd_pmr_hi_base;
407+
hi_size = os_sinit->vtd_pmr_hi_size;
408+
}
409+
360410
/*
361-
* txt_verify_pmr_ranges() makes sure the low range always starts at 0, so
362-
* its size is also end address.
411+
* txt_verify_dma_protection() makes sure the low range always starts at
412+
* 0, so its size is also end address.
363413
*/
364-
if ( base + size <= os_sinit->vtd_pmr_lo_size )
414+
if ( base + size <= lo_size )
365415
return true;
366416

367-
if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
417+
if ( check_high && hi_size != 0 )
368418
{
369-
if ( base >= os_sinit->vtd_pmr_hi_base &&
370-
base + size <= os_sinit->vtd_pmr_hi_base +
371-
os_sinit->vtd_pmr_hi_size )
419+
if ( base >= hi_base && base + size <= hi_base + hi_size )
372420
return true;
373421
}
374422

375423
return false;
376424
}
377425

378-
static inline void txt_verify_pmr_ranges(
426+
static inline void txt_verify_dma_protection(
379427
const struct txt_os_mle_data *os_mle,
380428
const struct txt_os_sinit_data *os_sinit,
381429
const struct slr_entry_intel_info *info,
382430
uint32_t load_base_addr,
383431
uint32_t tgt_base_addr,
384432
uint32_t xen_size)
385433
{
386-
bool check_high_pmr = false;
434+
bool check_high = false;
387435

388-
/* Verify the value of the low PMR base. It should always be 0. */
389-
if ( os_sinit->vtd_pmr_lo_base != 0 )
390-
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
436+
if ( os_sinit->capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT) )
437+
{
438+
const struct txt_ext_data_element *tpr_req_data_element;
439+
const struct txt_heap_tpr_req_element *tpr_req;
391440

392-
/*
393-
* Low PMR size should not be 0 on current platforms. There is an ongoing
394-
* transition to TPR-based DMA protection instead of PMR-based; this is not
395-
* yet supported by the code.
396-
*/
397-
if ( os_sinit->vtd_pmr_lo_size == 0 )
398-
txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
441+
/*
442+
* For TPR-based DMA protection, it's not specified that the low
443+
* range must begin at address 0. For now though, we support only
444+
* 1- and 2-range configurations with the low range starting at 0.
445+
*/
399446

400-
/* Check if regions overlap. Treat regions with no hole between as error. */
401-
if ( os_sinit->vtd_pmr_hi_size != 0 &&
402-
os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
403-
txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
447+
tpr_req_data_element = txt_find_ext_data_element(os_sinit, TXT_HEAP_EXTDATA_TYPE_TPR_REQ);
448+
if ( tpr_req_data_element == NULL )
449+
txt_reset(SLAUNCH_ERROR_TPR_NOT_FOUND);
450+
if ( tpr_req_data_element->size < sizeof(struct txt_heap_tpr_req_element) )
451+
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
452+
tpr_req = (const struct txt_heap_tpr_req_element *)tpr_req_data_element->data;
453+
if ( tpr_req->count < 1 )
454+
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
455+
if ( tpr_req->count > 2 )
456+
txt_reset(SLAUNCH_ERROR_TPR_UNSUPPORTED);
457+
458+
/* Low range must start at 0. */
459+
if ( tpr_req->ranges[0].base != 0 )
460+
txt_reset(SLAUNCH_ERROR_TPR_UNSUPPORTED);
461+
462+
/* Size must not be 0. */
463+
if ( tpr_req->ranges[0].size == 0 )
464+
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
465+
466+
if ( tpr_req->count > 1 )
467+
{
468+
/* Size must not be 0. */
469+
if ( tpr_req->ranges[1].size == 0 )
470+
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
471+
472+
/* Ranges must not overlap. */
473+
if ( tpr_req->ranges[0].size > tpr_req->ranges[1].base )
474+
txt_reset(SLAUNCH_ERROR_TPR_INVALID);
475+
476+
/* Overflow check. */
477+
if ( tpr_req->ranges[1].base + tpr_req->ranges[1].size < tpr_req->ranges[1].size )
478+
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
479+
480+
/* All regions accessed by 32b code must be below 4G. */
481+
if ( tpr_req->ranges[1].base + tpr_req->ranges[1].size <=
482+
0x100000000ULL )
483+
check_high = true;
484+
}
485+
}
486+
else
487+
{
488+
/* Verify the value of the low PMR base. It should always be 0. */
489+
if ( os_sinit->vtd_pmr_lo_base != 0 )
490+
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
404491

405-
/* Check for size overflow. */
406-
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
407-
os_sinit->vtd_pmr_hi_size )
408-
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
492+
/*
493+
* Low PMR size should not be 0 on current platforms when PMR mode is
494+
* in use.
495+
*/
496+
if ( os_sinit->vtd_pmr_lo_size == 0 )
497+
txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
409498

410-
/* All regions accessed by 32b code must be below 4G. */
411-
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
412-
0x100000000ULL )
413-
check_high_pmr = true;
499+
/*
500+
* Check if regions overlap. Treat regions with no hole between as
501+
* error.
502+
*/
503+
if ( os_sinit->vtd_pmr_hi_size != 0 &&
504+
os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
505+
txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
506+
507+
/* Check for size overflow. */
508+
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
509+
os_sinit->vtd_pmr_hi_size )
510+
txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
511+
512+
/* All regions accessed by 32b code must be below 4G. */
513+
if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
514+
0x100000000ULL )
515+
check_high = true;
516+
}
414517

415518
/*
416519
* ACM checks that TXT heap and MLE memory is protected against DMA. We have
@@ -419,30 +522,30 @@ static inline void txt_verify_pmr_ranges(
419522
* both pre- and post-relocation code is protected.
420523
*/
421524

422-
/* Check if all of Xen before relocation is covered by PMR. */
423-
if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
525+
/* Check if all of Xen before relocation is covered. */
526+
if ( !is_in_dma_prot(os_sinit, load_base_addr, xen_size, check_high) )
424527
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
425528

426-
/* Check if all of Xen after relocation is covered by PMR. */
529+
/* Check if all of Xen after relocation is covered. */
427530
if ( load_base_addr != tgt_base_addr &&
428-
!is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
531+
!is_in_dma_prot(os_sinit, tgt_base_addr, xen_size, check_high) )
429532
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
430533

431534
/*
432-
* If present, check that MBI is covered by PMR. MBI starts with 'uint32_t
535+
* If present, check that MBI is covered. MBI starts with 'uint32_t
433536
* total_size'.
434537
*/
435538
if ( info->boot_params_base != 0 )
436539
{
437540
const multiboot2_fixed_t *mbi =
438541
(const multiboot2_fixed_t *)(uintptr_t)info->boot_params_base;
439542

440-
if ( !is_in_pmr(os_sinit, info->boot_params_base, mbi->total_size,
441-
check_high_pmr) )
543+
if ( !is_in_dma_prot(os_sinit, info->boot_params_base, mbi->total_size,
544+
check_high) )
442545
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
443546
}
444547

445-
/* Check if TPM event log (if present) is covered by PMR. */
548+
/* Check if TPM event log (if present) is covered by DMA protection. */
446549
/*
447550
* FIXME: currently commented out as GRUB allocates it in a hole between
448551
* PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
@@ -462,8 +565,8 @@ static inline void txt_verify_pmr_ranges(
462565
*/
463566
/*
464567
if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
465-
!is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
466-
check_high_pmr) )
568+
!is_in_dma_prot(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
569+
check_high) )
467570
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
468571
*/
469572
}

0 commit comments

Comments (0)