#define SLAUNCH_ERROR_NO_VENDOR_INFO   0xc0008009U
#define SLAUNCH_ERROR_BAD_VENDOR_INFO  0xc000800AU
#define SLAUNCH_ERROR_BAD_SLRT_ADDRESS 0xc000800BU
/* Errors for TPR (TXT Protected Range) based DMA protection. */
#define SLAUNCH_ERROR_TPR_INVALID      0xc000800CU
#define SLAUNCH_ERROR_TPR_UNSUPPORTED  0xc000800DU
#define SLAUNCH_ERROR_TPR_NOT_FOUND    0xc000800EU

/* Segment selectors used by the TXT AP boot path. */
#define TXT_AP_BOOT_CS 0x0030
#define TXT_AP_BOOT_DS 0x0038

/* Intel SDM: GETSEC Capability Result Encoding */
#define GETSEC_CAP_TXT_CHIPSET 1

/* SINIT/MLE capability bit for TPR (TXT Protected Range) DMA protection. */
#define TXT_SINIT_MLE_CAP_TPR_SUPPORT 14

7783#ifndef __ASSEMBLER__
7884
7985#include <xen/multiboot2.h>
@@ -259,6 +265,19 @@ struct heap_event_log_pointer_element2_1 {
259265 uint32_t next_record_offset ;
260266} __packed ;
261267
268+ /*
269+ * Extended data describing TPR (TXT Protected Range) DMA protection ranges.
270+ */
271+ struct txt_heap_tpr_range {
272+ uint64_t base ;
273+ uint64_t size ;
274+ } __packed ;
275+
276+ struct txt_heap_tpr_req_element {
277+ uint32_t count ;
278+ struct txt_heap_tpr_range ranges [0 ];
279+ } __packed ;
280+
/*
 * Functions to extract data from the Intel TXT Heap Memory.
 *
@@ -350,67 +369,151 @@ txt_find_ext_data_element(const struct txt_os_sinit_data *os_sinit, uint32_t typ
350369 return NULL ;
351370}
352371
353- static inline bool is_in_pmr (const struct txt_os_sinit_data * os_sinit ,
354- uint64_t base , uint32_t size , bool check_high )
372+ static inline bool is_in_dma_prot (const struct txt_os_sinit_data * os_sinit ,
373+ uint64_t base , uint32_t size , bool check_high )
355374{
375+ uint64_t lo_size , hi_base , hi_size ;
376+
356377 /* Check for size overflow. */
357378 if ( base + size < base )
358379 txt_reset (SLAUNCH_ERROR_INTEGER_OVERFLOW );
359380
381+ if ( os_sinit -> capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT ) )
382+ {
383+
384+ /*
385+ * txt_verify_dma_protection() has already validated presence and contents
386+ * of the TPR_REQ element.
387+ */
388+ const struct txt_heap_tpr_req_element * tpr_req = (const struct txt_heap_tpr_req_element * )
389+ txt_find_ext_data_element (os_sinit , TXT_HEAP_EXTDATA_TYPE_TPR_REQ )-> data ;
390+
391+ lo_size = tpr_req -> ranges [0 ].size ;
392+ if ( tpr_req -> count > 1 )
393+ {
394+ hi_base = tpr_req -> ranges [1 ].base ;
395+ hi_size = tpr_req -> ranges [1 ].size ;
396+ }
397+ else
398+ {
399+ hi_base = 0 ;
400+ hi_size = 0 ;
401+ }
402+ }
403+ else
404+ {
405+ lo_size = os_sinit -> vtd_pmr_lo_size ;
406+ hi_base = os_sinit -> vtd_pmr_hi_base ;
407+ hi_size = os_sinit -> vtd_pmr_hi_size ;
408+ }
409+
360410 /*
361- * txt_verify_pmr_ranges () makes sure the low range always starts at 0, so
362- * its size is also end address.
411+ * txt_verify_dma_protection () makes sure the low range always starts at
412+ * 0, so its size is also end address.
363413 */
364- if ( base + size <= os_sinit -> vtd_pmr_lo_size )
414+ if ( base + size <= lo_size )
365415 return true;
366416
367- if ( check_high && os_sinit -> vtd_pmr_hi_size != 0 )
417+ if ( check_high && hi_size != 0 )
368418 {
369- if ( base >= os_sinit -> vtd_pmr_hi_base &&
370- base + size <= os_sinit -> vtd_pmr_hi_base +
371- os_sinit -> vtd_pmr_hi_size )
419+ if ( base >= hi_base && base + size <= hi_base + hi_size )
372420 return true;
373421 }
374422
375423 return false;
376424}
377425
378- static inline void txt_verify_pmr_ranges (
426+ static inline void txt_verify_dma_protection (
379427 const struct txt_os_mle_data * os_mle ,
380428 const struct txt_os_sinit_data * os_sinit ,
381429 const struct slr_entry_intel_info * info ,
382430 uint32_t load_base_addr ,
383431 uint32_t tgt_base_addr ,
384432 uint32_t xen_size )
385433{
386- bool check_high_pmr = false;
434+ bool check_high = false;
387435
388- /* Verify the value of the low PMR base. It should always be 0. */
389- if ( os_sinit -> vtd_pmr_lo_base != 0 )
390- txt_reset (SLAUNCH_ERROR_LO_PMR_BASE );
436+ if ( os_sinit -> capabilities & (1u << TXT_SINIT_MLE_CAP_TPR_SUPPORT ) )
437+ {
438+ const struct txt_ext_data_element * tpr_req_data_element ;
439+ const struct txt_heap_tpr_req_element * tpr_req ;
391440
392- /*
393- * Low PMR size should not be 0 on current platforms. There is an ongoing
394- * transition to TPR-based DMA protection instead of PMR-based; this is not
395- * yet supported by the code.
396- */
397- if ( os_sinit -> vtd_pmr_lo_size == 0 )
398- txt_reset (SLAUNCH_ERROR_LO_PMR_SIZE );
441+ /*
442+ * For TPR-based DMA protection, it's not specified that the low
443+ * range must begin at address 0. For now though, we support only
444+ * 1- and 2-range configurations with the low range starting at 0.
445+ */
399446
400- /* Check if regions overlap. Treat regions with no hole between as error. */
401- if ( os_sinit -> vtd_pmr_hi_size != 0 &&
402- os_sinit -> vtd_pmr_hi_base <= os_sinit -> vtd_pmr_lo_size )
403- txt_reset (SLAUNCH_ERROR_HI_PMR_BASE );
447+ tpr_req_data_element = txt_find_ext_data_element (os_sinit , TXT_HEAP_EXTDATA_TYPE_TPR_REQ );
448+ if ( tpr_req_data_element == NULL )
449+ txt_reset (SLAUNCH_ERROR_TPR_NOT_FOUND );
450+ if ( tpr_req_data_element -> size < sizeof (struct txt_heap_tpr_req_element ) )
451+ txt_reset (SLAUNCH_ERROR_TPR_INVALID );
452+ tpr_req = (const struct txt_heap_tpr_req_element * )tpr_req_data_element -> data ;
453+ if ( tpr_req -> count < 1 )
454+ txt_reset (SLAUNCH_ERROR_TPR_INVALID );
455+ if ( tpr_req -> count > 2 )
456+ txt_reset (SLAUNCH_ERROR_TPR_UNSUPPORTED );
457+
458+ /* Low range must start at 0. */
459+ if ( tpr_req -> ranges [0 ].base != 0 )
460+ txt_reset (SLAUNCH_ERROR_TPR_UNSUPPORTED );
461+
462+ /* Size must not be 0. */
463+ if ( tpr_req -> ranges [0 ].size == 0 )
464+ txt_reset (SLAUNCH_ERROR_TPR_INVALID );
465+
466+ if ( tpr_req -> count > 1 )
467+ {
468+ /* Size must not be 0. */
469+ if ( tpr_req -> ranges [1 ].size == 0 )
470+ txt_reset (SLAUNCH_ERROR_TPR_INVALID );
471+
472+ /* Ranges must not overlap. */
473+ if ( tpr_req -> ranges [0 ].size > tpr_req -> ranges [1 ].base )
474+ txt_reset (SLAUNCH_ERROR_TPR_INVALID );
475+
476+ /* Overflow check. */
477+ if ( tpr_req -> ranges [1 ].base + tpr_req -> ranges [1 ].size < tpr_req -> ranges [1 ].size )
478+ txt_reset (SLAUNCH_ERROR_INTEGER_OVERFLOW );
479+
480+ /* All regions accessed by 32b code must be below 4G. */
481+ if ( tpr_req -> ranges [1 ].base + tpr_req -> ranges [1 ].size <=
482+ 0x100000000ULL )
483+ check_high = true;
484+ }
485+ }
486+ else
487+ {
488+ /* Verify the value of the low PMR base. It should always be 0. */
489+ if ( os_sinit -> vtd_pmr_lo_base != 0 )
490+ txt_reset (SLAUNCH_ERROR_LO_PMR_BASE );
404491
405- /* Check for size overflow. */
406- if ( os_sinit -> vtd_pmr_hi_base + os_sinit -> vtd_pmr_hi_size <
407- os_sinit -> vtd_pmr_hi_size )
408- txt_reset (SLAUNCH_ERROR_INTEGER_OVERFLOW );
492+ /*
493+ * Low PMR size should not be 0 on current platforms when PMR mode is
494+ * in use.
495+ */
496+ if ( os_sinit -> vtd_pmr_lo_size == 0 )
497+ txt_reset (SLAUNCH_ERROR_LO_PMR_SIZE );
409498
410- /* All regions accessed by 32b code must be below 4G. */
411- if ( os_sinit -> vtd_pmr_hi_base + os_sinit -> vtd_pmr_hi_size <=
412- 0x100000000ULL )
413- check_high_pmr = true;
499+ /*
500+ * Check if regions overlap. Treat regions with no hole between as
501+ * error.
502+ */
503+ if ( os_sinit -> vtd_pmr_hi_size != 0 &&
504+ os_sinit -> vtd_pmr_hi_base <= os_sinit -> vtd_pmr_lo_size )
505+ txt_reset (SLAUNCH_ERROR_HI_PMR_BASE );
506+
507+ /* Check for size overflow. */
508+ if ( os_sinit -> vtd_pmr_hi_base + os_sinit -> vtd_pmr_hi_size <
509+ os_sinit -> vtd_pmr_hi_size )
510+ txt_reset (SLAUNCH_ERROR_INTEGER_OVERFLOW );
511+
512+ /* All regions accessed by 32b code must be below 4G. */
513+ if ( os_sinit -> vtd_pmr_hi_base + os_sinit -> vtd_pmr_hi_size <=
514+ 0x100000000ULL )
515+ check_high = true;
516+ }
414517
415518 /*
416519 * ACM checks that TXT heap and MLE memory is protected against DMA. We have
@@ -419,30 +522,30 @@ static inline void txt_verify_pmr_ranges(
419522 * both pre- and post-relocation code is protected.
420523 */
421524
422- /* Check if all of Xen before relocation is covered by PMR . */
423- if ( !is_in_pmr (os_sinit , load_base_addr , xen_size , check_high_pmr ) )
525+ /* Check if all of Xen before relocation is covered. */
526+ if ( !is_in_dma_prot (os_sinit , load_base_addr , xen_size , check_high ) )
424527 txt_reset (SLAUNCH_ERROR_LO_PMR_MLE );
425528
426- /* Check if all of Xen after relocation is covered by PMR . */
529+ /* Check if all of Xen after relocation is covered. */
427530 if ( load_base_addr != tgt_base_addr &&
428- !is_in_pmr (os_sinit , tgt_base_addr , xen_size , check_high_pmr ) )
531+ !is_in_dma_prot (os_sinit , tgt_base_addr , xen_size , check_high ) )
429532 txt_reset (SLAUNCH_ERROR_LO_PMR_MLE );
430533
431534 /*
432- * If present, check that MBI is covered by PMR . MBI starts with 'uint32_t
535+ * If present, check that MBI is covered. MBI starts with 'uint32_t
433536 * total_size'.
434537 */
435538 if ( info -> boot_params_base != 0 )
436539 {
437540 const multiboot2_fixed_t * mbi =
438541 (const multiboot2_fixed_t * )(uintptr_t )info -> boot_params_base ;
439542
440- if ( !is_in_pmr (os_sinit , info -> boot_params_base , mbi -> total_size ,
441- check_high_pmr ) )
543+ if ( !is_in_dma_prot (os_sinit , info -> boot_params_base , mbi -> total_size ,
544+ check_high ) )
442545 txt_reset (SLAUNCH_ERROR_BUFFER_BEYOND_PMR );
443546 }
444547
445- /* Check if TPM event log (if present) is covered by PMR . */
548+ /* Check if TPM event log (if present) is covered by DMA protection . */
446549 /*
447550 * FIXME: currently commented out as GRUB allocates it in a hole between
448551 * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other
@@ -462,8 +565,8 @@ static inline void txt_verify_pmr_ranges(
462565 */
463566 /*
464567 if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
465- !is_in_pmr (os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
466- check_high_pmr ) )
568+ !is_in_dma_prot (os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
569+ check_high ) )
467570 txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
468571 */
469572}
0 commit comments