On 7/2/25 7:29 AM, 'Jan Beulich' via trenchboot-devel wrote:
On 30.05.2025 15:17, Sergii Dmytruk wrote:
From: Krystian Hebel <krystian.he...@3mdeb.com>
The file contains the base addresses of TXT register spaces, offsets of
registers within them, error codes, and inline functions for accessing
structures stored on the TXT heap.
xen/arch/x86/tboot.c is updated to use definitions from this new header
instead of duplicating them. The change in tboot_protect_mem_regions()
there is caused by going from NR_TXT_CONFIG_PAGES to
TXT_CONFIG_SPACE_SIZE, which avoids multiplying the number of pages by
the page size on every use.
Signed-off-by: Krystian Hebel <krystian.he...@3mdeb.com>
Signed-off-by: Sergii Dmytruk <sergii.dmyt...@3mdeb.com>
---
xen/arch/x86/include/asm/intel-txt.h | 297 +++++++++++++++++++++++++++
xen/arch/x86/tboot.c | 20 +-
2 files changed, 299 insertions(+), 18 deletions(-)
create mode 100644 xen/arch/x86/include/asm/intel-txt.h
Btw, a brief rev log would be nice here. I saw you have something in the
cover letter, but having to look in two places isn't very helpful.
--- /dev/null
+++ b/xen/arch/x86/include/asm/intel-txt.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Intel TXT is an implementation of DRTM in CPUs made by Intel (although the
+ * CPU alone isn't enough, the chipset must support TXT as well).
+ *
+ * Overview:
+ * https://www.intel.com/content/www/us/en/support/articles/000025873/processors.html
+ * Software Development Guide (SDG):
+ * https://www.intel.com/content/www/us/en/content-details/315168/
+ */
+
+#ifndef X86_INTEL_TXT_H
+#define X86_INTEL_TXT_H
+
+/*
+ * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
+ */
+#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000U
+#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000U
+
+/*
+ * The same set of registers is exposed twice (with different permissions) and
+ * they are allocated contiguously with page alignment.
+ */
+#define TXT_CONFIG_SPACE_SIZE \
+ (TXT_PUB_CONFIG_REGS_BASE - TXT_PRIV_CONFIG_REGS_BASE)
+
+/* Offsets from pub/priv config space. */
+#define TXTCR_STS 0x0000
+#define TXTCR_ESTS 0x0008
+#define TXTCR_ERRORCODE 0x0030
+#define TXTCR_CMD_RESET 0x0038
+#define TXTCR_CMD_CLOSE_PRIVATE 0x0048
+#define TXTCR_DIDVID 0x0110
+#define TXTCR_VER_EMIF 0x0200
+#define TXTCR_CMD_UNLOCK_MEM_CONFIG 0x0218
+#define TXTCR_SINIT_BASE 0x0270
+#define TXTCR_SINIT_SIZE 0x0278
+#define TXTCR_MLE_JOIN 0x0290
+#define TXTCR_HEAP_BASE 0x0300
+#define TXTCR_HEAP_SIZE 0x0308
+#define TXTCR_SCRATCHPAD 0x0378
+#define TXTCR_CMD_OPEN_LOCALITY1 0x0380
+#define TXTCR_CMD_CLOSE_LOCALITY1 0x0388
+#define TXTCR_CMD_OPEN_LOCALITY2 0x0390
+#define TXTCR_CMD_CLOSE_LOCALITY2 0x0398
+#define TXTCR_CMD_SECRETS 0x08e0
+#define TXTCR_CMD_NO_SECRETS 0x08e8
+#define TXTCR_E2STS 0x08f0
+
+/*
+ * Secure Launch Defined Error Codes used in MLE-initiated TXT resets.
+ *
+ * TXT Specification
+ * Appendix I ACM Error Codes
+ */
+#define SLAUNCH_ERROR_GENERIC 0xc0008001U
+#define SLAUNCH_ERROR_TPM_INIT 0xc0008002U
+#define SLAUNCH_ERROR_TPM_INVALID_LOG20 0xc0008003U
+#define SLAUNCH_ERROR_TPM_LOGGING_FAILED 0xc0008004U
+#define SLAUNCH_ERROR_REGION_STRADDLE_4GB 0xc0008005U
+#define SLAUNCH_ERROR_TPM_EXTEND 0xc0008006U
+#define SLAUNCH_ERROR_MTRR_INV_VCNT 0xc0008007U
+#define SLAUNCH_ERROR_MTRR_INV_DEF_TYPE 0xc0008008U
+#define SLAUNCH_ERROR_MTRR_INV_BASE 0xc0008009U
+#define SLAUNCH_ERROR_MTRR_INV_MASK 0xc000800aU
+#define SLAUNCH_ERROR_MSR_INV_MISC_EN 0xc000800bU
+#define SLAUNCH_ERROR_INV_AP_INTERRUPT 0xc000800cU
+#define SLAUNCH_ERROR_INTEGER_OVERFLOW 0xc000800dU
+#define SLAUNCH_ERROR_HEAP_WALK 0xc000800eU
+#define SLAUNCH_ERROR_HEAP_MAP 0xc000800fU
+#define SLAUNCH_ERROR_REGION_ABOVE_4GB 0xc0008010U
+#define SLAUNCH_ERROR_HEAP_INVALID_DMAR 0xc0008011U
+#define SLAUNCH_ERROR_HEAP_DMAR_SIZE 0xc0008012U
+#define SLAUNCH_ERROR_HEAP_DMAR_MAP 0xc0008013U
+#define SLAUNCH_ERROR_HI_PMR_BASE 0xc0008014U
+#define SLAUNCH_ERROR_HI_PMR_SIZE 0xc0008015U
+#define SLAUNCH_ERROR_LO_PMR_BASE 0xc0008016U
+#define SLAUNCH_ERROR_LO_PMR_SIZE 0xc0008017U
+#define SLAUNCH_ERROR_LO_PMR_MLE 0xc0008018U
+#define SLAUNCH_ERROR_INITRD_TOO_BIG 0xc0008019U
+#define SLAUNCH_ERROR_HEAP_ZERO_OFFSET 0xc000801aU
+#define SLAUNCH_ERROR_WAKE_BLOCK_TOO_SMALL 0xc000801bU
+#define SLAUNCH_ERROR_MLE_BUFFER_OVERLAP 0xc000801cU
+#define SLAUNCH_ERROR_BUFFER_BEYOND_PMR 0xc000801dU
+#define SLAUNCH_ERROR_OS_SINIT_BAD_VERSION 0xc000801eU
+#define SLAUNCH_ERROR_EVENTLOG_MAP 0xc000801fU
+#define SLAUNCH_ERROR_TPM_NUMBER_ALGS 0xc0008020U
+#define SLAUNCH_ERROR_TPM_UNKNOWN_DIGEST 0xc0008021U
+#define SLAUNCH_ERROR_TPM_INVALID_EVENT 0xc0008022U
+
+#define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254
+
+#ifndef __ASSEMBLY__
+
+/* Need to differentiate between pre- and post-paging-enabled environments. */
+#ifdef __EARLY_SLAUNCH__
+#include <xen/macros.h>
+#define _txt(x) _p(x)
+#else
+#include <xen/types.h>
+#include <asm/page.h> // __va()
Nit: No C++ style comments, please.
+#define _txt(x) __va(x)
+#endif
Without any uses the correctness of the above is hard to judge.
+/*
+ * Always use private space as some of the registers are either read-only or not
+ * present in public space.
+ */
+static inline uint64_t txt_read(unsigned int reg_no)
+{
+ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no);
+ return *reg;
+}
+
+static inline void txt_write(unsigned int reg_no, uint64_t val)
+{
+ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no);
+ *reg = val;
+}
+
+static inline void noreturn txt_reset(uint32_t error)
+{
+ txt_write(TXTCR_ERRORCODE, error);
+ txt_write(TXTCR_CMD_NO_SECRETS, 1);
+ txt_write(TXTCR_CMD_UNLOCK_MEM_CONFIG, 1);
+ /*
+ * This serves as TXT register barrier after writing to
+ * TXTCR_CMD_UNLOCK_MEM_CONFIG. Must be done to ensure that any future
+ * chipset operations see the write.
+ */
+ (void)txt_read(TXTCR_ESTS);
I don't think the cast is needed.
+ txt_write(TXTCR_CMD_RESET, 1);
+ unreachable();
What guarantees that the write takes immediate effect? That is, shouldn't there
be e.g. an infinite loop here, just in case?
This is a very good point. My experience with the TXT reset command is that
the command completes, but the reset may take a short while to occur
asynchronously. In that time, the BSP will keep running. I noticed this
because the code would keep running and crash elsewhere.
In the Linux version, this function does a hlt loop right after the
reset before the unreachable() statement. We also use reads of
TXT_CR_E2STS as r/w barriers per the spec.
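Roughly, the tail of txt_reset() would then look something like this (just a
sketch reusing the helpers from this header, not a verbatim copy of the Linux
code):

    txt_write(TXTCR_CMD_RESET, 1);

    /* The reset happens asynchronously; halt until it actually takes effect. */
    for ( ; ; )
        asm volatile ( "hlt" );

    unreachable();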
Thanks
Ross
+static inline uint64_t txt_bios_data_size(const void *heap)
+{
+ return *(const uint64_t *)heap - sizeof(uint64_t);
Like you already do here, ...
+}
+
+static inline void *txt_bios_data_start(const void *heap)
+{
+ return (void *)heap + sizeof(uint64_t);
... please don't cast away const-ness. I'm pretty sure I said before that
Misra objects to us doing so.
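For illustration, one const-preserving shape could be the following (assuming
callers only need read access to the BIOS data; just a sketch, not necessarily
how the series should resolve it):

    static inline const void *txt_bios_data_start(const void *heap)
    {
        /* Same GNU-style void pointer arithmetic as above, no cast needed. */
        return heap + sizeof(uint64_t);
    }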
@@ -409,7 +393,7 @@ int __init tboot_protect_mem_regions(void)
/* TXT Private Space */
rc = e820_change_range_type(&e820, TXT_PRIV_CONFIG_REGS_BASE,
- TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE,
+ TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_SIZE,
Was this perhaps meant to be TXT_CONFIG_SPACE_SIZE?
Jan