On 21-04-2025 21:57, Ross Philipson wrote:
The Secure Launch (SL) stub provides the entry point for Intel TXT to
jump to during the dynamic launch. The symbol sl_stub_entry is that
entry point, and its offset into the kernel is conveyed to the
launching code using the Measured Launch Environment (MLE) header in
the structure named mle_header. The offset of the MLE header is set
in the kernel_info.
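
For readers following along, the MLE header has roughly the shape
below (per the Intel TXT MLE Developer's Guide; the field names are
illustrative, not necessarily the patch's actual definitions):

#include <linux/types.h>

/* Approximate layout of the TXT MLE header (illustrative only). */
struct mle_header {
	u8  uuid[16];          /* fixed UUID identifying the MLE header */
	u32 header_len;        /* total header length in bytes */
	u32 version;           /* MLE header version */
	u32 entry_point;       /* offset of the entry point, i.e. sl_stub_entry */
	u32 first_valid_page;  /* first valid page of the MLE */
	u32 mle_start;         /* offset of the first MLE byte in the binary */
	u32 mle_end;           /* offset of the last MLE byte + 1 */
	u32 capabilities;      /* bit vector of MLE-supported capabilities */
	u32 cmdline_start;     /* starting linear address of the command line */
	u32 cmdline_end;       /* ending linear address of the command line */
};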
The routine sl_stub contains the very early dynamic launch setup code
responsible for setting up the basic operating environment to allow
the normal kernel startup_32 code to proceed. It is also responsible
for properly waking and handling the APs on Intel platforms.
The routine sl_main runs after entering 64-bit mode in the setup
kernel. It is responsible for measuring configuration and module
information before it is used. Examples of entities measured on Intel
x86 are the boot params, the kernel command line, the TXT heap, and
any external initramfs. In addition, this routine does some early
setup and validation of the environment, such as locating the TPM
event log and validating the location of various buffers to ensure
they are protected and not overlapping.
Signed-off-by: Ross Philipson <ross.philip...@oracle.com>
---
 Documentation/arch/x86/boot.rst       |  21 +
 arch/x86/boot/compressed/Makefile     |   3 +-
 arch/x86/boot/compressed/head_64.S    |  29 +
 arch/x86/boot/compressed/sl_main.c    | 597 +++++++++++++++++++++
 arch/x86/boot/compressed/sl_stub.S    | 731 ++++++++++++++++++++++++++
 arch/x86/include/uapi/asm/bootparam.h |   1 +
 arch/x86/kernel/asm-offsets.c         |  20 +
 7 files changed, 1401 insertions(+), 1 deletion(-)
create mode 100644 arch/x86/boot/compressed/sl_main.c
create mode 100644 arch/x86/boot/compressed/sl_stub.S
[clip]
index 000000000000..5e0fd0d7bd72
--- /dev/null
+++ b/arch/x86/boot/compressed/sl_main.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Secure Launch early measurement and validation routines.
+ *
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/boot.h>
+#include <asm/msr.h>
+#include <asm/mtrr.h>
+#include <asm/processor-flags.h>
+#include <asm/asm-offsets.h>
+#include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
+#include <linux/slr_table.h>
+#include <linux/slaunch.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
Consider reordering these headers for clarity and consistency.
+
+#define CAPS_VARIABLE_MTRR_COUNT_MASK 0xff
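
(For context: this mask extracts the variable MTRR count from the low
byte of IA32_MTRRCAP. A hedged sketch of how it would typically be
used, not code from this patch:)

#include <asm/msr.h>
#include <asm/msr-index.h>

/* The low byte of IA32_MTRRCAP (MSR_MTRRcap) is the variable MTRR count. */
static u8 variable_mtrr_count(void)
{
	return __rdmsr(MSR_MTRRcap) & CAPS_VARIABLE_MTRR_COUNT_MASK;
}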
+
+#define SL_TPM_LOG 1
+#define SL_TPM2_LOG 2
+
+static u64 sl_txt_read(u32 reg)
+{
+ return readq((void *)(u64)(TXT_PRIV_CONFIG_REGS_BASE + reg));
+}
+
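Side note: sl_txt_read above covers the read half of the private TXT
config space; presumably a matching write helper exists elsewhere in
the patch. A minimal sketch of what that counterpart would look like
(the name sl_txt_write is a guess):

#include <asm/io.h>

static void sl_txt_write(u32 reg, u64 val)
{
	writeq(val, (void *)(u64)(TXT_PRIV_CONFIG_REGS_BASE + reg));
}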
[clip]
+/*
+ * Process all EFI config entries and extend the measurements to the
+ * evtlog.
+ */
+static void sl_process_extend_uefi_config(struct slr_table *slrt)
+{
+ struct slr_entry_uefi_config *uefi_config;
+ u16 i;
+
+ uefi_config = slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_UEFI_CONFIG);
+
+ /* This entry is optional, depending on how the SL kernel was booted */
+ if (!uefi_config)
+ return;
+
+ for (i = 0; i < uefi_config->nr_entries; i++) {
+ sl_tpm_extend_evtlog(uefi_config->uefi_cfg_entries[i].pcr, TXT_EVTYPE_SLAUNCH,
+ (void *)uefi_config->uefi_cfg_entries[i].cfg,
+ uefi_config->uefi_cfg_entries[i].size,
+ uefi_config->uefi_cfg_entries[i].evt_info);
+ }
+}
+
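For readers without the slr_table.h patch handy, the fields accessed
in sl_process_extend_uefi_config imply a layout roughly like the
following (reconstructed from the accesses above; the real
definitions live in include/linux/slr_table.h and may differ):

#include <linux/types.h>

struct slr_uefi_cfg_entry {
	u16 pcr;           /* PCR to extend with this config item */
	u64 cfg;           /* physical address of the config data */
	u32 size;          /* size of the config data in bytes */
	char evt_info[64]; /* event description string (length is a guess) */
};

struct slr_entry_uefi_config {
	/* a common slr_entry header (tag/size) precedes these fields */
	u16 nr_entries;
	struct slr_uefi_cfg_entry uefi_cfg_entries[];
};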
+asmlinkage __visible void sl_check_region(void *base, u32 size)
+{
+ sl_check_pmr_coverage(base, size, false);
+}
+
+asmlinkage __visible void sl_main(void *bootparams)
+{
+ struct boot_params *bp = (struct boot_params *)bootparams;
Nit: there is an extra space before '='; please remove it.
+ struct txt_os_mle_data *os_mle_data;
+ struct slr_table *slrt;
+ void *txt_heap;
+
+ /*
+ * Ensure loadflags do not indicate a secure launch was done
+ * unless it really was.
+ */
+ bp->hdr.loadflags &= ~SLAUNCH_FLAG;
+
+ /*
+ * Currently only Intel TXT is supported for Secure Launch. Testing
+ * this value also indicates that the kernel was booted successfully
+ * through the Secure Launch entry point and is in SMX mode.
+ */
+ if (!(sl_cpu_type & SL_CPU_INTEL))
+ return;
+
+ slrt = sl_locate_and_validate_slrt();
+
+ /* Locate the TPM event log. */
+ sl_find_drtm_event_log(slrt);
+
+ /* Validate the location of the event log buffer before using it */
+ sl_validate_event_log_buffer();
+
+ /*
+ * Find the TPM hash algorithms used by the ACM and recorded in the
+ * event log.
+ */
+ if (tpm_log_ver == SL_TPM2_LOG)
+ sl_find_event_log_algorithms();
+
+ /*
+ * Sanitize the boot params before measuring them. Set the
+ * SLAUNCH_FLAG early since, if anything fails, the system will
+ * reset anyway.
+ */
+ sanitize_boot_params(bp);
+ bp->hdr.loadflags |= SLAUNCH_FLAG;
+
+ sl_check_pmr_coverage(bootparams, PAGE_SIZE, false);
+
+ /* Place SL-specific tags in the event log before and after the measurements */
+ sl_tpm_extend_evtlog(17, TXT_EVTYPE_SLAUNCH_START, NULL, 0, "");
+
+ sl_process_extend_policy(slrt);
+
+ sl_process_extend_uefi_config(slrt);
+
+ sl_tpm_extend_evtlog(17, TXT_EVTYPE_SLAUNCH_END, NULL, 0, "");
+
+ /* No PMR check is needed, the TXT heap is covered by the DPR */
+ txt_heap = (void *)sl_txt_read(TXT_CR_HEAP_BASE);
+ os_mle_data = txt_os_mle_data_start(txt_heap);
+
+ /*
+ * Now that the OS-MLE data is measured, ensure the MTRR and
+ * misc enable MSRs are what we expect.
+ */
+ sl_txt_validate_msrs(os_mle_data);
+}
diff --git a/arch/x86/boot/compressed/sl_stub.S b/arch/x86/boot/compressed/sl_stub.S
new file mode 100644
index 000000000000..6c0f0b2a062d
--- /dev/null
+++ b/arch/x86/boot/compressed/sl_stub.S
@@ -0,0 +1,731 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Secure Launch protected mode entry point.
+ *
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+ .code32
+ .text
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr.h>
+#include <asm/apicdef.h>
+#include <asm/trapnr.h>
+#include <asm/processor-flags.h>
+#include <asm/asm-offsets.h>
+#include <asm/bootparam.h>
+#include <asm/page_types.h>
+#include <asm/irq_vectors.h>
+#include <linux/slr_table.h>
+#include <linux/slaunch.h>
Same here: consider reordering these headers for clarity and consistency.
+
+/* CPUID: leaf 1, ECX, SMX feature bit */
+#define X86_FEATURE_BIT_SMX (1 << 6)
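
(As an aside, the same bit test spelled out in C for reference; this
is not part of the patch, just CPUID.1:ECX.SMX[bit 6]:)

#include <cpuid.h>

static int smx_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;
	return !!(ecx & X86_FEATURE_BIT_SMX);	/* CPUID.1:ECX bit 6 */
}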
+
+#define IDT_VECTOR_LO_BITS 0
+#define IDT_VECTOR_HI_BITS 6
+
[clip]
+ jz .Lwake_getsec
+
+ /* Wake using MWAIT MONITOR */
+ movl $1, (%edi)
+ jmp .Laps_awake
+
+.Lwake_getsec:
+ /* Wake using GETSEC(WAKEUP) */
+ GETSEC $(SMX_X86_GETSEC_WAKEUP)
+
+.Laps_awake:
+ /*
+ * All of the APs are woken up and rendesvous in the relocated wake
Typo: "rendesvous" -> "rendezvous".
+ * block starting at sl_txt_ap_wake_begin. Wait for all of them to
+ * halt.
+ */
+ pause
+ cmpl rva(sl_txt_cpu_count)(%ebx), %edx
+ jne .Laps_awake
+
+ popl %esi
+ ret
+SYM_FUNC_END(sl_txt_wake_aps)
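
(For clarity, a rough C equivalent of the wait loop above, assuming
%edx holds the expected AP count; illustrative only:)

static void wait_for_aps(volatile u32 *sl_txt_cpu_count, u32 expected)
{
	/* Spin, with PAUSE between polls, until all APs have checked in. */
	while (*sl_txt_cpu_count != expected)
		__builtin_ia32_pause();
}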
+
+/* This is the beginning of the relocated AP wake code block */
+ .global sl_txt_ap_wake_begin
+sl_txt_ap_wake_begin:
Thanks,
Alok