Add the union RELOCATE_AP_LOOP_ENTRY and split the path in
RelocateApLoop() into two:
1. 64-bit AMD processors with SEV-ES.
2. Intel processors (32-bit or 64-bit), 32-bit AMD processors, or
   64-bit AMD processors without SEV-ES.

Cc: Guo Dong <guo.d...@intel.com>
Cc: Ray Ni <ray...@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james...@intel.com>
Cc: Gua Guo <gua....@intel.com>
Signed-off-by: Yuanhao Xie <yuanhao....@intel.com>
---
 UefiCpuPkg/Library/MpInitLib/DxeMpLib.c | 70 ++++++++++++++++++++++++++++++++++++++++++----------------------------
 UefiCpuPkg/Library/MpInitLib/MpLib.h    |  6 ++++++
 2 files changed, 48 insertions(+), 28 deletions(-)
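
Note for reviewers (not part of the commit): below is a minimal standalone
sketch of the union-based dispatch this patch introduces -- one raw Data view
for the buffer copy and NULL check, aliased with typed entry points so
RelocateApLoop() can call the relocated code without an intermediate cast.
The names AP_LOOP_ENTRY, AP_LOOP_FUNC and GenericLoop are hypothetical
stand-ins for RELOCATE_AP_LOOP_ENTRY / ASM_RELOCATE_AP_LOOP; it is plain C,
not EDK2 code, and only illustrates the pattern.

    /*
     * Standalone sketch (hypothetical names, plain C) of the union-based
     * dispatch: a raw Data view aliased with typed entry points per path.
     */
    #include <stdio.h>
    #include <stdbool.h>

    typedef void (*AP_LOOP_FUNC)(bool MwaitSupport, unsigned int TargetCState);

    typedef union {
      void          *Data;         // raw view: buffer copy and NULL check
      AP_LOOP_FUNC  AmdSevEntry;   // typed view: SEV-ES path
      AP_LOOP_FUNC  GenericEntry;  // typed view: all other processors
    } AP_LOOP_ENTRY;

    static void
    GenericLoop (bool MwaitSupport, unsigned int TargetCState)
    {
      printf ("generic loop: mwait=%d, cstate=%u\n", MwaitSupport, TargetCState);
    }

    int
    main (void)
    {
      AP_LOOP_ENTRY  Entry;

      // InitMpGlobalData() points Data at the relocated code buffer; here a
      // local function stands in so the sketch runs by itself.
      Entry.GenericEntry = GenericLoop;

      if (Entry.Data != NULL) {    // mirrors ASSERT (mReservedApLoop.Data != NULL)
        Entry.GenericEntry (true, 1);
      }

      return 0;
    }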

diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index a84e9e33ba..e9ac858f4f 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
@@ -1,7 +1,7 @@
 /** @file
   MP initialize support functions for DXE phase.
 
-  Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>
+  Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
   SPDX-License-Identifier: BSD-2-Clause-Patent
 
 **/
@@ -20,14 +20,14 @@
 
 #define  AP_SAFE_STACK_SIZE  128
 
-CPU_MP_DATA       *mCpuMpData                  = NULL;
-EFI_EVENT         mCheckAllApsEvent            = NULL;
-EFI_EVENT         mMpInitExitBootServicesEvent = NULL;
-EFI_EVENT         mLegacyBootEvent             = NULL;
-volatile BOOLEAN  mStopCheckAllApsStatus       = TRUE;
-VOID              *mReservedApLoopFunc         = NULL;
-UINTN             mReservedTopOfApStack;
-volatile UINT32   mNumberToFinish = 0;
+CPU_MP_DATA             *mCpuMpData                  = NULL;
+EFI_EVENT               mCheckAllApsEvent            = NULL;
+EFI_EVENT               mMpInitExitBootServicesEvent = NULL;
+EFI_EVENT               mLegacyBootEvent             = NULL;
+volatile BOOLEAN        mStopCheckAllApsStatus       = TRUE;
+RELOCATE_AP_LOOP_ENTRY  mReservedApLoop;
+UINTN                   mReservedTopOfApStack;
+volatile UINT32         mNumberToFinish = 0;
 
 //
 // Begin wakeup buffer allocation below 0x88000
@@ -378,32 +378,46 @@ RelocateApLoop (
   IN OUT VOID  *Buffer
   )
 {
-  CPU_MP_DATA           *CpuMpData;
-  BOOLEAN               MwaitSupport;
-  ASM_RELOCATE_AP_LOOP  AsmRelocateApLoopFunc;
-  UINTN                 ProcessorNumber;
-  UINTN                 StackStart;
+  CPU_MP_DATA  *CpuMpData;
+  BOOLEAN      MwaitSupport;
+  UINTN        ProcessorNumber;
+  UINTN        StackStart;
 
   MpInitLibWhoAmI (&ProcessorNumber);
   CpuMpData    = GetCpuMpData ();
   MwaitSupport = IsMwaitSupport ();
   if (CpuMpData->UseSevEsAPMethod) {
+    //
+    // 64-bit AMD processors with SEV-ES
+    //
     StackStart = CpuMpData->SevEsAPResetStackStart;
+    mReservedApLoop.AmdSevEntry (
+                      MwaitSupport,
+                      CpuMpData->ApTargetCState,
+                      CpuMpData->PmCodeSegment,
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+                      (UINTN)&mNumberToFinish,
+                      CpuMpData->Pm16CodeSegment,
+                      CpuMpData->SevEsAPBuffer,
+                      CpuMpData->WakeupBuffer
+                      );
   } else {
+    //
+    // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+    //
     StackStart = mReservedTopOfApStack;
+    mReservedApLoop.GenericEntry (
+                      MwaitSupport,
+                      CpuMpData->ApTargetCState,
+                      CpuMpData->PmCodeSegment,
+                      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+                      (UINTN)&mNumberToFinish,
+                      CpuMpData->Pm16CodeSegment,
+                      CpuMpData->SevEsAPBuffer,
+                      CpuMpData->WakeupBuffer
+                      );
   }
 
-  AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
-  AsmRelocateApLoopFunc (
-    MwaitSupport,
-    CpuMpData->ApTargetCState,
-    CpuMpData->PmCodeSegment,
-    StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-    (UINTN)&mNumberToFinish,
-    CpuMpData->Pm16CodeSegment,
-    CpuMpData->SevEsAPBuffer,
-    CpuMpData->WakeupBuffer
-    );
   //
   // It should never reach here
   //
@@ -547,8 +561,8 @@ InitMpGlobalData (
                    );
   ASSERT_EFI_ERROR (Status);
 
-  mReservedApLoopFunc = (VOID *)(UINTN)Address;
-  ASSERT (mReservedApLoopFunc != NULL);
+  mReservedApLoop.Data = (VOID *)(UINTN)Address;
+  ASSERT (mReservedApLoop.Data != NULL);
 
   //
   // Make sure that the buffer memory is executable if NX protection is enabled
@@ -583,7 +597,7 @@ InitMpGlobalData (
   mReservedTopOfApStack = (UINTN)Address + ApSafeBufferSize;
   ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
   CopyMem (
-    mReservedApLoopFunc,
+    mReservedApLoop.Data,
     CpuMpData->AddressMap.RelocateApLoopFuncAddress,
     CpuMpData->AddressMap.RelocateApLoopFuncSize
     );
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index a73a89d2a5..81a95733fc 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -402,6 +402,12 @@ AsmExchangeRole (
   IN CPU_EXCHANGE_ROLE_INFO  *OthersInfo
   );
 
+typedef union {
+  VOID                    *Data;
+  ASM_RELOCATE_AP_LOOP    AmdSevEntry;  // 64-bit AMD processors with SEV-ES
+  ASM_RELOCATE_AP_LOOP    GenericEntry; // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+} RELOCATE_AP_LOOP_ENTRY;
+
 /**
   Get the pointer to CPU MP Data structure.
 
-- 
2.36.1.windows.1


