BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=4654

The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
be present when running at VMPL1 or higher. When an SVSM is present,
use the SVSM_CORE_PVALIDATE call to perform memory validation instead
of issuing the PVALIDATE instruction directly.

Move the current PVALIDATE functionality into the CcExitLib library,
where the presence of an SVSM can be determined and the proper
operation performed.

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
(An illustrative, hypothetical caller sketch for the new CcExitSnpPvalidate
entry point is appended after the patch.)

 OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c |  82 +-----
 OvmfPkg/Library/CcExitLib/CcExitSvsm.c                                | 311 ++++++++++++++++++++
 2 files changed, 321 insertions(+), 72 deletions(-)

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
index f8bbe4d6f46b..60d47ce090fe 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
@@ -17,11 +17,10 @@
 #include <Register/Amd/Ghcb.h>
 #include <Register/Amd/Msr.h>
+#include <Register/Amd/Svsm.h>
 
 #include "SnpPageStateChange.h"
 
-#define PAGES_PER_LARGE_ENTRY  512
-
 STATIC
 UINTN
 MemoryStateToGhcbOp (
@@ -63,73 +62,6 @@ SnpPageStateFailureTerminate (
   CpuDeadLoop ();
 }
 
-/**
-  This function issues the PVALIDATE instruction to validate or invalidate the memory
-  range specified. If PVALIDATE returns size mismatch then it retry validating with
-  smaller page size.
-
- */
-STATIC
-VOID
-PvalidateRange (
-  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
-  )
-{
-  UINTN                 RmpPageSize;
-  UINTN                 StartIndex;
-  UINTN                 EndIndex;
-  UINTN                 Index;
-  UINTN                 Ret;
-  EFI_PHYSICAL_ADDRESS  Address;
-  BOOLEAN               Validate;
-
-  StartIndex = Info->Header.CurrentEntry;
-  EndIndex   = Info->Header.EndEntry;
-
-  for ( ; StartIndex <= EndIndex; StartIndex++) {
-    //
-    // Get the address and the page size from the Info.
-    //
-    Address     = ((EFI_PHYSICAL_ADDRESS)Info->Entry[StartIndex].GuestFrameNumber) << EFI_PAGE_SHIFT;
-    RmpPageSize = Info->Entry[StartIndex].PageSize;
-    Validate    = Info->Entry[StartIndex].Operation == SNP_PAGE_STATE_PRIVATE;
-
-    Ret = AsmPvalidate (RmpPageSize, Validate, Address);
-
-    //
-    // If we fail to validate due to size mismatch then try with the
-    // smaller page size. This senario will occur if the backing page in
-    // the RMP entry is 4K and we are validating it as a 2MB.
-    //
-    if ((Ret == PVALIDATE_RET_SIZE_MISMATCH) && (RmpPageSize == PvalidatePageSize2MB)) {
-      for (Index = 0; Index < PAGES_PER_LARGE_ENTRY; Index++) {
-        Ret = AsmPvalidate (PvalidatePageSize4K, Validate, Address);
-        if (Ret) {
-          break;
-        }
-
-        Address = Address + EFI_PAGE_SIZE;
-      }
-    }
-
-    //
-    // If validation failed then do not continue.
-    //
-    if (Ret) {
-      DEBUG ((
-        DEBUG_ERROR,
-        "%a:%a: Failed to %a address 0x%Lx Error code %d\n",
-        gEfiCallerBaseName,
-        __func__,
-        Validate ? "Validate" : "Invalidate",
-        Address,
-        Ret
-        ));
-      SnpPageStateFailureTerminate ();
-    }
-  }
-}
-
 STATIC
 EFI_PHYSICAL_ADDRESS
 BuildPageStateBuffer (
@@ -145,6 +77,7 @@ BuildPageStateBuffer (
   UINTN                 Index;
   UINTN                 IndexMax;
   UINTN                 PscIndexMax;
+  UINTN                 SvsmIndexMax;
   UINTN                 RmpPageSize;
 
   // Clear the page state structure
@@ -159,11 +92,16 @@ BuildPageStateBuffer (
   // exiting from the guest to the hypervisor. Maximize the number of entries
   // that can be processed per exit.
   //
-  PscIndexMax = (IndexMax / SNP_PAGE_STATE_MAX_ENTRY) * SNP_PAGE_STATE_MAX_ENTRY;
+  PscIndexMax  = (IndexMax / SNP_PAGE_STATE_MAX_ENTRY) * SNP_PAGE_STATE_MAX_ENTRY;
+  SvsmIndexMax = (IndexMax / SVSM_PVALIDATE_MAX_ENTRY) * SVSM_PVALIDATE_MAX_ENTRY;
   if (PscIndexMax > 0) {
     IndexMax = MIN (IndexMax, PscIndexMax);
   }
 
+  if (SvsmIndexMax > 0) {
+    IndexMax = MIN (IndexMax, SvsmIndexMax);
+  }
+
   //
   // Populate the page state entry structure
   //
@@ -328,7 +266,7 @@ InternalSetPageState (
     // invalidate the pages before making the page shared in the RMP table.
     //
     if (State == SevSnpPageShared) {
-      PvalidateRange (Info);
+      CcExitSnpPvalidate (Info);
     }
 
     //
@@ -341,7 +279,7 @@ InternalSetPageState (
     // validate the pages after it has been added in the RMP table.
     //
     if (State == SevSnpPagePrivate) {
-      PvalidateRange (Info);
+      CcExitSnpPvalidate (Info);
     }
   }
 }
diff --git a/OvmfPkg/Library/CcExitLib/CcExitSvsm.c b/OvmfPkg/Library/CcExitLib/CcExitSvsm.c
index fb8b762caadc..43e0a357efa5 100644
--- a/OvmfPkg/Library/CcExitLib/CcExitSvsm.c
+++ b/OvmfPkg/Library/CcExitLib/CcExitSvsm.c
@@ -13,6 +13,312 @@
 #include <Register/Amd/Msr.h>
 #include <Register/Amd/Svsm.h>
 
+#define PAGES_PER_2MB_ENTRY  512
+
+/**
+  Terminate the guest using the GHCB MSR protocol.
+
+  Uses the GHCB MSR protocol to request that the guest be terminated.
+
+**/
+STATIC
+VOID
+SvsmTerminate (
+  VOID
+  )
+{
+  MSR_SEV_ES_GHCB_REGISTER  Msr;
+
+  //
+  // Use the GHCB MSR Protocol to request termination by the hypervisor
+  //
+  Msr.Uint64                      = 0;
+  Msr.GhcbTerminate.Function      = GHCB_INFO_TERMINATE_REQUEST;
+  Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
+  Msr.GhcbTerminate.ReasonCode    = GHCB_TERMINATE_GHCB_GENERAL;
+  AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.Uint64);
+
+  AsmVmgExit ();
+
+  ASSERT (FALSE);
+  CpuDeadLoop ();
+}
+
+/**
+  Return the address of the SVSM Call Area (CAA).
+
+  Determines the address of the SVSM CAA.
+
+  @return The address of the SVSM CAA
+
+**/
+STATIC
+SVSM_CAA *
+SvsmGetCaa (
+  VOID
+  )
+{
+  SVSM_INFORMATION  *SvsmInfo;
+
+  SvsmInfo = (SVSM_INFORMATION *)(UINTN)PcdGet32 (PcdOvmfSnpSecretsBase);
+
+  return CcExitSnpSvsmPresent () ? (SVSM_CAA *)SvsmInfo->SvsmCaa : NULL;
+}
+
+/**
+  Issue an SVSM request.
+
+  Invokes the SVSM to process a request on behalf of the guest.
+
+  @param[in,out] SvsmCallData  Pointer to the SVSM call data
+
+  @return Contents of RAX upon return from VMGEXIT
+**/
+STATIC
+UINTN
+SvsmMsrProtocol (
+  IN OUT SVSM_CALL_DATA  *SvsmCallData
+  )
+{
+  MSR_SEV_ES_GHCB_REGISTER  Msr;
+  UINT64                    CurrentMsr;
+  UINT8                     Pending;
+  BOOLEAN                   InterruptState;
+  UINTN                     Ret;
+
+  do {
+    //
+    // Be sure that an interrupt can't cause a #VC while the GHCB MSR protocol
+    // is being used (#VC handler will ASSERT if lower 12-bits are not zero).
+    //
+    InterruptState = GetInterruptState ();
+    if (InterruptState) {
+      DisableInterrupts ();
+    }
+
+    Pending                   = 0;
+    SvsmCallData->CallPending = &Pending;
+
+    CurrentMsr = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    Msr.Uint64                  = 0;
+    Msr.SnpVmplRequest.Function = GHCB_INFO_SNP_VMPL_REQUEST;
+    Msr.SnpVmplRequest.Vmpl     = 0;
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.Uint64);
+
+    //
+    // Guest memory is used for the guest-SVSM communication, so fence the
+    // invocation of the VMGEXIT instruction to ensure VMSA accesses are
+    // synchronized properly.
+    //
+    MemoryFence ();
+    Ret = AsmVmgExitSvsm (SvsmCallData);
+    MemoryFence ();
+
+    Msr.Uint64 = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, CurrentMsr);
+
+    if (InterruptState) {
+      EnableInterrupts ();
+    }
+
+    if (Pending != 0) {
+      SvsmTerminate ();
+    }
+
+    if ((Msr.SnpVmplResponse.Function != GHCB_INFO_SNP_VMPL_RESPONSE) ||
+        (Msr.SnpVmplResponse.ErrorCode != 0))
+    {
+      SvsmTerminate ();
+    }
+  } while (Ret == SVSM_ERR_INCOMPLETE || Ret == SVSM_ERR_BUSY);
+
+  return Ret;
+}
+
+/**
+  Issue an SVSM request to perform the PVALIDATE instruction.
+
+  Invokes the SVSM to process the PVALIDATE instruction on behalf of the
+  guest to validate or invalidate the memory range specified.
+
+  @param[in] Info  Pointer to a page state change structure
+
+**/
+STATIC
+VOID
+SvsmPvalidate (
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  SVSM_CALL_DATA          SvsmCallData;
+  SVSM_CAA                *Caa;
+  SVSM_PVALIDATE_REQUEST  *Request;
+  SVSM_FUNCTION           Function;
+  BOOLEAN                 Validate;
+  UINTN                   Entry;
+  UINTN                   EntryLimit;
+  UINTN                   Index;
+  UINTN                   EndIndex;
+  UINT64                  Gfn;
+  UINT64                  GfnEnd;
+  UINTN                   Ret;
+
+  Caa = SvsmGetCaa ();
+  SetMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer), 0);
+
+  Function.Id.Protocol = 0;
+  Function.Id.CallId   = 1;
+
+  Request    = (SVSM_PVALIDATE_REQUEST *)Caa->SvsmBuffer;
+  EntryLimit = ((sizeof (Caa->SvsmBuffer) - sizeof (*Request)) /
+                sizeof (Request->Entry[0])) - 1;
+
+  SvsmCallData.Caa   = Caa;
+  SvsmCallData.RaxIn = Function.Uint64;
+  SvsmCallData.RcxIn = (UINT64)(UINTN)Request;
+
+  Entry    = 0;
+  Index    = Info->Header.CurrentEntry;
+  EndIndex = Info->Header.EndEntry;
+
+  while (Index <= EndIndex) {
+    Validate = Info->Entry[Index].Operation == SNP_PAGE_STATE_PRIVATE;
+
+    Request->Header.Entries++;
+    Request->Entry[Entry].Bits.PageSize = Info->Entry[Index].PageSize;
+    Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+    Request->Entry[Entry].Bits.IgnoreCf = 0;
+    Request->Entry[Entry].Bits.Address  = Info->Entry[Index].GuestFrameNumber;
+
+    Entry++;
+    if ((Entry > EntryLimit) || (Index == EndIndex)) {
+      Ret = SvsmMsrProtocol (&SvsmCallData);
+      if ((Ret == SVSM_ERR_PVALIDATE_FAIL_SIZE_MISMATCH) &&
+          (Request->Entry[Request->Header.Next].Bits.PageSize != 0))
+      {
+        // Calculate the Index of the entry after the entry that failed
+        // before clearing the buffer so that processing can continue
+        // from that point
+        Index = Index - (Entry - Request->Header.Next) + 2;
+
+        // Obtain the failing GFN before clearing the buffer
+        Gfn = Request->Entry[Request->Header.Next].Bits.Address;
+
+        // Clear the buffer in prep for creating all new entries
+        SetMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer), 0);
+        Entry = 0;
+
+        GfnEnd = Gfn + 511;
+        for ( ; Gfn <= GfnEnd; Gfn++) {
+          Request->Header.Entries++;
+          Request->Entry[Entry].Bits.PageSize = 0;
+          Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+          Request->Entry[Entry].Bits.IgnoreCf = 0;
+          Request->Entry[Entry].Bits.Address  = Gfn;
+
+          Entry++;
+          if ((Entry > EntryLimit) || (Gfn == GfnEnd)) {
+            Ret = SvsmMsrProtocol (&SvsmCallData);
+            if (Ret != 0) {
+              SvsmTerminate ();
+            }
+
+            SetMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer), 0);
+            Entry = 0;
+          }
+        }
+
+        continue;
+      }
+
+      if (Ret != 0) {
+        SvsmTerminate ();
+      }
+
+      SetMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer), 0);
+      Entry = 0;
+    }
+
+    Index++;
+  }
+}
+
+/**
+  Perform the PVALIDATE instruction.
+
+  Performs the PVALIDATE instruction to validate or invalidate the memory
+  range specified.
+
+  @param[in] Info  Pointer to a page state change structure
+
+**/
+STATIC
+VOID
+BasePvalidate (
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  UINTN    Index;
+  UINTN    EndIndex;
+  UINTN    Address;
+  UINTN    RmpPageSize;
+  BOOLEAN  Validate;
+  UINTN    Ret;
+
+  Index    = Info->Header.CurrentEntry;
+  EndIndex = Info->Header.EndEntry;
+  while (Index <= EndIndex) {
+    //
+    // Get the address and the page size from the Info.
+    //
+    Address     = Info->Entry[Index].GuestFrameNumber << EFI_PAGE_SHIFT;
+    RmpPageSize = Info->Entry[Index].PageSize;
+    Validate    = Info->Entry[Index].Operation == SNP_PAGE_STATE_PRIVATE;
+
+    Ret = AsmPvalidate (RmpPageSize, Validate, Address);
+
+    //
+    // If PVALIDATE of a 2M page fails due to a size mismatch, then retry
+    // the full 2M range using a page size of 4K. This can occur if the
+    // RMP entry has a page size of 4K.
+    //
+    if ((Ret == PVALIDATE_RET_SIZE_MISMATCH) && (RmpPageSize == PvalidatePageSize2MB)) {
+      UINTN  EndAddress;
+
+      EndAddress = Address + (PAGES_PER_2MB_ENTRY * SIZE_4KB);
+      while (Address < EndAddress) {
+        Ret = AsmPvalidate (PvalidatePageSize4K, Validate, Address);
+        if (Ret) {
+          break;
+        }
+
+        Address += SIZE_4KB;
+      }
+    }
+
+    //
+    // If validation failed then do not continue.
+    //
+    if (Ret) {
+      DEBUG ((
+        DEBUG_ERROR,
+        "%a:%a: Failed to %a address 0x%Lx Error code %d\n",
+        gEfiCallerBaseName,
+        __func__,
+        Validate ? "Validate" : "Invalidate",
+        Address,
+        Ret
+        ));
+
+      SvsmTerminate ();
+    }
+
+    Index++;
+  }
+}
+
 /**
   Report the presence of an Secure Virtual Services Module (SVSM).
@@ -72,6 +378,11 @@ CcExitSnpPvalidate (
   IN SNP_PAGE_STATE_CHANGE_INFO  *Info
   )
 {
+  if (CcExitSnpSvsmPresent ()) {
+    SvsmPvalidate (Info);
+  } else {
+    BasePvalidate (Info);
+  }
 }
 
 /**
-- 
2.42.0
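
For illustration only, not part of the patch: a minimal sketch of how a
page-state caller reaches PVALIDATE through the new CcExitLib entry point
after this change. The function name ExamplePvalidateOnePage and the
single-entry setup are hypothetical; CcExitSnpPvalidate () and the
SNP_PAGE_STATE_CHANGE_INFO header/entry field names are taken from the
patch, while the SNP_PAGE_STATE_ENTRY type name and the #include list are
assumed to come from the usual MdePkg/OvmfPkg headers.

#include <Uefi/UefiBaseType.h>
#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CcExitLib.h>
#include <Register/Amd/Ghcb.h>

//
// Hypothetical example: validate (make private) a single 4KB page.
//
STATIC
VOID
ExamplePvalidateOnePage (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  //
  // UINT64-aligned backing storage large enough for a one-entry page state
  // change structure (works whether Entry[] is fixed-size or flexible).
  //
  UINT64                      Buffer[(sizeof (SNP_PAGE_STATE_CHANGE_INFO) +
                                      sizeof (SNP_PAGE_STATE_ENTRY) +
                                      sizeof (UINT64) - 1) / sizeof (UINT64)];
  SNP_PAGE_STATE_CHANGE_INFO  *Info;

  ZeroMem (Buffer, sizeof (Buffer));
  Info = (SNP_PAGE_STATE_CHANGE_INFO *)Buffer;

  //
  // Describe one 4KB entry to be validated.
  //
  Info->Header.CurrentEntry       = 0;
  Info->Header.EndEntry           = 0;
  Info->Entry[0].GuestFrameNumber = Address >> EFI_PAGE_SHIFT;
  Info->Entry[0].PageSize         = PvalidatePageSize4K;
  Info->Entry[0].Operation        = SNP_PAGE_STATE_PRIVATE;

  //
  // CcExitSnpPvalidate () decides internally whether to issue PVALIDATE
  // directly (no SVSM present) or to forward the request to the SVSM via
  // the SVSM_CORE_PVALIDATE call.
  //
  CcExitSnpPvalidate (Info);
}

Before this patch the same Info buffer was handed to the static
PvalidateRange () helper in SnpPageStateChangeInternal.c; the SVSM decision
is now hidden behind CcExitLib. In the real flow, InternalSetPageState ()
still issues the GHCB page state change to the hypervisor, invalidating
before a page is made shared and validating after it is made private, as
the hunks above show.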