BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=4654

The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
be present when running at VMPL1 or higher.

When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
memory validation instead of issuing the PVALIDATE instruction directly.
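
The SVSM_CORE_PVALIDATE request is built in the calling area (CAA) buffer
and submitted through the GHCB MSR protocol, roughly:

  RAX = SVSM function identifier: protocol 0 (Core), call 1 (PVALIDATE)
  RCX = address of the SVSM_PVALIDATE_REQUEST placed in the CAA buffer
  RAX on return = SVSM result code (0 on success)

The resulting dispatch in AmdSvsmSnpPvalidate() is equivalent to:

  if (AmdSvsmIsSvsmPresent ()) {
    SvsmPvalidate (Info);     // SVSM_CORE_PVALIDATE performed by the SVSM
  } else {
    BasePvalidate (Info);     // direct PVALIDATE, only possible at VMPL0
  }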

Cc: Ard Biesheuvel <ardb+tianoc...@kernel.org>
Cc: Gerd Hoffmann <kra...@redhat.com>
Cc: Jiewen Yao <jiewen....@intel.com>
Cc: Laszlo Ersek <ler...@redhat.com>
Acked-by: Gerd Hoffmann <kra...@redhat.com>
Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c | 183 +++++++++++++++++++-
 1 file changed, 182 insertions(+), 1 deletion(-)
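
A note on the SVSM_ERR_PVALIDATE_FAIL_SIZE_MISMATCH handling in
SvsmPvalidate(): when the SVSM rejects a 2MB entry with a size mismatch,
that entry is re-submitted as PAGES_PER_2MB_ENTRY individual 4KB entries
and the outer loop then resumes at the Info entry following the failed
one. A worked sketch of the resume calculation, using hypothetical values
that are not taken from the patch:

  //
  // Assume Index = 10 when the request buffer is flushed, Entry = 4
  // entries had been packed into the buffer (Info entries 7..10), and
  // the SVSM reports Request->Header.Next = 1, i.e. the second buffered
  // entry (Info entry 8) failed.
  //
  //   Failed Info entry = Index - (Entry - Next) + 1 = 10 - 3 + 1 = 8
  //   Resume Index      = Index - (Entry - Next) + 2 = 10 - 3 + 2 = 9
  //
  // Entry 8 is re-validated using 4KB entries, then the while loop picks
  // up again at Info entry 9 (the "continue" skips the Index increment).
  //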

diff --git a/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c b/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
index 861bf9591ae3..fb3fda70e948 100644
--- a/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
+++ b/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
@@ -8,6 +8,7 @@
 
 #include <Base.h>
 #include <Uefi.h>
+#include <Library/BaseMemoryLib.h>
 #include <Library/AmdSvsmLib.h>
 #include <Register/Amd/Msr.h>
 #include <Register/Amd/Svsm.h>
@@ -43,6 +44,78 @@ SnpTerminate (
   CpuDeadLoop ();
 }
 
+/**
+  Issue an SVSM request.
+
+  Invokes the SVSM to process a request on behalf of the guest.
+
+  @param[in,out]  SvsmCallData  Pointer to the SVSM call data
+
+  @return                       Contents of RAX upon return from VMGEXIT
+**/
+STATIC
+UINTN
+SvsmMsrProtocol (
+  IN OUT SVSM_CALL_DATA  *SvsmCallData
+  )
+{
+  MSR_SEV_ES_GHCB_REGISTER  Msr;
+  UINT64                    CurrentMsr;
+  UINT8                     Pending;
+  BOOLEAN                   InterruptState;
+  UINTN                     Ret;
+
+  do {
+    //
+    // Be sure that an interrupt can't cause a #VC while the GHCB MSR protocol
+    // is being used (#VC handler will ASSERT if the lower 12 bits are not zero).
+    //
+    InterruptState = GetInterruptState ();
+    if (InterruptState) {
+      DisableInterrupts ();
+    }
+
+    Pending                   = 0;
+    SvsmCallData->CallPending = &Pending;
+
+    CurrentMsr = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    Msr.Uint64                  = 0;
+    Msr.SnpVmplRequest.Function = GHCB_INFO_SNP_VMPL_REQUEST;
+    Msr.SnpVmplRequest.Vmpl     = 0;
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.Uint64);
+
+    //
+    // Guest memory is used for the guest-SVSM communication, so fence the
+    // invocation of the VMGEXIT instruction to ensure VMSA accesses are
+    // synchronized properly.
+    //
+    MemoryFence ();
+    Ret = AsmVmgExitSvsm (SvsmCallData);
+    MemoryFence ();
+
+    Msr.Uint64 = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, CurrentMsr);
+
+    if (InterruptState) {
+      EnableInterrupts ();
+    }
+
+    if (Pending != 0) {
+      SnpTerminate ();
+    }
+
+    if ((Msr.SnpVmplResponse.Function != GHCB_INFO_SNP_VMPL_RESPONSE) ||
+        (Msr.SnpVmplResponse.ErrorCode != 0))
+    {
+      SnpTerminate ();
+    }
+  } while ((Ret == SVSM_ERR_INCOMPLETE) || (Ret == SVSM_ERR_BUSY));
+
+  return Ret;
+}
+
 /**
   Report the presence of a Secure Virtual Services Module (SVSM).
 
@@ -109,6 +182,114 @@ AmdSvsmSnpGetCaa (
   return AmdSvsmIsSvsmPresent () ? SvsmInfo->SvsmCaa : 0;
 }
 
+/**
+  Issue an SVSM request to perform the PVALIDATE instruction.
+
+  Invokes the SVSM to process the PVALIDATE instruction on behalf of the
+  guest to validate or invalidate the memory range specified.
+
+  @param[in]       Info           Pointer to a page state change structure
+
+**/
+STATIC
+VOID
+SvsmPvalidate (
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  SVSM_CALL_DATA          SvsmCallData;
+  SVSM_CAA                *Caa;
+  SVSM_PVALIDATE_REQUEST  *Request;
+  SVSM_FUNCTION           Function;
+  BOOLEAN                 Validate;
+  UINTN                   Entry;
+  UINTN                   EntryLimit;
+  UINTN                   Index;
+  UINTN                   EndIndex;
+  UINT64                  Gfn;
+  UINT64                  GfnEnd;
+  UINTN                   Ret;
+
+  Caa = (SVSM_CAA *)AmdSvsmSnpGetCaa ();
+  ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+
+  Function.Id.Protocol = 0;
+  Function.Id.CallId   = 1;
+
+  Request    = (SVSM_PVALIDATE_REQUEST *)Caa->SvsmBuffer;
+  EntryLimit = ((sizeof (Caa->SvsmBuffer) - sizeof (*Request)) /
+                sizeof (Request->Entry[0])) - 1;
+
+  SvsmCallData.Caa   = Caa;
+  SvsmCallData.RaxIn = Function.Uint64;
+  SvsmCallData.RcxIn = (UINT64)(UINTN)Request;
+
+  Entry    = 0;
+  Index    = Info->Header.CurrentEntry;
+  EndIndex = Info->Header.EndEntry;
+
+  while (Index <= EndIndex) {
+    Validate = Info->Entry[Index].Operation == SNP_PAGE_STATE_PRIVATE;
+
+    Request->Header.Entries++;
+    Request->Entry[Entry].Bits.PageSize = Info->Entry[Index].PageSize;
+    Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+    Request->Entry[Entry].Bits.IgnoreCf = 0;
+    Request->Entry[Entry].Bits.Address  = Info->Entry[Index].GuestFrameNumber;
+
+    Entry++;
+    if ((Entry > EntryLimit) || (Index == EndIndex)) {
+      Ret = SvsmMsrProtocol (&SvsmCallData);
+      if ((Ret == SVSM_ERR_PVALIDATE_FAIL_SIZE_MISMATCH) &&
+          (Request->Entry[Request->Header.Next].Bits.PageSize != 0))
+      {
+        // Calculate the Index of the entry after the entry that failed
+        // before clearing the buffer so that processing can continue
+        // from that point
+        Index = Index - (Entry - Request->Header.Next) + 2;
+
+        // Obtain the failing GFN before clearing the buffer
+        Gfn = Request->Entry[Request->Header.Next].Bits.Address;
+
+        // Clear the buffer in prep for creating all new entries
+        ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+        Entry = 0;
+
+        GfnEnd = Gfn + PAGES_PER_2MB_ENTRY - 1;
+        for ( ; Gfn <= GfnEnd; Gfn++) {
+          Request->Header.Entries++;
+          Request->Entry[Entry].Bits.PageSize = 0;
+          Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+          Request->Entry[Entry].Bits.IgnoreCf = 0;
+          Request->Entry[Entry].Bits.Address  = Gfn;
+
+          Entry++;
+          if ((Entry > EntryLimit) || (Gfn == GfnEnd)) {
+            Ret = SvsmMsrProtocol (&SvsmCallData);
+            if (Ret != 0) {
+              SnpTerminate ();
+            }
+
+            ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+            Entry = 0;
+          }
+        }
+
+        continue;
+      }
+
+      if (Ret != 0) {
+        SnpTerminate ();
+      }
+
+      ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+      Entry = 0;
+    }
+
+    Index++;
+  }
+}
+
 /**
   Perform a native PVALIDATE operation for the page ranges specified.
 
@@ -193,7 +374,7 @@ AmdSvsmSnpPvalidate (
   IN SNP_PAGE_STATE_CHANGE_INFO  *Info
   )
 {
-  BasePvalidate (Info);
+  AmdSvsmIsSvsmPresent () ? SvsmPvalidate (Info) : BasePvalidate (Info);
 }
 
 /**
-- 
2.43.2


