The DXE core implementation of PcdDxeNxMemoryProtectionPolicy already
contains an assertion that EfiConventionalMemory and EfiBootServicesData
are subject to the same policy when it comes to the use of NX
permissions. The reason for this is that we may otherwise end up with
unbounded recursion in the page table code: allocating a page table
would then involve a permission attribute change, which could require a
block entry to be split, which in turn would trigger the allocation of
another page table, and so on.
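
For reference, the NX policy PCD is a bitmask indexed by EFI_MEMORY_TYPE,
so the check boils down to testing one bit per memory type. Below is a
minimal sketch of that test, mirroring the TestBit logic used further down
in this patch; the helper name is made up for illustration and is not part
of the DXE core:

  STATIC
  BOOLEAN
  IsNxRequestedForType (
    IN EFI_MEMORY_TYPE  MemoryType
    )
  {
    //
    // Each EFI_MEMORY_TYPE corresponds to one bit in the policy bitmask;
    // a set bit means the type must be mapped non-executable.
    //
    return (PcdGet64 (PcdDxeNxMemoryProtectionPolicy) &
            LShiftU64 (1, MemoryType)) != 0;
  }

  //
  // The assertion mentioned above amounts to requiring
  //   IsNxRequestedForType (EfiConventionalMemory) ==
  //   IsNxRequestedForType (EfiBootServicesData)
  //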

For the same reason, a shortcut exists in ApplyMemoryProtectionPolicy()
where, instead of setting the memory attributes unconditionally, we
compare the NX policies of the old and the new memory types, and avoid
touching the page tables if they are the same. Without this shortcut,
the same unbounded recursion could be triggered while the CPU arch
protocol DXE driver is coming up, because the NX policy for
EfiConventionalMemory has not been applied yet.
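
The shortcut compares the attributes implied by the policy for the old and
the new memory types, and returns early when they match. A simplified,
paraphrased sketch of that logic (not a verbatim quote of the DXE core;
see MdeModulePkg/Core/Dxe for the actual implementation):

  //
  // Inside ApplyMemoryProtectionPolicy(): if the old and the new memory
  // type map to the same NX attributes, there is nothing to change, so
  // skip the page table update entirely, and with it, any recursion.
  //
  if (GetPermissionAttributeForMemoryType (OldType) ==
      GetPermissionAttributeForMemoryType (NewType)) {
    return EFI_SUCCESS;
  }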

To break this cycle, let's remap all EfiConventionalMemory regions
according to the NX policy for EfiBootServicesData before exposing the
CPU arch protocol to the DXE core and other drivers. This ensures that
creating EfiBootServicesData allocations does not result in memory
attribute changes, and therefore cannot trigger the recursion described
above.

Signed-off-by: Ard Biesheuvel <a...@kernel.org>
---
 ArmPkg/Drivers/CpuDxe/CpuDxe.c   | 77 ++++++++++++++++++++
 ArmPkg/Drivers/CpuDxe/CpuDxe.inf |  2 +
 2 files changed, 79 insertions(+)

diff --git a/ArmPkg/Drivers/CpuDxe/CpuDxe.c b/ArmPkg/Drivers/CpuDxe/CpuDxe.c
index d04958e79e52..83fd6fd4e476 100644
--- a/ArmPkg/Drivers/CpuDxe/CpuDxe.c
+++ b/ArmPkg/Drivers/CpuDxe/CpuDxe.c
@@ -11,6 +11,8 @@
 
 #include <Guid/IdleLoopEvent.h>
 
+#include <Library/MemoryAllocationLib.h>
+
 BOOLEAN  mIsFlushingGCD;
 
 /**
@@ -227,6 +229,69 @@ InitializeDma (
   CpuArchProtocol->DmaBufferAlignment = ArmCacheWritebackGranule ();
 }
 
+STATIC
+VOID
+RemapUnusedMemoryNx (
+  VOID
+  )
+{
+  UINT64                     TestBit;
+  UINTN                      MemoryMapSize;
+  UINTN                      MapKey;
+  UINTN                      DescriptorSize;
+  UINT32                     DescriptorVersion;
+  EFI_MEMORY_DESCRIPTOR      *MemoryMap;
+  EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry;
+  EFI_MEMORY_DESCRIPTOR      *MemoryMapEnd;
+  EFI_STATUS                 Status;
+
+  TestBit = LShiftU64 (1, EfiBootServicesData);
+  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & TestBit) == 0) {
+    return;
+  }
+
+  MemoryMapSize = 0;
+  MemoryMap     = NULL;
+
+  Status = gBS->GetMemoryMap (
+                  &MemoryMapSize,
+                  MemoryMap,
+                  &MapKey,
+                  &DescriptorSize,
+                  &DescriptorVersion
+                  );
+  ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+  do {
+    MemoryMap = (EFI_MEMORY_DESCRIPTOR *)AllocatePool (MemoryMapSize);
+    ASSERT (MemoryMap != NULL);
+    Status = gBS->GetMemoryMap (
+                    &MemoryMapSize,
+                    MemoryMap,
+                    &MapKey,
+                    &DescriptorSize,
+                    &DescriptorVersion
+                    );
+    if (EFI_ERROR (Status)) {
+      FreePool (MemoryMap);
+    }
+  } while (Status == EFI_BUFFER_TOO_SMALL);
+
+  ASSERT_EFI_ERROR (Status);
+
+  MemoryMapEntry = MemoryMap;
+  MemoryMapEnd   = (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)MemoryMap + MemoryMapSize);
+  while ((UINTN)MemoryMapEntry < (UINTN)MemoryMapEnd) {
+    if (MemoryMapEntry->Type == EfiConventionalMemory) {
+      ArmSetMemoryRegionNoExec (
+        MemoryMapEntry->PhysicalStart,
+        EFI_PAGES_TO_SIZE (MemoryMapEntry->NumberOfPages)
+        );
+    }
+
+    MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+  }
+}
+
 EFI_STATUS
 CpuDxeInitialize (
   IN EFI_HANDLE        ImageHandle,
@@ -240,6 +305,18 @@ CpuDxeInitialize (
 
   InitializeDma (&mCpu);
 
+  //
+  // Once we install the CPU arch protocol, the DXE core's memory protection
+  // routines will invoke it to manage the permissions of page allocations
+  // as they are created. Given that this includes pages allocated for page
+  // tables by this driver, we must ensure that unused memory is mapped with
+  // the same permissions as boot services data regions. Otherwise, we may
+  // end up with unbounded recursion: updating the permissions on a newly
+  // allocated page table may trigger a block entry split, which triggers
+  // another page table allocation, and so on.
+  //
+  RemapUnusedMemoryNx ();
+
   Status = gBS->InstallMultipleProtocolInterfaces (
                   &mCpuHandle,
                   &gEfiCpuArchProtocolGuid,
diff --git a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
index e732e21cb94a..8fd0f4133088 100644
--- a/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
+++ b/ArmPkg/Drivers/CpuDxe/CpuDxe.inf
@@ -48,6 +48,7 @@ [LibraryClasses]
   DefaultExceptionHandlerLib
   DxeServicesTableLib
   HobLib
+  MemoryAllocationLib
   PeCoffGetEntryPointLib
   UefiDriverEntryPoint
   UefiLib
@@ -64,6 +65,7 @@ [Guids]
 
 [Pcd.common]
   gArmTokenSpaceGuid.PcdVFPEnabled
+  gEfiMdeModulePkgTokenSpaceGuid.PcdDxeNxMemoryProtectionPolicy
 
 [FeaturePcd.common]
   gArmTokenSpaceGuid.PcdDebuggerExceptionSupport
-- 
2.39.1
