This patch marks the memory pages used for the page table as read-only
after paging is set up. CR0.WP must be set for the write protection to
take effect.

Cc: Jiewen Yao <jiewen....@intel.com>
Cc: Star Zeng <star.z...@intel.com>
Cc: Eric Dong <eric.d...@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Jian J Wang <jian.j.w...@intel.com>
---
 MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c | 166 +++++++++++++++++++++++
 MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h |  14 ++
 2 files changed, 180 insertions(+)
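
Note (not part of the commit): below is a minimal sketch of the CR0.WP
handshake the patch relies on. It assumes the BaseLib intrinsics AsmReadCr0()
and AsmWriteCr0(), and mirrors the CR0_WP definition added to VirtualMemory.h;
ExamplePageTableEdit is a hypothetical helper, not code from this series.

  #include <Base.h>
  #include <Library/BaseLib.h>

  #define CR0_WP  BIT16   // same definition this patch adds to VirtualMemory.h

  //
  // Hypothetical helper: update one page table entry while the page table
  // pages are mapped read-only. CR0.WP must be cleared so that supervisor-mode
  // writes to read-only pages do not fault, and set again afterwards so the
  // protection installed by EnablePageTableProtection() is enforced.
  //
  VOID
  ExamplePageTableEdit (
    IN UINT64  *Entry,
    IN UINT64  NewValue
    )
  {
    AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);    // allow writes to RO pages
    *Entry = NewValue;                        // update the entry
    AsmWriteCr0 (AsmReadCr0 () | CR0_WP);     // re-enable write protection
  }

In the patch itself, EnablePageTableProtection() wraps the whole marking pass
in the same clear/restore sequence rather than toggling CR0.WP per entry.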

diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
index 29b6205e88..7a859606c6 100644
--- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
+++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
@@ -234,6 +234,166 @@ Split1GPageTo2M (
   }
 }
 
+/**
+  Set one page (4KB) of memory to be read-only.
+
+  @param[in] PageTableBase    Base address of page table (CR3).
+  @param[in] Address          Start address of a page to be set as read-only.
+
+**/
+VOID
+SetPageReadOnly (
+  IN  UINTN                             PageTableBase,
+  IN  PHYSICAL_ADDRESS                  Address
+  )
+{
+  UINTN                 Index;
+  UINTN                 Index1;
+  UINTN                 Index2;
+  UINTN                 Index3;
+  UINTN                 Index4;
+  UINT64                *L1PageTable;
+  UINT64                *L2PageTable;
+  UINT64                *L3PageTable;
+  UINT64                *L4PageTable;
+  UINT64                AddressEncMask;
+  PHYSICAL_ADDRESS      PhysicalAddress;
+
+  ASSERT (PageTableBase != 0);
+
+  Index4 = ((UINTN)RShiftU64 (Address, PAGING_L4_ADDRESS_SHIFT)) &
+           PAGING_PAE_INDEX_MASK;
+  ASSERT (Index4 < PAGING_PML4E_NUMBER);
+
+  Index3 = ((UINTN)Address >> PAGING_L3_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK;
+  Index2 = ((UINTN)Address >> PAGING_L2_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK;
+  Index1 = ((UINTN)Address >> PAGING_L1_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK;
+
+  //
+  // Make sure AddressEncMask is contained to smallest supported address field.
+  //
+  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
+                   PAGING_1G_ADDRESS_MASK_64;
+
+  L4PageTable = (UINT64 *)(UINTN)PageTableBase;
+  L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~AddressEncMask &
+                                  PAGING_4K_ADDRESS_MASK_64);
+  if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+    // 1G page. Split to 2M.
+    L2PageTable = AllocatePages (1);
+    ASSERT (L2PageTable != NULL);
+
+    PhysicalAddress = L3PageTable[Index3] & PAGING_1G_ADDRESS_MASK_64;
+    for (Index = 0; Index < EFI_PAGE_SIZE/sizeof (UINT64); ++Index) {
+      L2PageTable[Index] = PhysicalAddress  | AddressEncMask |
+                           IA32_PG_PS | IA32_PG_P | IA32_PG_RW;
+      PhysicalAddress += SIZE_2MB;
+    }
+
+    L3PageTable[Index3] = (UINT64) (UINTN) L2PageTable | AddressEncMask |
+                                           IA32_PG_P | IA32_PG_RW;
+    SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable);
+  }
+
+  L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~AddressEncMask &
+                                  PAGING_4K_ADDRESS_MASK_64);
+  if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+    // 2M page. Split to 4K.
+    L1PageTable = AllocatePages (1);
+    ASSERT (L1PageTable != NULL);
+
+    PhysicalAddress = L2PageTable[Index2] & PAGING_2M_ADDRESS_MASK_64;
+    for (Index = 0; Index < EFI_PAGE_SIZE/sizeof (UINT64); ++Index) {
+      L1PageTable[Index] = PhysicalAddress  | AddressEncMask |
+                           IA32_PG_P | IA32_PG_RW;
+      PhysicalAddress += SIZE_4KB;
+    }
+
+    L2PageTable[Index2] = (UINT64)(UINTN)L1PageTable | AddressEncMask |
+                                         IA32_PG_P | IA32_PG_RW;
+    SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable);
+  }
+
+  // 4k
+  L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~AddressEncMask &
+                                  PAGING_4K_ADDRESS_MASK_64);
+  L1PageTable[Index1] &= ~IA32_PG_RW;
+}
+
+/**
+  Prevent the memory pages used for the page table from being overwritten.
+
+  @param[in] PageTableBase    Base address of page table (CR3).
+
+**/
+VOID
+EnablePageTableProtection (
+  IN UINTN      PageTableBase
+  )
+{
+  UINTN                 Index2;
+  UINTN                 Index3;
+  UINTN                 Index4;
+  UINT64                *L1PageTable;
+  UINT64                *L2PageTable;
+  UINT64                *L3PageTable;
+  UINT64                *L4PageTable;
+  UINT64                AddressEncMask;
+
+  //
+  // Disable write protection, because the page table pages themselves must be
+  // written in order to mark them read-only.
+  //
+  AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
+                   PAGING_1G_ADDRESS_MASK_64;
+  L4PageTable = (UINT64 *)PageTableBase;
+  SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable);
+
+  for (Index4 = 0; Index4 < PAGING_PML4E_NUMBER; Index4++) {
+    L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~AddressEncMask &
+                                    PAGING_4K_ADDRESS_MASK_64);
+    if (L3PageTable == NULL) {
+      continue;
+    }
+    SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable);
+
+    for (Index3 = 0; Index3 < EFI_PAGE_SIZE/sizeof(UINT64); Index3++) {
+      if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+        // 1G
+        continue;
+      }
+
+      L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~AddressEncMask &
+                                      PAGING_4K_ADDRESS_MASK_64);
+      if (L2PageTable == NULL) {
+        continue;
+      }
+      SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable);
+
+      for (Index2 = 0; Index2 < EFI_PAGE_SIZE/sizeof(UINT64); Index2++) {
+        if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+          // 2M
+          continue;
+        }
+
+        L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~AddressEncMask &
+                                        PAGING_4K_ADDRESS_MASK_64);
+        if (L1PageTable == NULL) {
+          continue;
+        }
+        SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable);
+      }
+    }
+  }
+
+  //
+  // Enable write protection, after page table updated.
+  //
+  AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+}
+
 /**
   Allocates and fills in the Page Directory and Page Table Entries to
   establish a 1:1 Virtual to Physical mapping.
@@ -430,6 +590,12 @@ CreateIdentityMappingPageTables (
       );
   }
 
+  //
+  // Protect the page table by marking the memory used for the page table as
+  // read-only.
+  //
+  EnablePageTableProtection ((UINTN)PageMap);
+
   if (PcdGetBool (PcdSetNxForStack)) {
     EnableExecuteDisableBit ();
   }
diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h
index 7c9bb49e3e..6d1961b6f8 100644
--- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h
+++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h
@@ -148,11 +148,25 @@ typedef union {
 
 #pragma pack()
 
+#define CR0_WP                      BIT16
+
 #define IA32_PG_P                   BIT0
 #define IA32_PG_RW                  BIT1
+#define IA32_PG_PS                  BIT7
+
+#define PAGING_PAE_INDEX_MASK       0x1FF
 
+#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
 #define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
 
+#define PAGING_L1_ADDRESS_SHIFT     12
+#define PAGING_L2_ADDRESS_SHIFT     21
+#define PAGING_L3_ADDRESS_SHIFT     30
+#define PAGING_L4_ADDRESS_SHIFT     39
+
+#define PAGING_PML4E_NUMBER         4
+
 /**
   Enable Execute Disable Bit.
 
-- 
2.14.1.windows.1
