The memory integrity guarantees of SEV-SNP are enforced through a new
structure called the Reverse Map Table (RMP). The RMP is a single data
structure shared across the system that contains one entry for every 4K
page of DRAM that may be used by SEV-SNP VMs. The goal of RMP is to
track the owner of each page of memory. Pages of memory can be owned by
the hypervisor, owned by a specific VM or owned by the AMD-SP. See APM2
section 15.36.3 for more detail on RMP.

The RMP table is used to enforce access control to memory. The table itself
is not directly writable by software. New CPU instructions (RMPUPDATE,
PVALIDATE, RMPADJUST) are used to manipulate the RMP entries.

Based on the platform configuration, the BIOS reserves the memory used
for the RMP table. The start and end address of the RMP table can be
queried by reading the RMP_BASE and RMP_END MSRs. If RMP_BASE and
RMP_END are not set, then the SEV-SNP feature is disabled.

The SEV-SNP feature is enabled only after the RMP table is successfully
initialized.

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Joerg Roedel <jroe...@suse.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: "Peter Zijlstra (Intel)" <pet...@infradead.org>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Tom Lendacky <thomas.lenda...@amd.com>
Cc: David Rientjes <rient...@google.com>
Cc: Sean Christopherson <sea...@google.com>
Cc: x...@kernel.org
Cc: k...@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
---
 arch/x86/include/asm/msr-index.h |  6 +++
 arch/x86/include/asm/sev-snp.h   | 10 ++++
 arch/x86/mm/mem_encrypt.c        | 84 ++++++++++++++++++++++++++++++++
 3 files changed, 100 insertions(+)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b03694e116fe..1142d31eb06c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -481,6 +481,8 @@
 #define MSR_AMD64_SEV_ENABLED          BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
 #define MSR_AMD64_SEV_ES_ENABLED       BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
 #define MSR_AMD64_SEV_SNP_ENABLED      BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
+#define MSR_AMD64_RMP_BASE             0xc0010132
+#define MSR_AMD64_RMP_END              0xc0010133
 
 #define MSR_AMD64_VIRT_SPEC_CTRL       0xc001011f
 
@@ -538,6 +540,10 @@
 #define MSR_K8_SYSCFG                  0xc0010010
 #define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT  23
 #define MSR_K8_SYSCFG_MEM_ENCRYPT      BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_K8_SYSCFG_SNP_EN_BIT       24
+#define MSR_K8_SYSCFG_SNP_EN           BIT_ULL(MSR_K8_SYSCFG_SNP_EN_BIT)
+#define MSR_K8_SYSCFG_SNP_VMPL_EN_BIT  25
+#define MSR_K8_SYSCFG_SNP_VMPL_EN      BIT_ULL(MSR_K8_SYSCFG_SNP_VMPL_EN_BIT)
 #define MSR_K8_INT_PENDING_MSG         0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK                0x18000000
diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h
index 59b57a5f6524..f7280d5c6158 100644
--- a/arch/x86/include/asm/sev-snp.h
+++ b/arch/x86/include/asm/sev-snp.h
@@ -68,6 +68,8 @@ struct __packed snp_page_state_change {
 #define RMP_X86_PG_LEVEL(level)        (((level) == RMP_PG_SIZE_4K) ? 
PG_LEVEL_4K : PG_LEVEL_2M)
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+#include <linux/jump_label.h>
+
 static inline int __pvalidate(unsigned long vaddr, int rmp_psize, int validate,
                              unsigned long *rflags)
 {
@@ -93,6 +95,13 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, 
unsigned long paddr
 int snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 int snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 
+extern struct static_key_false snp_enable_key;
+static inline bool snp_key_active(void)
+{
+       return static_branch_unlikely(&snp_enable_key);
+}
+
+
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
 static inline int __pvalidate(unsigned long vaddr, int psize, int validate, 
unsigned long *eflags)
@@ -114,6 +123,7 @@ early_snp_set_memory_shared(unsigned long vaddr, unsigned 
long paddr, unsigned i
 }
 static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int 
npages) { return 0; }
 static inline int snp_set_memory_private(unsigned long vaddr, unsigned int 
npages) { return 0; }
+static inline bool snp_key_active(void) { return false; }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 35af2f21b8f1..39461b9cb34e 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -30,6 +30,7 @@
 #include <asm/msr.h>
 #include <asm/cmdline.h>
 #include <asm/sev-snp.h>
+#include <linux/io.h>
 
 #include "mm_internal.h"
 
@@ -44,12 +45,16 @@ u64 sev_check_data __section(".data") = 0;
 EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
+DEFINE_STATIC_KEY_FALSE(snp_enable_key);
+EXPORT_SYMBOL_GPL(snp_enable_key);
 
 bool sev_enabled __section(".data");
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
 
+static unsigned long rmptable_start, rmptable_end;
+
 /*
  * When SNP is active, this routine changes the page state from private to 
shared before
  * copying the data from the source to destination and restore after the copy. 
This is required
@@ -528,3 +533,82 @@ void __init mem_encrypt_init(void)
        print_mem_encrypt_feature_info();
 }
 
+static __init void snp_enable(void *arg)
+{
+       u64 val;
+
+       rdmsrl_safe(MSR_K8_SYSCFG, &val);
+
+       val |= MSR_K8_SYSCFG_SNP_EN;
+       val |= MSR_K8_SYSCFG_SNP_VMPL_EN;
+
+       wrmsrl(MSR_K8_SYSCFG, val);
+}
+
+static __init int rmptable_init(void)
+{
+       u64 rmp_base, rmp_end;
+       unsigned long sz;
+       void *start;
+       u64 val;
+
+       rdmsrl_safe(MSR_AMD64_RMP_BASE, &rmp_base);
+       rdmsrl_safe(MSR_AMD64_RMP_END, &rmp_end);
+
+       if (!rmp_base || !rmp_end) {
+               pr_info("SEV-SNP: Memory for the RMP table has not been 
reserved by BIOS\n");
+               return 1;
+       }
+
+       sz = rmp_end - rmp_base + 1;
+
+       start = memremap(rmp_base, sz, MEMREMAP_WB);
+       if (!start) {
+               pr_err("SEV-SNP: Failed to map RMP table 0x%llx-0x%llx\n", 
rmp_base, rmp_end);
+               return 1;
+       }
+
+       /*
+        * Check if SEV-SNP is already enabled; this can happen when coming from a kexec boot.
+        * Do not initialize the RMP table when SEV-SNP is already enabled.
+        */
+       rdmsrl_safe(MSR_K8_SYSCFG, &val);
+       if (val & MSR_K8_SYSCFG_SNP_EN)
+               goto skip_enable;
+
+       /* Initialize the RMP table to zero */
+       memset(start, 0, sz);
+
+       /* Flush the caches to ensure that data is written before we enable SNP */
+       wbinvd_on_all_cpus();
+
+       /* Enable the SNP feature */
+       on_each_cpu(snp_enable, NULL, 1);
+
+skip_enable:
+       rmptable_start = (unsigned long)start;
+       rmptable_end = rmptable_start + sz;
+
+       pr_info("SEV-SNP: RMP table physical address 0x%016llx - 0x%016llx\n", 
rmp_base, rmp_end);
+
+       return 0;
+}
+
+static int __init mem_encrypt_snp_init(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_SEV_SNP))
+               return 1;
+
+       if (rmptable_init()) {
+               setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+               return 1;
+       }
+
+       static_branch_enable(&snp_enable_key);
+
+       return 0;
+}
+/*
+ * SEV-SNP must be enabled across all CPUs, so make the initialization as a 
late initcall.
+ */
+late_initcall(mem_encrypt_snp_init);
-- 
2.17.1

Reply via email to