>
> So I want to be able to disable SEV and the whole code that comes with
> it in the *host*.

We can add a new variable, 'sme_only', which defaults to false. When
mem_encrypt=sme is passed on the command line, set it to true;
early_detect_mem_encrypt() can then use the sme_only state to clear the
X86_FEATURE_SEV flag.
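
Roughly, the two sides of the flag look like this (just a sketch lifted from
the diff below, not the complete functions):

	/* arch/x86/mm/mem_encrypt.c: sme_enable(), mem_encrypt= parsing */
	} else if (!strncmp(buffer, cmd_sme, sizeof(buffer))) {
		sme_me_mask = me_mask;
		sme_only = true;	/* SME requested explicitly, leave SEV off */
		return;
	}

	/* arch/x86/kernel/cpu/amd.c: early_detect_mem_encrypt() */
	if (sme_only)
		goto clear_sev;		/* host stops advertising X86_FEATURE_SEV */

With that, the host never reports X86_FEATURE_SEV when mem_encrypt=sme is
used, while a guest (which never takes the sme_only path) is unaffected.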

Here are the changes on top of your patch. I did a quick test in both the
host and guest OS and it seems to be working okay. In the host OS,
mem_encrypt=sme disabled SEV, but in the guest it is still a don't-care. I
will do more testing later...

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 175310f00202..73a6fb3b14a1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -19,7 +19,7 @@
 
 #include <asm/bootparam.h>
 
-extern bool sev_enabled;
+extern bool sme_only;
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d0669f3966a6..a09b02959874 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -33,7 +33,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
  */
 static u32 nodes_per_socket = 1;
 
-bool sev_enabled __section(.data) = false;
+bool sme_only __section(.data) = false;
 
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
@@ -591,7 +591,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;
 
-               if (!sev_enabled)
+               if (sme_only)
                        goto clear_sev;
 
                rdmsrl(MSR_K7_HWCR, msr);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9b83bc1be7c0..a135e4497021 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -45,6 +45,7 @@ u64 sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(__sev);
 EXPORT_SYMBOL_GPL(__sev);
+static bool sev_enabled __section(.data) = false;
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -773,7 +774,6 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
        unsigned long feature_mask;
        u64 me_mask, msr;
        char buffer[16];
-       bool sme_only;
        int ret;
 
        /* Check for the SME/SEV support leaf */
@@ -808,6 +808,8 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
        if (!(eax & feature_mask))
                return;
 
+       me_mask = BIT_ULL(ebx & 0x3f);
+
        /* For SME, check the SYSCFG MSR */
        if (feature_mask == AMD_SME_BIT) {
                msr = __rdmsr(MSR_K8_SYSCFG);
@@ -820,9 +822,13 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
                msr = __rdmsr(MSR_AMD64_SEV);
                if (!(msr & MSR_AMD64_SEV_ENABLED))
                        return;
-       }
 
-       me_mask = BIT_ULL(ebx & 0x3f);
+               if (feature_mask == AMD_SEV_BIT) {
+                       sme_me_mask = me_mask;
+                       sev_enabled = true;
+                       return;
+               }
+       }
 
        /*
         * Fixups have not been applied to phys_base yet and we're running
@@ -847,16 +853,11 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
        } else if (!strncmp(buffer, cmd_on, sizeof(buffer))) {
                sme_me_mask = me_mask;
        } else if (!strncmp(buffer, cmd_sme, sizeof(buffer))) {
+               sme_me_mask = me_mask;
                sme_only = true;
+               return;
        }
 
        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
                sme_me_mask = me_mask;
-
-       if (sme_only)
-               return;
-
-       /* For SEV, check the SEV MSR */
-       if (feature_mask == AMD_SEV_BIT)
-               sev_enabled = true;
 }
-- 
2.9.5
