From: Joerg Roedel <jroe...@suse.de>

Add a #VC exception handler which is used while the kernel still executes
in protected mode. This boot path already uses CPUID, which will cause #VC
exceptions in an SEV-ES guest.

Signed-off-by: Joerg Roedel <jroe...@suse.de>
---
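
Note (not part of the commit): below is a rough C sketch of how the GHCB MSR
protocol values used by sev_es_req_cpuid are composed and checked, assuming
the CPUID request/response codes 0x004/0x005 and MSR_AMD64_SEV_ES_GHCB
(0xC0010130) from the GHCB specification; the helper names are illustrative
only and do not exist in the tree.

    #include <stdint.h>

    #define GHCB_MSR_CPUID_REQ   0x004ULL  /* GHCBData[11:0]: CPUID request  */
    #define GHCB_MSR_CPUID_RESP  0x005ULL  /* GHCBData[11:0]: CPUID response */

    /* Compose the 64-bit value written to MSR_AMD64_SEV_ES_GHCB (edx:eax). */
    static inline uint64_t ghcb_cpuid_req(uint32_t reg_idx, uint32_t fn)
    {
            /* reg_idx: 0=EAX, 1=EBX, 2=ECX, 3=EDX -> bits 31:30; fn -> bits 63:32 */
            return GHCB_MSR_CPUID_REQ | ((uint64_t)(reg_idx & 3) << 30) |
                   ((uint64_t)fn << 32);
    }

    /* Decode the value read back after VMGEXIT; returns 0 on success. */
    static inline int ghcb_cpuid_resp(uint64_t msr_val, uint32_t *value)
    {
            if ((msr_val & 0xfffULL) != GHCB_MSR_CPUID_RESP)
                    return -1;      /* not a CPUID response          */
            if (msr_val & 0x3ffff000ULL)
                    return -1;      /* bits 29:12 must be zero (MBZ) */
            *value = (uint32_t)(msr_val >> 32);
            return 0;
    }

Only the composition and decoding of the MSR value is shown here; the actual
wrmsr/VMGEXIT/rdmsr sequence must run at CPL0 inside the SEV-ES guest, as in
sev_es_req_cpuid in the diff below.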
 arch/x86/boot/compressed/head_64.S     |  6 ++
 arch/x86/boot/compressed/mem_encrypt.S | 96 +++++++++++++++++++++++++-
 2 files changed, 101 insertions(+), 1 deletion(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 2001c3bf0748..ee448aedb8b0 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -34,6 +34,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 #include <asm/desc_defs.h>
+#include <asm/trapnr.h>
 #include "pgtable.h"
 
 /*
@@ -857,6 +858,11 @@ SYM_FUNC_END(startup32_set_idt_entry)
 
 SYM_FUNC_START(startup32_load_idt)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+       /* #VC handler */
+       leal    rva(startup32_vc_handler)(%ebp), %eax
+       movl    $X86_TRAP_VC, %edx
+       call    startup32_set_idt_entry
+
        /* Load IDT */
        leal    rva(boot32_idt)(%ebp), %eax
        movl    %eax, rva(boot32_idt_desc+2)(%ebp)
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index aa561795efd1..2ca056a3707c 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -67,10 +67,104 @@ SYM_FUNC_START(get_sev_encryption_bit)
        ret
 SYM_FUNC_END(get_sev_encryption_bit)
 
+/**
+ * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
+ *                   the GHCB MSR protocol
+ *
+ * @%eax:      Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
+ * @%edx:      CPUID Function
+ *
+ * Returns 0 in %eax on success, non-zero on failure
+ * %edx returns CPUID value on success
+ */
+SYM_CODE_START_LOCAL(sev_es_req_cpuid)
+       shll    $30, %eax
+       orl     $0x00000004, %eax
+       movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
+       wrmsr
+       rep; vmmcall            # VMGEXIT
+       rdmsr
+
+       /* Check response */
+       movl    %eax, %ecx
+       andl    $0x3ffff000, %ecx       # Bits [12-29] MBZ
+       jnz     2f
+
+       /* Check return code */
+       andl    $0xfff, %eax
+       cmpl    $5, %eax
+       jne     2f
+
+       /* All good - return success */
+       xorl    %eax, %eax
+1:
+       ret
+2:
+       movl    $-1, %eax
+       jmp     1b
+SYM_CODE_END(sev_es_req_cpuid)
+
+SYM_CODE_START(startup32_vc_handler)
+       pushl   %eax
+       pushl   %ebx
+       pushl   %ecx
+       pushl   %edx
+
+       /* Keep CPUID function in %ebx */
+       movl    %eax, %ebx
+
+       /* Check if error-code == SVM_EXIT_CPUID */
+       cmpl    $0x72, 16(%esp)
+       jne     .Lfail
+
+       movl    $0, %eax                # Request CPUID[fn].EAX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 12(%esp)          # Store result
+
+       movl    $1, %eax                # Request CPUID[fn].EBX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 8(%esp)           # Store result
+
+       movl    $2, %eax                # Request CPUID[fn].ECX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 4(%esp)           # Store result
+
+       movl    $3, %eax                # Request CPUID[fn].EDX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 0(%esp)           # Store result
+
+       popl    %edx
+       popl    %ecx
+       popl    %ebx
+       popl    %eax
+
+       /* Remove error code */
+       addl    $4, %esp
+
+       /* Jump over CPUID instruction */
+       addl    $2, (%esp)
+
+       iret
+.Lfail:
+       hlt
+       jmp .Lfail
+SYM_CODE_END(startup32_vc_handler)
+
        .code64
 
 #include "../../kernel/sev_verify_cbit.S"
-
 SYM_FUNC_START(set_sev_encryption_mask)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        push    %rbp
-- 
2.30.1
