On 10/28/20 11:46 AM, Joerg Roedel wrote:
> From: Joerg Roedel <jroe...@suse.de>
> 
> MMIO memory is usually not mapped encrypted, so there is no reason to
> support emulated MMIO when it is mapped encrypted.
> 
> Prevent a possible hypervisor attack where a RAM page is mapped as
> an MMIO page in the nested page-table, so that any guest access to it
> will trigger a #VC exception and leak the data stored in that page to
> the hypervisor via the GHCB (as with valid MMIO). On the read side,
> this attack would allow the hypervisor to inject data into the guest.
> 
> Signed-off-by: Joerg Roedel <jroe...@suse.de>

Reviewed-by: Tom Lendacky <thomas.lenda...@amd.com>

> ---
>  arch/x86/kernel/sev-es.c | 20 +++++++++++++-------
>  1 file changed, 13 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
> index 4a96726fbaf8..0bd1a0fc587e 100644
> --- a/arch/x86/kernel/sev-es.c
> +++ b/arch/x86/kernel/sev-es.c
> @@ -374,8 +374,8 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
>       return ES_EXCEPTION;
>  }
>  
> -static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
> -                              unsigned long vaddr, phys_addr_t *paddr)
> +static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
> +                                        unsigned long vaddr, phys_addr_t *paddr)
>  {
>       unsigned long va = (unsigned long)vaddr;
>       unsigned int level;
> @@ -394,15 +394,19 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
>               if (user_mode(ctxt->regs))
>                       ctxt->fi.error_code |= X86_PF_USER;
>  
> -             return false;
> +             return ES_EXCEPTION;
>       }
>  
> +     if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
> +             /* Emulated MMIO to/from encrypted memory not supported */
> +             return ES_UNSUPPORTED;
> +
>       pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
>       pa |= va & ~page_level_mask(level);
>  
>       *paddr = pa;
>  
> -     return true;
> +     return ES_OK;
>  }
>  
>  /* Include code shared with pre-decompression boot stage */
> @@ -731,6 +735,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
>  {
>       u64 exit_code, exit_info_1, exit_info_2;
>       unsigned long ghcb_pa = __pa(ghcb);
> +     enum es_result res;
>       phys_addr_t paddr;
>       void __user *ref;
>  
> @@ -740,11 +745,12 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
>  
>       exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
>  
> -     if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) {
> -             if (!read)
> +     res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
> +     if (res != ES_OK) {
> +             if (res == ES_EXCEPTION && !read)
>                       ctxt->fi.error_code |= X86_PF_WRITE;
>  
> -             return ES_EXCEPTION;
> +             return res;
>       }
>  
>       exit_info_1 = paddr;
> 
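
Not part of the patch, but for anyone skimming the thread: the core of the
change is that vc_slow_virt_to_phys() now returns an enum es_result instead
of a bool, so vc_do_mmio() can tell a translation fault (ES_EXCEPTION,
reflected to the guest as a page fault) apart from an encrypted mapping
(ES_UNSUPPORTED, emulation refused). Below is a minimal user-space sketch of
that control flow; the types, helper names and the encryption-bit position
are mocked-up stand-ins, not the real kernel definitions.

/*
 * Stand-alone sketch (not kernel code) of the control-flow change above:
 * the translation helper reports *why* it failed, so the MMIO path can
 * distinguish "page not mapped" (reflect a #PF into the guest) from
 * "page mapped encrypted" (refuse emulation entirely).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum es_result { ES_OK, ES_UNSUPPORTED, ES_EXCEPTION };

#define FAKE_PAGE_ENC	(1ULL << 51)	/* illustrative encryption bit, not the real C-bit */
#define FAKE_PAGE_MASK	(~0xfffULL)

struct fake_pte {
	uint64_t val;
	bool present;
};

static enum es_result fake_slow_virt_to_phys(const struct fake_pte *pte,
					     uint64_t vaddr, uint64_t *paddr)
{
	if (!pte->present)
		return ES_EXCEPTION;	/* caller turns this into a page fault */

	if (pte->val & FAKE_PAGE_ENC)
		return ES_UNSUPPORTED;	/* never emulate MMIO on encrypted memory */

	*paddr = (pte->val & FAKE_PAGE_MASK) | (vaddr & ~FAKE_PAGE_MASK);
	return ES_OK;
}

int main(void)
{
	struct fake_pte mmio = { .val = 0xfebc0000ULL,             .present = true };
	struct fake_pte ram  = { .val = 0x1000ULL | FAKE_PAGE_ENC, .present = true };
	uint64_t pa = 0;

	/* Unencrypted mapping: translation succeeds, emulation may proceed. */
	printf("mmio page -> %d, pa=%#llx\n",
	       fake_slow_virt_to_phys(&mmio, 0x10, &pa), (unsigned long long)pa);

	/* Encrypted mapping: refuse emulation instead of leaking the data. */
	printf("encrypted page -> %d\n",
	       fake_slow_virt_to_phys(&ram, 0x10, &pa));

	return 0;
}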
