Re: [PATCH v8 10/18] mm: x86: Invoke hypercall when page encryption status is changed

2020-05-29 Thread Steve Rutherford
On Tue, May 5, 2020 at 2:18 PM Ashish Kalra  wrote:
>
> From: Brijesh Singh 
>
> Invoke a hypercall when a memory region is changed from encrypted ->
> decrypted and vice versa. Hypervisor needs to know the page encryption
> status during the guest migration.
>
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: "H. Peter Anvin" 
> Cc: Paolo Bonzini 
> Cc: "Radim Krčmář" 
> Cc: Joerg Roedel 
> Cc: Borislav Petkov 
> Cc: Tom Lendacky 
> Cc: x...@kernel.org
> Cc: k...@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Reviewed-by: Venu Busireddy 
> Signed-off-by: Brijesh Singh 
> Signed-off-by: Ashish Kalra 
> ---
>  arch/x86/include/asm/paravirt.h   | 10 +
>  arch/x86/include/asm/paravirt_types.h |  2 +
>  arch/x86/kernel/paravirt.c|  1 +
>  arch/x86/mm/mem_encrypt.c | 57 ++-
>  arch/x86/mm/pat/set_memory.c  |  7 
>  5 files changed, 76 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index 694d8daf4983..8127b9c141bf 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -78,6 +78,12 @@ static inline void paravirt_arch_exit_mmap(struct 
> mm_struct *mm)
> PVOP_VCALL1(mmu.exit_mmap, mm);
>  }
>
> +static inline void page_encryption_changed(unsigned long vaddr, int npages,
> +   bool enc)
> +{
> +   PVOP_VCALL3(mmu.page_encryption_changed, vaddr, npages, enc);
> +}
> +
>  #ifdef CONFIG_PARAVIRT_XXL
>  static inline void load_sp0(unsigned long sp0)
>  {
> @@ -946,6 +952,10 @@ static inline void paravirt_arch_dup_mmap(struct 
> mm_struct *oldmm,
>  static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
>  {
>  }
> +
> +static inline void page_encryption_changed(unsigned long vaddr, int npages, 
> bool enc)
> +{
> +}
>  #endif
>  #endif /* __ASSEMBLY__ */
>  #endif /* _ASM_X86_PARAVIRT_H */
> diff --git a/arch/x86/include/asm/paravirt_types.h 
> b/arch/x86/include/asm/paravirt_types.h
> index 732f62e04ddb..03bfd515c59c 100644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -215,6 +215,8 @@ struct pv_mmu_ops {
>
> /* Hook for intercepting the destruction of an mm_struct. */
> void (*exit_mmap)(struct mm_struct *mm);
> +   void (*page_encryption_changed)(unsigned long vaddr, int npages,
> +   bool enc);
>
>  #ifdef CONFIG_PARAVIRT_XXL
> struct paravirt_callee_save read_cr2;
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index c131ba4e70ef..840c02b23aeb 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -367,6 +367,7 @@ struct paravirt_patch_template pv_ops = {
> (void (*)(struct mmu_gather *, void 
> *))tlb_remove_page,
>
> .mmu.exit_mmap  = paravirt_nop,
> +   .mmu.page_encryption_changed= paravirt_nop,
>
>  #ifdef CONFIG_PARAVIRT_XXL
> .mmu.read_cr2   = __PV_IS_CALLEE_SAVE(native_read_cr2),
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index f4bd4b431ba1..c9800fa811f6 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -19,6 +19,7 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/kvm_para.h>
>
>  #include 
>  #include 
> @@ -29,6 +30,7 @@
>  #include 
>  #include 
>  #include 
> +#include <asm/kvm_para.h>
>
>  #include "mm_internal.h"
>
> @@ -196,6 +198,47 @@ void __init sme_early_init(void)
> swiotlb_force = SWIOTLB_FORCE;
>  }
>
> +static void set_memory_enc_dec_hypercall(unsigned long vaddr, int npages,
> +   bool enc)
> +{
> +   unsigned long sz = npages << PAGE_SHIFT;
> +   unsigned long vaddr_end, vaddr_next;
> +
> +   vaddr_end = vaddr + sz;
> +
> +   for (; vaddr < vaddr_end; vaddr = vaddr_next) {
> +   int psize, pmask, level;
> +   unsigned long pfn;
> +   pte_t *kpte;
> +
> +   kpte = lookup_address(vaddr, &level);
> +   if (!kpte || pte_none(*kpte))
> +   return;
> +
> +   switch (level) {
> +   case PG_LEVEL_4K:
> +   pfn = pte_pfn(*kpte);
> +   break;
> +   case PG_LEVEL_2M:
> +   pfn = pmd_pfn(*(pmd_t *)kpte);
> +   break;
> +   case PG_LEVEL_1G:
> +   pfn = pud_pfn(*(pud_t *)kpte);
> +   break;
> +   default:
> +   return;
> +   }
> +
> +   psize = page_level_size(level);
> +   pmask = page_level_mask(level);
> +
> +   kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS,
> +  pfn << PAGE_SHIFT, psize >> PAGE_SHIFT, 
> enc);
> +
> +   vaddr_next = (vaddr & pmask) + psize;
> +   }
> +}
> +

[PATCH v8 10/18] mm: x86: Invoke hypercall when page encryption status is changed

2020-05-05 Thread Ashish Kalra
From: Brijesh Singh 

Invoke a hypercall when a memory region is changed from encrypted ->
decrypted and vice versa. Hypervisor needs to know the page encryption
status during the guest migration.

Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: Paolo Bonzini 
Cc: "Radim Krčmář" 
Cc: Joerg Roedel 
Cc: Borislav Petkov 
Cc: Tom Lendacky 
Cc: x...@kernel.org
Cc: k...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Venu Busireddy 
Signed-off-by: Brijesh Singh 
Signed-off-by: Ashish Kalra 
---
 arch/x86/include/asm/paravirt.h   | 10 +
 arch/x86/include/asm/paravirt_types.h |  2 +
 arch/x86/kernel/paravirt.c|  1 +
 arch/x86/mm/mem_encrypt.c | 57 ++-
 arch/x86/mm/pat/set_memory.c  |  7 
 5 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 694d8daf4983..8127b9c141bf 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -78,6 +78,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct 
*mm)
PVOP_VCALL1(mmu.exit_mmap, mm);
 }
 
+static inline void page_encryption_changed(unsigned long vaddr, int npages,
+   bool enc)
+{
+   PVOP_VCALL3(mmu.page_encryption_changed, vaddr, npages, enc);
+}
+
 #ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
@@ -946,6 +952,10 @@ static inline void paravirt_arch_dup_mmap(struct mm_struct 
*oldmm,
 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
+
+static inline void page_encryption_changed(unsigned long vaddr, int npages, 
bool enc)
+{
+}
 #endif
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h 
b/arch/x86/include/asm/paravirt_types.h
index 732f62e04ddb..03bfd515c59c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -215,6 +215,8 @@ struct pv_mmu_ops {
 
/* Hook for intercepting the destruction of an mm_struct. */
void (*exit_mmap)(struct mm_struct *mm);
+   void (*page_encryption_changed)(unsigned long vaddr, int npages,
+   bool enc);
 
 #ifdef CONFIG_PARAVIRT_XXL
struct paravirt_callee_save read_cr2;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c131ba4e70ef..840c02b23aeb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -367,6 +367,7 @@ struct paravirt_patch_template pv_ops = {
(void (*)(struct mmu_gather *, void *))tlb_remove_page,
 
.mmu.exit_mmap  = paravirt_nop,
+   .mmu.page_encryption_changed= paravirt_nop,
 
 #ifdef CONFIG_PARAVIRT_XXL
.mmu.read_cr2   = __PV_IS_CALLEE_SAVE(native_read_cr2),
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index f4bd4b431ba1..c9800fa811f6 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include <linux/kvm_para.h>
 
 #include 
 #include 
@@ -29,6 +30,7 @@
 #include 
 #include 
 #include 
+#include <asm/kvm_para.h>
 
 #include "mm_internal.h"
 
@@ -196,6 +198,47 @@ void __init sme_early_init(void)
swiotlb_force = SWIOTLB_FORCE;
 }
 
+static void set_memory_enc_dec_hypercall(unsigned long vaddr, int npages,
+   bool enc)
+{
+   unsigned long sz = npages << PAGE_SHIFT;
+   unsigned long vaddr_end, vaddr_next;
+
+   vaddr_end = vaddr + sz;
+
+   for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+   int psize, pmask, level;
+   unsigned long pfn;
+   pte_t *kpte;
+
+   kpte = lookup_address(vaddr, &level);
+   if (!kpte || pte_none(*kpte))
+   return;
+
+   switch (level) {
+   case PG_LEVEL_4K:
+   pfn = pte_pfn(*kpte);
+   break;
+   case PG_LEVEL_2M:
+   pfn = pmd_pfn(*(pmd_t *)kpte);
+   break;
+   case PG_LEVEL_1G:
+   pfn = pud_pfn(*(pud_t *)kpte);
+   break;
+   default:
+   return;
+   }
+
+   psize = page_level_size(level);
+   pmask = page_level_mask(level);
+
+   kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS,
+  pfn << PAGE_SHIFT, psize >> PAGE_SHIFT, enc);
+
+   vaddr_next = (vaddr & pmask) + psize;
+   }
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
pgprot_t old_prot, new_prot;
@@ -253,12 +296,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int 
level, bool enc)
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
   unsigned long size, bool