Re: [PATCH v2] KVM: arm/arm64: Re-check VMA on detecting a poisoned page

2019-12-18 Thread Christoffer Dall
On Tue, Dec 17, 2019 at 12:38:09PM +, James Morse wrote:
> When we check for a poisoned page, we use the VMA to tell userspace
> about the looming disaster. But we pass a pointer to this VMA
> after having released the mmap_sem, which isn't a good idea.
> 
> Instead, stash the shift value that goes with this pfn while
> we are holding the mmap_sem.
> 
> Reported-by: Marc Zyngier 
> Signed-off-by: James Morse 
> ---
> 
> Based on Marc's patch:
> Link: lore.kernel.org/r/20191211165651.7889-3-...@kernel.org
> 
>  virt/kvm/arm/mmu.c | 20 +---
>  1 file changed, 9 insertions(+), 11 deletions(-)
> 
> diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
> index 38b4c910b6c3..bb0f8d648678 100644
> --- a/virt/kvm/arm/mmu.c
> +++ b/virt/kvm/arm/mmu.c
> @@ -1591,16 +1591,8 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
>   __invalidate_icache_guest_page(pfn, size);
>  }
>  
> -static void kvm_send_hwpoison_signal(unsigned long address,
> -  struct vm_area_struct *vma)
> +static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
>  {
> - short lsb;
> -
> - if (is_vm_hugetlb_page(vma))
> - lsb = huge_page_shift(hstate_vma(vma));
> - else
> - lsb = PAGE_SHIFT;
> -
>   send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
>  }
>  
> @@ -1673,6 +1665,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>   struct kvm *kvm = vcpu->kvm;
>   struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
>   struct vm_area_struct *vma;
> + short vma_shift;
>   kvm_pfn_t pfn;
>   pgprot_t mem_type = PAGE_S2;
>   bool logging_active = memslot_is_logging(memslot);
> @@ -1696,7 +1689,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>   return -EFAULT;
>   }
>  
> - vma_pagesize = vma_kernel_pagesize(vma);
> + if (is_vm_hugetlb_page(vma))
> + vma_shift = huge_page_shift(hstate_vma(vma));
> + else
> + vma_shift = PAGE_SHIFT;
> +
> + vma_pagesize = 1ULL << vma_shift;
>   if (logging_active ||
>   !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
>   force_pte = true;
> @@ -1735,7 +1733,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  
>   pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
>   if (pfn == KVM_PFN_ERR_HWPOISON) {
> - kvm_send_hwpoison_signal(hva, vma);
> + kvm_send_hwpoison_signal(hva, vma_shift);
>   return 0;
>   }
>   if (is_error_noslot_pfn(pfn))
> -- 
> 2.24.0
> 
> 
Reviewed-by: Christoffer Dall 


[PATCH v2] KVM: arm/arm64: Re-check VMA on detecting a poisoned page

2019-12-17 Thread James Morse
When we check for a poisoned page, we use the VMA to tell userspace
about the looming disaster. But we pass a pointer to this VMA
after having released the mmap_sem, which isn't a good idea.

Instead, stash the shift value that goes with this pfn while
we are holding the mmap_sem.

Reported-by: Marc Zyngier 
Signed-off-by: James Morse 
---

Based on Marc's patch:
Link: lore.kernel.org/r/20191211165651.7889-3-...@kernel.org

 virt/kvm/arm/mmu.c | 20 +---
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 38b4c910b6c3..bb0f8d648678 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1591,16 +1591,8 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
__invalidate_icache_guest_page(pfn, size);
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address,
-struct vm_area_struct *vma)
+static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
-   short lsb;
-
-   if (is_vm_hugetlb_page(vma))
-   lsb = huge_page_shift(hstate_vma(vma));
-   else
-   lsb = PAGE_SHIFT;
-
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
@@ -1673,6 +1665,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
+   short vma_shift;
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool logging_active = memslot_is_logging(memslot);
@@ -1696,7 +1689,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
 
-   vma_pagesize = vma_kernel_pagesize(vma);
+   if (is_vm_hugetlb_page(vma))
+   vma_shift = huge_page_shift(hstate_vma(vma));
+   else
+   vma_shift = PAGE_SHIFT;
+
+   vma_pagesize = 1ULL << vma_shift;
if (logging_active ||
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
@@ -1735,7 +1733,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (pfn == KVM_PFN_ERR_HWPOISON) {
-   kvm_send_hwpoison_signal(hva, vma);
+   kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
}
if (is_error_noslot_pfn(pfn))
-- 
2.24.0

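
For readers outside the kernel tree, the pattern the patch adopts can be shown with a minimal, self-contained userspace sketch. Everything below is invented for illustration and is not the KVM code: fake_vma, lookup_vma(), report_poison(), the pthread rwlock standing in for mmap_sem, and the PAGE_SHIFT value of 12 are all assumptions. The only point carried over from the patch is the ordering: the value derived from the VMA (its page shift) is captured while the lock protecting the VMA is held, and only that captured value is used after the lock is dropped.

/*
 * Minimal sketch (not kernel code) of "stash the shift while holding the
 * lock, never touch the VMA pointer afterwards".
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* stand-in for the kernel constant */

struct fake_vma {
	bool is_hugetlb;
	short huge_shift;		/* stand-in for huge_page_shift() */
};

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static struct fake_vma the_vma = { .is_hugetlb = true, .huge_shift = 21 };

/* Pretend this is find_vma(): the result is only valid while mmap_sem is held. */
static struct fake_vma *lookup_vma(void)
{
	return &the_vma;
}

/* Pretend this is kvm_send_hwpoison_signal(): it takes the shift, not the VMA. */
static void report_poison(unsigned long address, short lsb)
{
	printf("poison at %#lx, significant address bits: %d\n", address, lsb);
}

int main(void)
{
	short vma_shift;

	pthread_rwlock_rdlock(&mmap_sem);
	struct fake_vma *vma = lookup_vma();

	/* Stash the shift while the VMA is known to be stable. */
	vma_shift = vma->is_hugetlb ? vma->huge_shift : PAGE_SHIFT;

	pthread_rwlock_unlock(&mmap_sem);
	/* From here on, 'vma' may be stale; only vma_shift is used. */

	report_poison(0xdead000UL, vma_shift);
	return 0;
}

Built with cc -pthread, this prints the huge-page shift (21) rather than PAGE_SHIFT, mirroring the hugetlb branch that user_mem_abort() now evaluates before dropping the lock.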