[RFC v5 08/11] mm: Provide speculative fault infrastructure

2017-06-16 Thread Laurent Dufour
From: Peter Zijlstra 

Provide infrastructure to do a speculative fault (not holding
mmap_sem).

Because mmap_sem is not held, we can race against VMA change/removal
and page-table destruction. We use the SRCU VMA freeing to keep the
VMA around. We use the VMA seqcount to detect changes (including
unmapping / page-table deletion) and we use gup_fast() style
page-table walking to deal with page-table races.
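
For reference, the change detection relies on a per-VMA sequence counter
introduced earlier in this series. A minimal sketch of the check, assuming
a seqcount_t field named vm_sequence in the VMA (names here are assumptions,
not necessarily the series' exact code):

/*
 * Sketch only: writers bump the VMA's seqcount around any modification
 * (merge, split, unmap, remap), so a mismatch against the value sampled
 * at the start of the speculative fault means the VMA changed behind
 * our back and the fault must be retried the classic way.
 */
static inline bool vma_has_changed(struct vm_area_struct *vma,
				   unsigned int sequence)
{
	return unlikely(read_seqcount_retry(&vma->vm_sequence, sequence));
}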

Once we've obtained the page and are ready to update the PTE, we
validate that the state we started the fault with is still valid; if
not, we fail the fault with VM_FAULT_RETRY, otherwise we update the
PTE and we're done.
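
For illustration, an arch page-fault handler would be expected to try the
speculative path first and fall back to the usual mmap_sem-protected path
when it returns VM_FAULT_RETRY. A rough sketch of that calling convention
(the real arch wiring lands later in the series and does more checking
than shown here):

/* Illustrative fallback pattern only, not the series' actual arch code. */
static int fault_speculative_first(struct mm_struct *mm,
				   unsigned long address, unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	fault = handle_speculative_fault(mm, address, flags);
	if (!(fault & VM_FAULT_RETRY))
		return fault;

	/* Speculative path bailed out: redo the fault under mmap_sem. */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		/* A real handler would also try expand_stack() here. */
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}
	fault = handle_mm_fault(vma, address, flags);
	up_read(&mm->mmap_sem);
	return fault;
}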

Signed-off-by: Peter Zijlstra (Intel) 

[Manage the newly introduced pte_spinlock() for speculative page
 fault to fail if the VMA is touched behind our back]
[Rename vma_is_dead() to vma_has_changed()]
[Call p4d_alloc() as it is safe since pgd is valid]
[Call pud_alloc() as it is safe since p4d is valid]
[Set fe.sequence in __handle_mm_fault()]
[Abort speculative path when handle_userfault() has to be called]
[Add additional VMA flag checks in handle_speculative_fault()]
[Clear FAULT_FLAG_ALLOW_RETRY in handle_speculative_fault()]
[Don't set vmf->pte and vmf->ptl if pte_map_lock() failed]
[Remove warning comment about waiting for !seq&1 since we don't want
 to wait]
[Remove warning about no huge page support, mention it explicitly]
Signed-off-by: Laurent Dufour 
---
 include/linux/mm.h |   3 +
 mm/memory.c        | 181 ++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 181 insertions(+), 3 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b7ec2a76953..671541e00d26 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -315,6 +315,7 @@ struct vm_fault {
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff;  /* Logical page offset based on vma */
unsigned long address;  /* Faulting virtual address */
+   unsigned int sequence;
pmd_t *pmd; /* Pointer to pmd entry matching
 * the 'address' */
pud_t *pud; /* Pointer to pud entry matching
@@ -1286,6 +1287,8 @@ int invalidate_inode_page(struct page *page);
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
unsigned int flags);
+extern int handle_speculative_fault(struct mm_struct *mm,
+   unsigned long address, unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
diff --git a/mm/memory.c b/mm/memory.c
index 5d259cd67a83..0645cb21155f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2244,15 +2244,69 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 
 static bool pte_spinlock(struct vm_fault *vmf)
 {
+   bool ret = false;
+
+   /* Check if vma is still valid */
+   if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
+   vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+   spin_lock(vmf->ptl);
+   return true;
+   }
+
+   local_irq_disable();
+   if (vma_has_changed(vmf->vma, vmf->sequence))
+   goto out;
+
vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
spin_lock(vmf->ptl);
-   return true;
+
+   if (vma_has_changed(vmf->vma, vmf->sequence)) {
+   spin_unlock(vmf->ptl);
+   goto out;
+   }
+
+   ret = true;
+out:
+   local_irq_enable();
+   return ret;
 }
 
 static bool pte_map_lock(struct vm_fault *vmf)
 {
-   vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
-   return true;
+   bool ret = false;
+   pte_t *pte;
+   spinlock_t *ptl;
+
+   if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
+   vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+  vmf->address, &vmf->ptl);
+   return true;
+   }
+
+   /*
+* The first vma_has_changed() guarantees the page-tables are still
+* valid, having IRQs disabled ensures they stay around, hence the
+* second vma_has_changed() to make sure they are still valid once
+* we've got the lock. After that a concurrent zap_pte_range() will
+* block on the PTL and thus we're safe.
+*/
+   local_irq_disable();
+   if (vma_has_changed(vmf->vma, vmf->sequence))
+   goto out;
+
+   pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+ vmf->address, &ptl);
+   if (vma_has_changed(vmf->vma, vmf->sequence)) {
+   pte_unmap_unlock(pte, ptl);
+   goto out;
+   }
+
+   vmf->pte = pte;
+   vmf->ptl = ptl;
+   ret = true;
+out:
+   local_irq_enable();
+   return ret;
 }