Re: [RFC v3 03/17] mm: Introduce pte_spinlock

2017-05-03 Thread Laurent Dufour
On 30/04/2017 06:47, Matthew Wilcox wrote:
> On Thu, Apr 27, 2017 at 05:52:42PM +0200, Laurent Dufour wrote:
>> +++ b/mm/memory.c
>> @@ -2100,6 +2100,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
>>  pte_unmap_unlock(vmf->pte, vmf->ptl);
>>  }
>>  
>> +static bool pte_spinlock(struct vm_fault *vmf)
>> +{
>> +	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
>> +	spin_lock(vmf->ptl);
>> +	return true;
>> +}
> 
> To me 'pte_spinlock' is a noun, but this is really pte_spin_lock() (a verb).

Fair enough. pte_trylock() would be even more accurate, since patch 8/17
changes this function to call spin_trylock().

> Actually, it's really vmf_lock_pte().  We're locking the pte
> referred to by this vmf.  And so we should probably have a matching
> vmf_unlock_pte(vmf) to preserve the abstraction.

I'm not sure this will ease the reading. In most of this code, the PTEs
are unlocked through a call to pte_unmap_unlock().
The pte_trylock() call was introduced because in a few cases the VMA's
validity has to be checked before calling spin_lock(ptl); the unlock is
then handled through pte_unmap_unlock().
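
For illustration, here is a minimal sketch of the trylock flavour being
discussed. The real change is patch 8/17, so only the name, the
spin_trylock() call and the VM_FAULT_RETRY contract come from this
thread; the rest is an assumption, not quoted code:

/*
 * Hypothetical sketch: take the pte lock without sleeping.
 * Returning false lets the caller bail out with VM_FAULT_RETRY
 * instead of blocking on a contended ptl.
 */
static bool pte_trylock(struct vm_fault *vmf)
{
	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
	return spin_trylock(vmf->ptl);
}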



Re: [RFC v3 03/17] mm: Introduce pte_spinlock

2017-04-29 Thread Matthew Wilcox
On Thu, Apr 27, 2017 at 05:52:42PM +0200, Laurent Dufour wrote:
> +++ b/mm/memory.c
> @@ -2100,6 +2100,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
>   pte_unmap_unlock(vmf->pte, vmf->ptl);
>  }
>  
> +static bool pte_spinlock(struct vm_fault *vmf)
> +{
> +	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
> +	spin_lock(vmf->ptl);
> +	return true;
> +}

To me 'pte_spinlock' is a noun, but this is really pte_spin_lock() (a verb).

Actually, it's really vmf_lock_pte().  We're locking the pte
referred to by this vmf.  And so we should probably have a matching
vmf_unlock_pte(vmf) to preserve the abstraction.
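
For concreteness, the suggested pair might look like the following
minimal sketch; only the names come from the paragraph above, the
bodies are assumed:

/* Sketch only: lock the pte lock covering vmf->pmd. */
static void vmf_lock_pte(struct vm_fault *vmf)
{
	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
	spin_lock(vmf->ptl);
}

/* Sketch only: the matching unlock, so callers never touch vmf->ptl. */
static void vmf_unlock_pte(struct vm_fault *vmf)
{
	spin_unlock(vmf->ptl);
}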



[RFC v3 03/17] mm: Introduce pte_spinlock

2017-04-27 Thread Laurent Dufour
This is needed because in handle_pte_fault() pte_offset_map() is
called and then vmf->ptl is fetched and spin-locked.

This was previously embedded in the call to pte_offset_map_lock().
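
For reference, pte_offset_map_lock() is roughly the following macro
(paraphrased from include/linux/mm.h of this era, not quoted verbatim);
pte_spinlock() splits out just the pte_lockptr() and spin_lock() steps:

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})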

Signed-off-by: Laurent Dufour 
---
 mm/memory.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index bce32c9d73c2..441c0e3f3a0f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2100,6 +2100,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
+static bool pte_spinlock(struct vm_fault *vmf)
+{
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	spin_lock(vmf->ptl);
+	return true;
+}
+
 static bool pte_map_lock(struct vm_fault *vmf)
 {
 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
 				       &vmf->ptl);
@@ -3398,8 +3405,8 @@ static int do_numa_page(struct vm_fault *vmf)
 	 * page table entry is not accessible, so there would be no
 	 * concurrent hardware modifications to the PTE.
 	 */
-	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
-	spin_lock(vmf->ptl);
+	if (!pte_spinlock(vmf))
+		return VM_FAULT_RETRY;
 	if (unlikely(!pte_same(*vmf->pte, pte))) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		goto out;
@@ -3566,8 +3573,8 @@ static int handle_pte_fault(struct vm_fault *vmf)
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
 
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	spin_lock(vmf->ptl);
+	if (!pte_spinlock(vmf))
+		return VM_FAULT_RETRY;
 	entry = vmf->orig_pte;
 	if (unlikely(!pte_same(*vmf->pte, entry)))
 		goto unlock;
-- 
2.7.4


