Re: [Intel-gfx] [PATCH 2/5] mm: Refactor remap_pfn_range()

2015-04-09 Thread Joonas Lahtinen
On ti, 2015-04-07 at 17:31 +0100, Chris Wilson wrote:
 In preparation for exporting very similar functionality through another
 interface, gut the current remap_pfn_range(). The motivating factor here
 is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of
 errors rather than BUG_ON.
 
 Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
 Cc: Andrew Morton a...@linux-foundation.org
 Cc: Kirill A. Shutemov kirill.shute...@linux.intel.com
 Cc: Peter Zijlstra pet...@infradead.org
 Cc: Rik van Riel r...@redhat.com
 Cc: Mel Gorman mgor...@suse.de
 Cc: Cyrill Gorcunov gorcu...@gmail.com
 Cc: Johannes Weiner han...@cmpxchg.org
 Cc: linux...@kvack.org
 ---
  mm/memory.c | 102 
 +---
  1 file changed, 57 insertions(+), 45 deletions(-)
 
 diff --git a/mm/memory.c b/mm/memory.c
 index 97839f5c8c30..acb06f40d614 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -1614,71 +1614,81 @@ int vm_insert_mixed(struct vm_area_struct *vma, 
 unsigned long addr,
  }
  EXPORT_SYMBOL(vm_insert_mixed);
  
 +struct remap_pfn {
 + struct mm_struct *mm;
 + unsigned long addr;
 + unsigned long pfn;
 + pgprot_t prot;
 +};
 +
  /*
   * maps a range of physical memory into the requested pages. the old
   * mappings are removed. any references to nonexistent pages results
   * in null mappings (currently treated as copy-on-access)
   */
 -static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 - unsigned long addr, unsigned long end,
 - unsigned long pfn, pgprot_t prot)
 +static inline int remap_pfn(struct remap_pfn *r, pte_t *pte)

I think add a brief own comment for this function and keep it below old
comment not to cause unnecessary noise.

Otherwise looks good.

Reviewed-by: Joonas Lahtinen joonas.lahti...@linux.intel.com

 +{
 + if (!pte_none(*pte))
 + return -EBUSY;
 +
 + set_pte_at(r->mm, r->addr, pte,
 +pte_mkspecial(pfn_pte(r->pfn, r->prot)));
 + r->pfn++;
 + r->addr += PAGE_SIZE;
 + return 0;
 +}
 +
 +static int remap_pte_range(struct remap_pfn *r, pmd_t *pmd, unsigned long 
 end)
  {
   pte_t *pte;
   spinlock_t *ptl;
 + int err;
  
 - pte = pte_alloc_map_lock(mm, pmd, addr, ptl);
 + pte = pte_alloc_map_lock(r->mm, pmd, r->addr, ptl);
   if (!pte)
   return -ENOMEM;
 +
   arch_enter_lazy_mmu_mode();
   do {
 - BUG_ON(!pte_none(*pte));
 - set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 - pfn++;
 - } while (pte++, addr += PAGE_SIZE, addr != end);
 + err = remap_pfn(r, pte++);
 + } while (err == 0 && r->addr < end);
   arch_leave_lazy_mmu_mode();
 +
   pte_unmap_unlock(pte - 1, ptl);
 - return 0;
 + return err;
  }
  
 -static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 - unsigned long addr, unsigned long end,
 - unsigned long pfn, pgprot_t prot)
 +static inline int remap_pmd_range(struct remap_pfn *r, pud_t *pud, unsigned 
 long end)
  {
   pmd_t *pmd;
 - unsigned long next;
 + int err;
  
 - pfn -= addr >> PAGE_SHIFT;
 - pmd = pmd_alloc(mm, pud, addr);
 + pmd = pmd_alloc(r->mm, pud, r->addr);
   if (!pmd)
   return -ENOMEM;
   VM_BUG_ON(pmd_trans_huge(*pmd));
 +
   do {
 - next = pmd_addr_end(addr, end);
 - if (remap_pte_range(mm, pmd, addr, next,
 - pfn + (addr >> PAGE_SHIFT), prot))
 - return -ENOMEM;
 - } while (pmd++, addr = next, addr != end);
 - return 0;
 + err = remap_pte_range(r, pmd++, pmd_addr_end(r->addr, end));
 + } while (err == 0 && r->addr < end);
 +
 + return err;
  }
  
 -static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 - unsigned long addr, unsigned long end,
 - unsigned long pfn, pgprot_t prot)
 +static inline int remap_pud_range(struct remap_pfn *r, pgd_t *pgd, unsigned 
 long end)
  {
   pud_t *pud;
 - unsigned long next;
 + int err;
  
 - pfn -= addr >> PAGE_SHIFT;
 - pud = pud_alloc(mm, pgd, addr);
 + pud = pud_alloc(r->mm, pgd, r->addr);
   if (!pud)
   return -ENOMEM;
 +
   do {
 - next = pud_addr_end(addr, end);
 - if (remap_pmd_range(mm, pud, addr, next,
 - pfn + (addr >> PAGE_SHIFT), prot))
 - return -ENOMEM;
 - } while (pud++, addr = next, addr != end);
 - return 0;
 + err = remap_pmd_range(r, pud++, pud_addr_end(r->addr, end));
 + } while (err == 0 && r->addr < end);
 +
 + return err;
  }
  
  /**
 @@ -1694,10 +1704,9 @@ static inline int remap_pud_range(struct mm_struct 
 *mm, pgd_t *pgd,
  int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
   unsigned long pfn, unsigned long size, pgprot_t 

Re: [Intel-gfx] [PATCH 2/5] mm: Refactor remap_pfn_range()

2015-04-08 Thread Peter Zijlstra
On Tue, Apr 07, 2015 at 01:27:21PM -0700, Andrew Morton wrote:
 On Tue,  7 Apr 2015 17:31:36 +0100 Chris Wilson ch...@chris-wilson.co.uk 
 wrote:
 
  In preparation for exporting very similar functionality through another
  interface, gut the current remap_pfn_range(). The motivating factor here
   is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of
  errors rather than BUG_ON.
 
 I'm not on intel-gfx and for some reason these patches didn't show up on
 linux-mm.  I wanted to comment on mutex: Export an interface to wrap a
 mutex lock but
 http://lists.freedesktop.org/archives/intel-gfx/2015-April/064063.html
 doesn't tell me which mailing lists were cc'ed and I can't find that
 patch on linux-kernel.
 
 Can you please do something to make this easier for us??
 
 And please fully document all the mutex interfaces which you just
 added.

Also, please Cc locking people if you poke at mutexes..
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx


[Intel-gfx] [PATCH 2/5] mm: Refactor remap_pfn_range()

2015-04-07 Thread Chris Wilson
In preparation for exporting very similar functionality through another
interface, gut the current remap_pfn_range(). The motivating factor here
is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of
errors rather than BUG_ON.

Signed-off-by: Chris Wilson ch...@chris-wilson.co.uk
Cc: Andrew Morton a...@linux-foundation.org
Cc: Kirill A. Shutemov kirill.shute...@linux.intel.com
Cc: Peter Zijlstra pet...@infradead.org
Cc: Rik van Riel r...@redhat.com
Cc: Mel Gorman mgor...@suse.de
Cc: Cyrill Gorcunov gorcu...@gmail.com
Cc: Johannes Weiner han...@cmpxchg.org
Cc: linux...@kvack.org
---
 mm/memory.c | 102 +---
 1 file changed, 57 insertions(+), 45 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 97839f5c8c30..acb06f40d614 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1614,71 +1614,81 @@ int vm_insert_mixed(struct vm_area_struct *vma, 
unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_mixed);
 
+struct remap_pfn {
+   struct mm_struct *mm;
+   unsigned long addr;
+   unsigned long pfn;
+   pgprot_t prot;
+};
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
  * in null mappings (currently treated as copy-on-access)
  */
-static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-   unsigned long addr, unsigned long end,
-   unsigned long pfn, pgprot_t prot)
+static inline int remap_pfn(struct remap_pfn *r, pte_t *pte)
+{
+   if (!pte_none(*pte))
+   return -EBUSY;
+
+   set_pte_at(r->mm, r->addr, pte,
+  pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+   r->pfn++;
+   r->addr += PAGE_SIZE;
+   return 0;
+}
+
+static int remap_pte_range(struct remap_pfn *r, pmd_t *pmd, unsigned long end)
 {
pte_t *pte;
spinlock_t *ptl;
+   int err;
 
-   pte = pte_alloc_map_lock(mm, pmd, addr, ptl);
+   pte = pte_alloc_map_lock(r->mm, pmd, r->addr, ptl);
if (!pte)
return -ENOMEM;
+
arch_enter_lazy_mmu_mode();
do {
-   BUG_ON(!pte_none(*pte));
-   set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
-   pfn++;
-   } while (pte++, addr += PAGE_SIZE, addr != end);
+   err = remap_pfn(r, pte++);
+   } while (err == 0 && r->addr < end);
arch_leave_lazy_mmu_mode();
+
pte_unmap_unlock(pte - 1, ptl);
-   return 0;
+   return err;
 }
 
-static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
-   unsigned long addr, unsigned long end,
-   unsigned long pfn, pgprot_t prot)
+static inline int remap_pmd_range(struct remap_pfn *r, pud_t *pud, unsigned 
long end)
 {
pmd_t *pmd;
-   unsigned long next;
+   int err;
 
-   pfn -= addr >> PAGE_SHIFT;
-   pmd = pmd_alloc(mm, pud, addr);
+   pmd = pmd_alloc(r->mm, pud, r->addr);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
+
do {
-   next = pmd_addr_end(addr, end);
-   if (remap_pte_range(mm, pmd, addr, next,
-   pfn + (addr >> PAGE_SHIFT), prot))
-   return -ENOMEM;
-   } while (pmd++, addr = next, addr != end);
-   return 0;
+   err = remap_pte_range(r, pmd++, pmd_addr_end(r->addr, end));
+   } while (err == 0 && r->addr < end);
+
+   return err;
 }
 
-static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
-   unsigned long addr, unsigned long end,
-   unsigned long pfn, pgprot_t prot)
+static inline int remap_pud_range(struct remap_pfn *r, pgd_t *pgd, unsigned 
long end)
 {
pud_t *pud;
-   unsigned long next;
+   int err;
 
-   pfn -= addr >> PAGE_SHIFT;
-   pud = pud_alloc(mm, pgd, addr);
+   pud = pud_alloc(r->mm, pgd, r->addr);
if (!pud)
return -ENOMEM;
+
do {
-   next = pud_addr_end(addr, end);
-   if (remap_pmd_range(mm, pud, addr, next,
-   pfn + (addr >> PAGE_SHIFT), prot))
-   return -ENOMEM;
-   } while (pud++, addr = next, addr != end);
-   return 0;
+   err = remap_pmd_range(r, pud++, pud_addr_end(r->addr, end));
+   } while (err == 0 && r->addr < end);
+
+   return err;
 }
 
 /**
@@ -1694,10 +1704,9 @@ static inline int remap_pud_range(struct mm_struct *mm, 
pgd_t *pgd,
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-   pgd_t *pgd;
-   unsigned long next;
unsigned long end = addr + PAGE_ALIGN(size);
-   struct mm_struct *mm = vma->vm_mm;
+   struct remap_pfn r;
+   pgd_t *pgd;
int err;
 
/*
@@ -1731,19 +1740,22 @@ 

Re: [Intel-gfx] [PATCH 2/5] mm: Refactor remap_pfn_range()

2015-04-07 Thread Andrew Morton
On Tue,  7 Apr 2015 17:31:36 +0100 Chris Wilson ch...@chris-wilson.co.uk 
wrote:

 In preparation for exporting very similar functionality through another
 interface, gut the current remap_pfn_range(). The motivating factor here
 is to reuse the PGD/PUD/PMD/PTE walker, but allow back propagation of
 errors rather than BUG_ON.

I'm not on intel-gfx and for some reason these patches didn't show up on
linux-mm.  I wanted to comment on mutex: Export an interface to wrap a
mutex lock but
http://lists.freedesktop.org/archives/intel-gfx/2015-April/064063.html
doesn't tell me which mailing lists were cc'ed and I can't find that
patch on linux-kernel.

Can you please do something to make this easier for us??

And please fully document all the mutex interfaces which you just
added.
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx