Re: [PATCH 4/5] powerpc/mmu: use pinned_vm instead of locked_vm to account pinned pages

2019-02-12 Thread kbuild test robot
Hi Daniel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on vfio/next]
[also build test ERROR on v5.0-rc4]
[cannot apply to next-20190212]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improve the system]

url:
https://github.com/0day-ci/linux/commits/Daniel-Jordan/use-pinned_vm-instead-of-locked_vm-to-account-pinned-pages/20190213-070458
base:   https://github.com/awilliam/linux-vfio.git next
config: powerpc-defconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 8.2.0-11) 8.2.0
reproduce:
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=8.2.0 make.cross ARCH=powerpc 

All errors (new ones prefixed by >>):

   In file included from include/linux/rcupdate.h:38,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from arch/powerpc/mm/mmu_context_iommu.c:13:
   arch/powerpc/mm/mmu_context_iommu.c: In function 'mm_iommu_adjust_pinned_vm':
>> arch/powerpc/mm/mmu_context_iommu.c:55:43: error: passing argument 2 of 
>> 'atomic64_add_return_relaxed' from incompatible pointer type 
>> [-Werror=incompatible-pointer-types]
  pinned_vm = atomic64_add_return(npages, &mm->pinned_vm);
  ^~
   include/linux/atomic.h:75:22: note: in definition of macro 
'__atomic_op_fence'
 typeof(op##_relaxed(args)) __ret;\
 ^~~~
   arch/powerpc/mm/mmu_context_iommu.c:55:15: note: in expansion of macro 
'atomic64_add_return'
  pinned_vm = atomic64_add_return(npages, &mm->pinned_vm);
  ^~~
   In file included from include/linux/atomic.h:7,
from include/linux/rcupdate.h:38,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from arch/powerpc/mm/mmu_context_iommu.c:13:
   arch/powerpc/include/asm/atomic.h:331:52: note: expected 'atomic64_t *' {aka 
'struct <anonymous> *'} but argument is of type 'long unsigned int *'
atomic64_##op##_return_relaxed(long a, atomic64_t *v)   \
   ^
   arch/powerpc/include/asm/atomic.h:367:2: note: in expansion of macro 
'ATOMIC64_OP_RETURN_RELAXED'
 ATOMIC64_OP_RETURN_RELAXED(op, asm_op)\
 ^~
   arch/powerpc/include/asm/atomic.h:370:1: note: in expansion of macro 
'ATOMIC64_OPS'
ATOMIC64_OPS(add, add)
^~~~
   In file included from include/linux/rcupdate.h:38,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from arch/powerpc/mm/mmu_context_iommu.c:13:
>> arch/powerpc/mm/mmu_context_iommu.c:55:43: error: passing argument 2 of 
>> 'atomic64_add_return_relaxed' from incompatible pointer type 
>> [-Werror=incompatible-pointer-types]
  pinned_vm = atomic64_add_return(npages, &mm->pinned_vm);
  ^~
   include/linux/atomic.h:77:23: note: in definition of macro 
'__atomic_op_fence'
 __ret = op##_relaxed(args); \
  ^~~~
   arch/powerpc/mm/mmu_context_iommu.c:55:15: note: in expansion of macro 
'atomic64_add_return'
  pinned_vm = atomic64_add_return(npages, &mm->pinned_vm);
  ^~~
   In file included from include/linux/atomic.h:7,
from include/linux/rcupdate.h:38,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from arch/powerpc/mm/mmu_context_iommu.c:13:
   arch/powerpc/include/asm/atomic.h:331:52: note: expected 'atomic64_t *' {aka 
'struct <anonymous> *'} but argument is of type 'long unsigned int *'
atomic64_##op##_return_relaxed(long a, atomic64_t *v)   \
   ^
   arch/powerpc/include/asm/atomic.h:367:2: note: in expansion of macro 
'ATOMIC64_OP_RETURN_RELAXED'
 ATOMIC64_OP_RETURN_RELAXED(op, asm_op)\
 ^~
   arch/powerpc/include/asm/atomic.h:370:1: note: in expansion of macro 
'ATOMIC64_OPS'
ATOMIC64_OPS(add, add)
^~~~
>> arch/powerpc/mm/mmu_context_iommu.c:58:25: error: passing argument 2 of 
>> 'atomic64_sub' from incompatible pointer type 
>> [-Werror=incompatible-pointer-types]
   atomic64_sub(npages, &mm->pinned_vm);
^~
   In file included from include/linux/atomic.h:7,
from include/linux/rcupdate.h:38,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from arch/powerpc/mm/mmu_context_iommu.c:13:
   arch/powerpc/include/asm/atomic.h [remainder of diagnostic truncated in archive]

[PATCH 4/5] powerpc/mmu: use pinned_vm instead of locked_vm to account pinned pages

2019-02-11 Thread Daniel Jordan
Beginning with bc3e53f682d9 ("mm: distinguish between mlocked and pinned
pages"), locked and pinned pages are accounted separately.  The IOMMU
MMU helpers on powerpc account pinned pages to locked_vm; use pinned_vm
instead.

pinned_vm recently became atomic and so no longer relies on mmap_sem being
held as writer: delete the down_write/up_write around the accounting.

Signed-off-by: Daniel Jordan 
---
 arch/powerpc/mm/mmu_context_iommu.c | 43 ++---
 1 file changed, 21 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/mm/mmu_context_iommu.c 
b/arch/powerpc/mm/mmu_context_iommu.c
index a712a650a8b6..fdf670542847 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -40,36 +40,35 @@ struct mm_iommu_table_group_mem_t {
u64 dev_hpa;/* Device memory base address */
 };
 
-static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
+static long mm_iommu_adjust_pinned_vm(struct mm_struct *mm,
unsigned long npages, bool incr)
 {
-   long ret = 0, locked, lock_limit;
+   long ret = 0;
+   unsigned long lock_limit;
+   s64 pinned_vm;
 
if (!npages)
return 0;
 
-   down_write(&mm->mmap_sem);
-
if (incr) {
-   locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-   if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+   pinned_vm = atomic64_add_return(npages, &mm->pinned_vm);
+   if (pinned_vm > lock_limit && !capable(CAP_IPC_LOCK)) {
ret = -ENOMEM;
-   else
-   mm->locked_vm += npages;
+   atomic64_sub(npages, &mm->pinned_vm);
+   }
} else {
-   if (WARN_ON_ONCE(npages > mm->locked_vm))
-   npages = mm->locked_vm;
-   mm->locked_vm -= npages;
+   pinned_vm = atomic64_read(&mm->pinned_vm);
+   if (WARN_ON_ONCE(npages > pinned_vm))
+   npages = pinned_vm;
+   atomic64_sub(npages, &mm->pinned_vm);
}
 
-   pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-   current ? current->pid : 0,
-   incr ? '+' : '-',
+   pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%lu %ld/%lu\n",
+   current ? current->pid : 0, incr ? '+' : '-',
npages << PAGE_SHIFT,
-   mm->locked_vm << PAGE_SHIFT,
+   atomic64_read(&mm->pinned_vm) << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
-   up_write(&mm->mmap_sem);
 
return ret;
 }
@@ -133,7 +132,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, 
unsigned long ua,
struct mm_iommu_table_group_mem_t **pmem)
 {
struct mm_iommu_table_group_mem_t *mem;
-   long i, j, ret = 0, locked_entries = 0;
+   long i, j, ret = 0, pinned_entries = 0;
unsigned int pageshift;
unsigned long flags;
unsigned long cur_ua;
@@ -154,11 +153,11 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, 
unsigned long ua,
}
 
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
-   ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+   ret = mm_iommu_adjust_pinned_vm(mm, entries, true);
if (ret)
goto unlock_exit;
 
-   locked_entries = entries;
+   pinned_entries = entries;
}
 
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
@@ -252,8 +251,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, 
unsigned long ua,
list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
-   if (locked_entries && ret)
-   mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+   if (pinned_entries && ret)
+   mm_iommu_adjust_pinned_vm(mm, pinned_entries, false);
 
mutex_unlock(&mem_list_mutex);
 
@@ -352,7 +351,7 @@ long mm_iommu_put(struct mm_struct *mm, struct 
mm_iommu_table_group_mem_t *mem)
mm_iommu_release(mem);
 
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-   mm_iommu_adjust_locked_vm(mm, entries, false);
+   mm_iommu_adjust_pinned_vm(mm, entries, false);
 
 unlock_exit:
mutex_unlock(&mem_list_mutex);
-- 
2.20.1