On Fri 16-12-16 09:22:01, Vegard Nossum wrote:
> We already have the helper; we can convert the rest of the kernel
> mechanically using:
> 
>   git grep -l 'atomic_inc_not_zero.*mm_users' | xargs sed -i \
>     's/atomic_inc_not_zero(&\(.*\)->mm_users)/mmget_not_zero\(\1\)/'
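> 
> so that a call site such as the one in kernel/events/uprobes.c:
> 
>   atomic_inc_not_zero(&vma->vm_mm->mm_users)
> 
> becomes:
> 
>   mmget_not_zero(vma->vm_mm)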
> 
> This is needed for a later patch that hooks into the helper, but might be
> a worthwhile cleanup on its own.
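> 
> For reference, the helper amounts to a one-line wrapper around the same
> atomic operation (a sketch; the header it lives in may vary between
> trees):
> 
>   static inline bool mmget_not_zero(struct mm_struct *mm)
>   {
>           return atomic_inc_not_zero(&mm->mm_users);
>   }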
> 
> Cc: Andrew Morton <a...@linux-foundation.org>
> Cc: Michal Hocko <mho...@suse.com>
> Signed-off-by: Vegard Nossum <vegard.nos...@oracle.com>

Acked-by: Michal Hocko <mho...@suse.com>
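
As a side note for anyone picking this up for another tree: the
conversion is purely mechanical, so every successful mmget_not_zero()
still needs its matching mmput(), exactly like the open-coded
atomic_inc_not_zero(&mm->mm_users) it replaces. A minimal sketch of a
caller (hypothetical, not taken from this patch):

	if (mmget_not_zero(mm)) {
		down_read(&mm->mmap_sem);
		/* ... operate on the address space ... */
		up_read(&mm->mmap_sem);
		mmput(mm);	/* drop the reference taken above */
	}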

> ---
>  drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +-
>  drivers/iommu/intel-svm.c               | 2 +-
>  fs/proc/base.c                          | 4 ++--
>  fs/proc/task_mmu.c                      | 4 ++--
>  fs/proc/task_nommu.c                    | 2 +-
>  kernel/events/uprobes.c                 | 2 +-
>  mm/swapfile.c                           | 2 +-
>  7 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index f21ca404af79..e97f9ade99fc 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -514,7 +514,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>                       flags |= FOLL_WRITE;
>  
>               ret = -EFAULT;
> -             if (atomic_inc_not_zero(&mm->mm_users)) {
> +             if (mmget_not_zero(mm)) {
>                       down_read(&mm->mmap_sem);
>                       while (pinned < npages) {
>                               ret = get_user_pages_remote
> diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
> index cb72e0011310..51f2b228723f 100644
> --- a/drivers/iommu/intel-svm.c
> +++ b/drivers/iommu/intel-svm.c
> @@ -579,7 +579,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
>               if (!svm->mm)
>                       goto bad_req;
>               /* If the mm is already defunct, don't handle faults. */
> -             if (!atomic_inc_not_zero(&svm->mm->mm_users))
> +             if (!mmget_not_zero(svm->mm))
>                       goto bad_req;
>               down_read(&svm->mm->mmap_sem);
>               vma = find_extend_vma(svm->mm, address);
> diff --git a/fs/proc/base.c b/fs/proc/base.c
> index 0b8ccacae8b3..87fd5bf07578 100644
> --- a/fs/proc/base.c
> +++ b/fs/proc/base.c
> @@ -842,7 +842,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
>               return -ENOMEM;
>  
>       copied = 0;
> -     if (!atomic_inc_not_zero(&mm->mm_users))
> +     if (!mmget_not_zero(mm))
>               goto free;
>  
>       /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
> @@ -950,7 +950,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
>               return -ENOMEM;
>  
>       ret = 0;
> -     if (!atomic_inc_not_zero(&mm->mm_users))
> +     if (!mmget_not_zero(mm))
>               goto free;
>  
>       down_read(&mm->mmap_sem);
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 35b92d81692f..c71975293dc8 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -167,7 +167,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
>               return ERR_PTR(-ESRCH);
>  
>       mm = priv->mm;
> -     if (!mm || !atomic_inc_not_zero(&mm->mm_users))
> +     if (!mm || !mmget_not_zero(mm))
>               return NULL;
>  
>       down_read(&mm->mmap_sem);
> @@ -1352,7 +1352,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
>       unsigned long end_vaddr;
>       int ret = 0, copied = 0;
>  
> -     if (!mm || !atomic_inc_not_zero(&mm->mm_users))
> +     if (!mm || !mmget_not_zero(mm))
>               goto out;
>  
>       ret = -EINVAL;
> diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
> index 37175621e890..1ef97cfcf422 100644
> --- a/fs/proc/task_nommu.c
> +++ b/fs/proc/task_nommu.c
> @@ -219,7 +219,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
>               return ERR_PTR(-ESRCH);
>  
>       mm = priv->mm;
> -     if (!mm || !atomic_inc_not_zero(&mm->mm_users))
> +     if (!mm || !mmget_not_zero(mm))
>               return NULL;
>  
>       down_read(&mm->mmap_sem);
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index f9ec9add2164..bcf0f9d77d4d 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -741,7 +741,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
>                       continue;
>               }
>  
> -             if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
> +             if (!mmget_not_zero(vma->vm_mm))
>                       continue;
>  
>               info = prev;
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index cf73169ce153..8c92829326cb 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1494,7 +1494,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
>                       while (swap_count(*swap_map) && !retval &&
>                                       (p = p->next) != &start_mm->mmlist) {
>                               mm = list_entry(p, struct mm_struct, mmlist);
> -                             if (!atomic_inc_not_zero(&mm->mm_users))
> +                             if (!mmget_not_zero(mm))
>                                       continue;
>                               spin_unlock(&mmlist_lock);
>                               mmput(prev_mm);
> -- 
> 2.11.0.1.gaa10c3f
> 

-- 
Michal Hocko
SUSE Labs
