On Fri, Jun 20, 2014 at 04:11:33PM -0400, Naoya Horiguchi wrote:
> pagewalk.c can handle vma in itself, so we don't have to pass vma via
> walk->private. And show_numa_map() walks pages on vma basis, so using
> walk_page_vma() is preferable.
> 
> Signed-off-by: Naoya Horiguchi <[email protected]>
> ---
>  fs/proc/task_mmu.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
> 
> diff --git v3.16-rc1.orig/fs/proc/task_mmu.c v3.16-rc1/fs/proc/task_mmu.c
> index 74f87794afab..b4459c006d50 100644
> --- v3.16-rc1.orig/fs/proc/task_mmu.c
> +++ v3.16-rc1/fs/proc/task_mmu.c
> @@ -1247,7 +1247,6 @@ const struct file_operations proc_pagemap_operations = {
>  #ifdef CONFIG_NUMA
>  
>  struct numa_maps {
> -     struct vm_area_struct *vma;
>       unsigned long pages;
>       unsigned long anon;
>       unsigned long active;
> @@ -1316,18 +1315,17 @@ static struct page *can_gather_numa_stats(pte_t pte, 
> struct vm_area_struct *vma,
>  static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
>               unsigned long end, struct mm_walk *walk)
>  {
> -     struct numa_maps *md;
> +     struct numa_maps *md = walk->private;
> +     struct vm_area_struct *vma = walk->vma;
>       spinlock_t *ptl;
>       pte_t *orig_pte;
>       pte_t *pte;
>  
> -     md = walk->private;
> -
> -     if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
> +     if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
>               pte_t huge_pte = *(pte_t *)pmd;
>               struct page *page;
>  
> -             page = can_gather_numa_stats(huge_pte, md->vma, addr);
> +             page = can_gather_numa_stats(huge_pte, vma, addr);
>               if (page)
>                       gather_stats(page, md, pte_dirty(huge_pte),
>                                    HPAGE_PMD_SIZE/PAGE_SIZE);
> @@ -1339,7 +1337,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long 
> addr,
>               return 0;
>       orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
>       do {
> -             struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
> +             struct page *page = can_gather_numa_stats(*pte, vma, addr);
>               if (!page)
>                       continue;
>               gather_stats(page, md, pte_dirty(*pte), 1);
> @@ -1398,12 +1396,11 @@ static int show_numa_map(struct seq_file *m, void *v, 
> int is_pid)
>       /* Ensure we start with an empty set of numa_maps statistics. */
>       memset(md, 0, sizeof(*md));
>  
> -     md->vma = vma;
> -
>       walk.hugetlb_entry = gather_hugetbl_stats;
>       walk.pmd_entry = gather_pte_stats;
>       walk.private = md;
>       walk.mm = mm;
> +     walk.vma = vma;

Redundant: walk_page_vma() already sets walk->vma from its argument, so this assignment is unnecessary.

And it's probably a good idea to move the walk initialization to the declaration.

Otherwise:

Acked-by: Kirill A. Shutemov <[email protected]>

>  
>       pol = get_vma_policy(task, vma, vma->vm_start);
>       mpol_to_str(buffer, sizeof(buffer), pol);
> @@ -1434,7 +1431,8 @@ static int show_numa_map(struct seq_file *m, void *v, 
> int is_pid)
>       if (is_vm_hugetlb_page(vma))
>               seq_puts(m, " huge");
>  
> -     walk_page_range(vma->vm_start, vma->vm_end, &walk);
> +     /* mmap_sem is held by m_start */
> +     walk_page_vma(vma, &walk);
>  
>       if (!md->pages)
>               goto out;
> -- 
> 1.9.3
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/

-- 
 Kirill A. Shutemov
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to