1. The page table walker already passes the vma it is processing,
so we don't need to pass the vma separately.

2. If a page table entry is dirty in try_to_unmap_one, the dirtiness
is propagated to the page's PG_dirty flag. So it's enough to check
only PageDirty, without any additional pte dirty bit check.

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/madvise.c | 17 +++--------------
 mm/rmap.c    |  6 ++----
 2 files changed, 5 insertions(+), 18 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index 7835bc1eaccb..fdfb14a78c60 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -24,11 +24,6 @@
 
 #include <asm/tlb.h>
 
-struct madvise_free_private {
-       struct vm_area_struct *vma;
-       struct mmu_gather *tlb;
-};
-
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
  * take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -269,10 +264,9 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 
 {
-       struct madvise_free_private *fp = walk->private;
-       struct mmu_gather *tlb = fp->tlb;
+       struct mmu_gather *tlb = walk->private;
        struct mm_struct *mm = tlb->mm;
-       struct vm_area_struct *vma = fp->vma;
+       struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *pte, ptent;
        struct page *page;
@@ -365,15 +359,10 @@ static void madvise_free_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end)
 {
-       struct madvise_free_private fp = {
-               .vma = vma,
-               .tlb = tlb,
-       };
-
        struct mm_walk free_walk = {
                .pmd_entry = madvise_free_pte_range,
                .mm = vma->vm_mm,
-               .private = &fp,
+               .private = tlb,
        };
 
        BUG_ON(addr >= end);
diff --git a/mm/rmap.c b/mm/rmap.c
index 6f0f9331a20f..94ee372e238b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1380,7 +1380,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;
        enum ttu_flags flags = (enum ttu_flags)arg;
-       int dirty = 0;
 
        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
@@ -1423,8 +1422,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        /* Move the dirty bit to the physical page now the pte is gone. */
-       dirty = pte_dirty(pteval);
-       if (dirty)
+       if (pte_dirty(pteval))
                set_page_dirty(page);
 
        /* Update high watermark before we lower rss */
@@ -1457,7 +1455,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
                if (flags & TTU_FREE) {
                        VM_BUG_ON_PAGE(PageSwapCache(page), page);
-                       if (!dirty && !PageDirty(page)) {
+                       if (!PageDirty(page)) {
                                /* It's a freeable page by MADV_FREE */
                                dec_mm_counter(mm, MM_ANONPAGES);
                                goto discard;
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to