The speculative page fault handler, which runs without holding the
mmap_sem, calls lru_cache_add_active_or_unevictable(), but in that
context the vma's vm_flags are not guaranteed to remain constant.

Introduce __lru_cache_add_active_or_unevictable(), which takes the vma
flags value as a parameter instead of the vma pointer, and turn
lru_cache_add_active_or_unevictable() into a static inline wrapper
around it.
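
As an illustration only (hypothetical caller, not part of this patch),
a speculative fault path that has snapshotted the vma flags, e.g. into
vmf->vma_flags as done in the hunks below, calls the new helper with
the saved value instead of dereferencing the vma:

	/* use the flags sampled when the speculative fault started */
	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);

Callers running under the mmap_sem can keep using
lru_cache_add_active_or_unevictable(), which becomes a static inline
wrapper passing vma->vm_flags.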

Acked-by: David Rientjes <rient...@google.com>
Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
 include/linux/swap.h | 10 ++++++++--
 mm/memory.c          |  8 ++++----
 mm/swap.c            |  6 +++---
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index f73eafcaf4e9..730c14738574 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -338,8 +338,14 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
-                                               struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+                                               unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+                                               struct vm_area_struct *vma)
+{
+       return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/mm/memory.c b/mm/memory.c
index cb6310b74cfb..deac7f12d777 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2569,7 +2569,7 @@ static int wp_page_copy(struct vm_fault *vmf)
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                page_add_new_anon_rmap(new_page, vma, vmf->address, false);
                mem_cgroup_commit_charge(new_page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(new_page, vma);
+               __lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags);
                /*
                 * We call the notify macro here because, when using secondary
                 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3105,7 +3105,7 @@ int do_swap_page(struct vm_fault *vmf)
        if (unlikely(page != swapcache && swapcache)) {
                page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
        } else {
                do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
                mem_cgroup_commit_charge(page, memcg, true, false);
@@ -3256,7 +3256,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, vmf->address, false);
        mem_cgroup_commit_charge(page, memcg, false, false);
-       lru_cache_add_active_or_unevictable(page, vma);
+       __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3510,7 +3510,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
        } else {
                inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                page_add_file_rmap(page, false);
diff --git a/mm/swap.c b/mm/swap.c
index 26fc9b5f1b6c..ba97d437e68a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -456,12 +456,12 @@ void lru_cache_add(struct page *page)
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
-                                        struct vm_area_struct *vma)
+void __lru_cache_add_active_or_unevictable(struct page *page,
+                                          unsigned long vma_flags)
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
-       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+       if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                SetPageActive(page);
        else if (!TestSetPageMlocked(page)) {
                /*
-- 
2.7.4
