No longer required, let's drop it.

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 fs/proc/task_mmu.c | 6 +++---
 include/linux/mm.h | 6 ++----
 mm/huge_memory.c   | 4 ++--
 mm/memory.c        | 8 +++-----
 mm/pagewalk.c      | 2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c4ad3083bbfa0..36ef67cdf7a3b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -861,7 +861,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        struct folio *folio;
 
        if (pmd_present(*pmd)) {
-               page = vm_normal_page_pmd(vma, addr, *pmd);
+               page = vm_normal_page_pmd(vma, *pmd);
                present = true;
        } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
@@ -2177,7 +2177,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
                        categories |= PAGE_IS_WRITTEN;
 
                if (p->masks_of_interest & PAGE_IS_FILE) {
-                       page = vm_normal_page_pmd(vma, addr, pmd);
+                       page = vm_normal_page_pmd(vma, pmd);
                        if (page && !PageAnon(page))
                                categories |= PAGE_IS_FILE;
                }
@@ -2942,7 +2942,7 @@ static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
        if (!pmd_present(pmd))
                return NULL;
 
-       page = vm_normal_page_pmd(vma, addr, pmd);
+       page = vm_normal_page_pmd(vma, pmd);
        if (!page)
                return NULL;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3f52871becd3f..ef709457c7076 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2359,10 +2359,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte);
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte);
-struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
-                                 unsigned long addr, pmd_t pmd);
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-                               pmd_t pmd);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd);
 
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                  unsigned long size);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67220c30e7818..bf2aed8d92ec2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1994,7 +1994,7 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 
        if (!(vma->vm_flags & VM_SHARED)) {
                /* See can_change_pte_writable(). */
-               page = vm_normal_page_pmd(vma, addr, pmd);
+               page = vm_normal_page_pmd(vma, pmd);
                return page && PageAnon(page) && PageAnonExclusive(page);
        }
 
@@ -2033,7 +2033,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
            can_change_pmd_writable(vma, vmf->address, pmd))
                writable = true;
 
-       folio = vm_normal_folio_pmd(vma, haddr, pmd);
+       folio = vm_normal_folio_pmd(vma, pmd);
        if (!folio)
                goto out_map;
 
diff --git a/mm/memory.c b/mm/memory.c
index ace9c59e97181..34f961024e8e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -663,8 +663,7 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 }
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-                               pmd_t pmd)
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd)
 {
        unsigned long pfn = pmd_pfn(pmd);
 
@@ -676,10 +675,9 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
        return vm_normal_page_pfn(vma, pfn);
 }
 
-struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
-                                 unsigned long addr, pmd_t pmd)
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd)
 {
-       struct page *page = vm_normal_page_pmd(vma, addr, pmd);
+       struct page *page = vm_normal_page_pmd(vma, pmd);
 
        if (page)
                return page_folio(page);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 648038247a8d2..0edb7240d090c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -944,7 +944,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
                        spin_unlock(ptl);
                        goto pte_table;
                } else if (pmd_present(pmd)) {
-                       page = vm_normal_page_pmd(vma, addr, pmd);
+                       page = vm_normal_page_pmd(vma, pmd);
                        if (page) {
                                goto found;
                        } else if ((flags & FW_ZEROPAGE) &&
-- 
2.49.0


Reply via email to