The global highest_memmap_pfn is now unused, so let's drop it (declaration in mm/internal.h, definitions in mm/memory.c and mm/nommu.c, and the update in memmap_init_range()). Signed-off-by: David Hildenbrand <da...@redhat.com> --- mm/internal.h | 2 -- mm/memory.c | 2 -- mm/mm_init.c | 3 --- mm/nommu.c | 1 - 4 files changed, 8 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h index f519eb7217c26..703871905fd6d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -503,8 +503,6 @@ static inline bool folio_needs_release(struct folio *folio) (mapping && mapping_release_always(mapping)); } -extern unsigned long highest_memmap_pfn; - /* * Maximum number of reclaim retries without progress before the OOM * killer is consider the only way forward. diff --git a/mm/memory.c b/mm/memory.c index 188b84ebf479a..a1b5575db52ac 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -146,8 +146,6 @@ __setup("norandmaps", disable_randmaps); unsigned long zero_pfn __read_mostly; EXPORT_SYMBOL(zero_pfn); -unsigned long highest_memmap_pfn __read_mostly; - /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ diff --git a/mm/mm_init.c b/mm/mm_init.c index 5c21b3af216b2..1dac66c209984 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -881,9 +881,6 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone unsigned long pfn, end_pfn = start_pfn + size; struct page *page; - if (highest_memmap_pfn < end_pfn - 1) - highest_memmap_pfn = end_pfn - 1; - #ifdef CONFIG_ZONE_DEVICE /* * Honor reservation requested by the driver for this ZONE_DEVICE diff --git a/mm/nommu.c b/mm/nommu.c index 38c22ea0a95c6..cd9ddbfe1af80 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -42,7 +42,6 @@ #include <asm/mmu_context.h> #include "internal.h" -unsigned long highest_memmap_pfn; int heap_stack_gap = 0; atomic_long_t mmap_pages_allocated; -- 2.49.0