alloc_gigantic_page() scans over various zones looking for a contiguous
memory block that is larger than what the buddy allocator can provide. A
subsequent patch which tests the arch page table helpers needs such a
method to allocate a PUD_SIZE memory block, and more use cases may show
up in the future. Hence split the actual allocation logic out of
alloc_gigantic_page() and make it available through a new helper,
alloc_gigantic_page_order().
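For example, a caller could allocate and later release a PUD_SIZE block
along the following lines (illustrative sketch only, not part of this
patch; the GFP mask and node choices here are placeholders):

	unsigned int order = get_order(PUD_SIZE);
	struct page *page;

	page = alloc_gigantic_page_order(order, GFP_KERNEL, numa_mem_id(),
					 &node_states[N_MEMORY]);
	if (!page)
		return -ENOMEM;

	/* ... exercise the PUD_SIZE contiguous range ... */

	free_contig_range(page_to_pfn(page), 1UL << order);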

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Greg Kroah-Hartman <gre...@linuxfoundation.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Mike Rapoport <r...@linux.vnet.ibm.com>
Cc: Mike Kravetz <mike.krav...@oracle.com>
Cc: Jason Gunthorpe <j...@ziepe.ca>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: Mark Brown <broo...@kernel.org>
Cc: Steven Price <steven.pr...@arm.com>
Cc: Ard Biesheuvel <ard.biesheu...@linaro.org>
Cc: Masahiro Yamada <yamada.masah...@socionext.com>
Cc: Kees Cook <keesc...@chromium.org>
Cc: Tetsuo Handa <penguin-ker...@i-love.sakura.ne.jp>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Sri Krishna chowdary <schowd...@nvidia.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Russell King - ARM Linux <li...@armlinux.org.uk>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Martin Schwidefsky <schwidef...@de.ibm.com>
Cc: Heiko Carstens <heiko.carst...@de.ibm.com>
Cc: "David S. Miller" <da...@davemloft.net>
Cc: Vineet Gupta <vgu...@synopsys.com>
Cc: James Hogan <jho...@kernel.org>
Cc: Paul Burton <paul.bur...@mips.com>
Cc: Ralf Baechle <r...@linux-mips.org>
Cc: Kirill A. Shutemov <kir...@shutemov.name>
Cc: Gerald Schaefer <gerald.schae...@de.ibm.com>
Cc: Christophe Leroy <christophe.le...@c-s.fr>
Cc: linux-snps-...@lists.infradead.org
Cc: linux-m...@vger.kernel.org
Cc: linux-arm-ker...@lists.infradead.org
Cc: linux-i...@vger.kernel.org
Cc: linuxppc-...@lists.ozlabs.org
Cc: linux-s...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: sparcli...@vger.kernel.org
Cc: x...@kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khand...@arm.com>
---
 include/linux/hugetlb.h |  9 +++++++++
 mm/hugetlb.c            | 24 ++++++++++++++++++++++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53fc34f930d0..cc50d5ad4885 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -299,6 +299,9 @@ static inline bool is_file_hugepages(struct file *file)
 }
 
 
+struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                         int nid, nodemask_t *nodemask);
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)                        false
@@ -310,6 +313,12 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
        return ERR_PTR(-ENOSYS);
 }
 
+static inline struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                         int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef37c85423a5..3fb81252f52b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1112,10 +1112,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
        return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
                int nid, nodemask_t *nodemask)
 {
-       unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zonelist *zonelist;
@@ -1151,6 +1150,14 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
        return NULL;
 }
 
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+                                       int nid, nodemask_t *nodemask)
+{
+       unsigned int order = huge_page_order(h);
+
+       return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 #else /* !CONFIG_CONTIG_ALLOC */
@@ -1159,6 +1166,12 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                                      int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
@@ -1167,6 +1180,13 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                                      int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
+
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
-- 
2.20.1
