Commit 944d9fec8d7a ("hugetlb: add support for gigantic page allocation at
runtime") has added the runtime gigantic page allocation via
alloc_contig_range(), making this support available only when CONFIG_CMA is
enabled. Because it doesn't depend on MIGRATE_CMA pageblocks and the
associated infrastructure, it is possible with a few simple adjustments to
require only CONFIG_MEMORY_ISOLATION instead of full CONFIG_CMA.

After this patch, alloc_contig_range() and related functions are available
and used for gigantic pages with just CONFIG_MEMORY_ISOLATION enabled.
Note CONFIG_CMA selects CONFIG_MEMORY_ISOLATION.  This allows supporting
runtime gigantic pages without the CMA-specific checks in page allocator
fastpaths.

Signed-off-by: Vlastimil Babka <[email protected]>
Cc: Luiz Capitulino <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Zhang Yanfei <[email protected]>
Cc: Yasuaki Ishimatsu <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Mike Kravetz <[email protected]>
---
 include/linux/gfp.h | 6 +++---
 mm/hugetlb.c        | 2 +-
 mm/page_alloc.c     | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8942af0813e3..752bb6259218 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -539,16 +539,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#ifdef CONFIG_MEMORY_ISOLATION
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef6963b577fd..66529a1c7929 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1002,7 +1002,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
 		nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_X86_64)
 static void destroy_compound_gigantic_page(struct page *page,
 					unsigned int order)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d666df5ef95..29c530cdd7f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6599,7 +6599,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#ifdef CONFIG_MEMORY_ISOLATION
 
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
-- 
2.7.0
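
For context, the runtime gigantic page path introduced by commit
944d9fec8d7a requests a contiguous PFN range with MIGRATE_MOVABLE rather
than MIGRATE_CMA, which is why only the page isolation machinery is
actually required.  A minimal sketch of that call (the helper name below
is illustrative, not the exact upstream function, and the real
mm/hugetlb.c code additionally scans zones for a suitably aligned range):

    #include <linux/gfp.h>		/* alloc_contig_range(), free_contig_range() */
    #include <linux/mmzone.h>		/* MIGRATE_MOVABLE */

    /*
     * Sketch only: claim a physically contiguous, gigantic-page-sized
     * PFN range.  alloc_contig_range() isolates the pageblocks in the
     * range, migrates movable pages out of it and then takes the freed
     * pages, so it needs CONFIG_MEMORY_ISOLATION but nothing
     * CMA-specific.
     */
    static int alloc_gigantic_range(unsigned long start_pfn,
    				unsigned long nr_pages)
    {
    	return alloc_contig_range(start_pfn, start_pfn + nr_pages,
    				  MIGRATE_MOVABLE);
    }

Freeing such a range goes through free_contig_range(pfn, nr_pages), which
is declared in the same block that this patch moves under
CONFIG_MEMORY_ISOLATION.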

