Re: [PATCH 3/4] hugetlbfs: remove hugetlb_add_hstate() warning for existing hstate
On 3/23/20 5:01 PM, Mina Almasry wrote: > On Wed, Mar 18, 2020 at 3:07 PM Mike Kravetz wrote: >> >> The routine hugetlb_add_hstate prints a warning if the hstate already >> exists. This was originally done as part of kernel command line >> parsing. If 'hugepagesz=' was specified more than once, the warning >> pr_warn("hugepagesz= specified twice, ignoring\n"); >> would be printed. >> >> Some architectures want to enable all huge page sizes. They would >> call hugetlb_add_hstate for all supported sizes. However, this was >> done after command line processing and as a result hstates could have >> already been created for some sizes. To make sure no warnings were >> printed, there would often be code like: >> if (!size_to_hstate(size)) >> hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT) >> >> The only time we want to print the warning is as the result of command >> line processing. So, remove the warning from hugetlb_add_hstate and >> add it to the single arch independent routine processing "hugepagesz=". >> After this, calls to size_to_hstate() in arch specific code can be >> removed and hugetlb_add_hstate can be called without worrying about >> warning messages. 
>> >> Signed-off-by: Mike Kravetz >> --- >> arch/arm64/mm/hugetlbpage.c | 16 >> arch/powerpc/mm/hugetlbpage.c | 3 +-- >> arch/riscv/mm/hugetlbpage.c | 2 +- >> arch/sparc/mm/init_64.c | 19 --- >> arch/x86/mm/hugetlbpage.c | 2 +- >> mm/hugetlb.c | 10 +++--- >> 6 files changed, 18 insertions(+), 34 deletions(-) >> >> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c >> index 4aa9534a45d7..050809e6f0a9 100644 >> --- a/arch/arm64/mm/hugetlbpage.c >> +++ b/arch/arm64/mm/hugetlbpage.c >> @@ -441,22 +441,14 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, >> clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); >> } >> >> -static void __init add_huge_page_size(unsigned long size) >> -{ >> - if (size_to_hstate(size)) >> - return; >> - >> - hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); >> -} >> - >> static int __init hugetlbpage_init(void) >> { >> #ifdef CONFIG_ARM64_4K_PAGES >> - add_huge_page_size(PUD_SIZE); >> + hugetlb_add_hstate(ilog2(PUD_SIZE) - PAGE_SHIFT); >> #endif >> - add_huge_page_size(CONT_PMD_SIZE); >> - add_huge_page_size(PMD_SIZE); >> - add_huge_page_size(CONT_PTE_SIZE); >> + hugetlb_add_hstate(ilog2(CONT_PMD_SIZE) - PAGE_SHIFT); >> + hugetlb_add_hstate(ilog2(PMD_SIZE) - PAGE_SHIFT); >> + hugetlb_add_hstate(ilog2(CONT_PTE_SIZE) - PAGE_SHIFT); >> >> return 0; >> } >> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c >> index 166960ba1236..f46464ba6fb4 100644 >> --- a/arch/powerpc/mm/hugetlbpage.c >> +++ b/arch/powerpc/mm/hugetlbpage.c >> @@ -584,8 +584,7 @@ static int __init add_huge_page_size(unsigned long long >> size) >> if (!arch_hugetlb_valid_size(size)) >> return -EINVAL; >> >> - if (!size_to_hstate(size)) >> - hugetlb_add_hstate(shift - PAGE_SHIFT); >> + hugetlb_add_hstate(shift - PAGE_SHIFT); >> return 0; >> } >> >> diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c >> index bdf89d7eb714..beaa91941db8 100644 >> --- a/arch/riscv/mm/hugetlbpage.c >> +++ 
b/arch/riscv/mm/hugetlbpage.c >> @@ -26,7 +26,7 @@ bool __init arch_hugetlb_valid_size(unsigned long long >> size) >> static __init int gigantic_pages_init(void) >> { >> /* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */ >> - if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT)) >> + if (IS_ENABLED(CONFIG_64BIT)) >> hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); >> return 0; >> } >> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c >> index 5c29203fd460..8f619edc8f8c 100644 >> --- a/arch/sparc/mm/init_64.c >> +++ b/arch/sparc/mm/init_64.c >> @@ -325,23 +325,12 @@ static void __update_mmu_tsb_insert(struct mm_struct >> *mm, unsigned long tsb_inde >> } >> >> #ifdef CONFIG_HUGETLB_PAGE >> -static void __init add_huge_page_size(unsigned long size) >> -{ >> - unsigned int order; >> - >> - if (size_to_hstate(size)) >> - return; >> - >> - order = ilog2(size) - PAGE_SHIFT; >> - hugetlb_add_hstate(order); >> -} >> - >> static int __init hugetlbpage_init(void) >> { >> - add_huge_page_size(1UL << HPAGE_64K_SHIFT); >> - add_huge_page_size(1UL << HPAGE_SHIFT); >> - add_huge_page_size(1UL << HPAGE_256MB_SHIFT); >> - add_huge_page_size(1UL << HPAGE_2GB_SHIFT); >> + hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT); >> + hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); >> + hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT); >> + hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT); >> >> retur
[PATCH 3/4] hugetlbfs: remove hugetlb_add_hstate() warning for existing hstate
The routine hugetlb_add_hstate prints a warning if the hstate already exists. This was originally done as part of kernel command line parsing. If 'hugepagesz=' was specified more than once, the warning pr_warn("hugepagesz= specified twice, ignoring\n"); would be printed. Some architectures want to enable all huge page sizes. They would call hugetlb_add_hstate for all supported sizes. However, this was done after command line processing and as a result hstates could have already been created for some sizes. To make sure no warnings were printed, there would often be code like: if (!size_to_hstate(size)) hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT) The only time we want to print the warning is as the result of command line processing. So, remove the warning from hugetlb_add_hstate and add it to the single arch independent routine processing "hugepagesz=". After this, calls to size_to_hstate() in arch specific code can be removed and hugetlb_add_hstate can be called without worrying about warning messages. 
Signed-off-by: Mike Kravetz --- arch/arm64/mm/hugetlbpage.c | 16 arch/powerpc/mm/hugetlbpage.c | 3 +-- arch/riscv/mm/hugetlbpage.c | 2 +- arch/sparc/mm/init_64.c | 19 --- arch/x86/mm/hugetlbpage.c | 2 +- mm/hugetlb.c | 10 +++--- 6 files changed, 18 insertions(+), 34 deletions(-) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 4aa9534a45d7..050809e6f0a9 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -441,22 +441,14 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); } -static void __init add_huge_page_size(unsigned long size) -{ - if (size_to_hstate(size)) - return; - - hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); -} - static int __init hugetlbpage_init(void) { #ifdef CONFIG_ARM64_4K_PAGES - add_huge_page_size(PUD_SIZE); + hugetlb_add_hstate(ilog2(PUD_SIZE) - PAGE_SHIFT); #endif - add_huge_page_size(CONT_PMD_SIZE); - add_huge_page_size(PMD_SIZE); - add_huge_page_size(CONT_PTE_SIZE); + hugetlb_add_hstate(ilog2(CONT_PMD_SIZE) - PAGE_SHIFT); + hugetlb_add_hstate(ilog2(PMD_SIZE) - PAGE_SHIFT); + hugetlb_add_hstate(ilog2(CONT_PTE_SIZE) - PAGE_SHIFT); return 0; } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 166960ba1236..f46464ba6fb4 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -584,8 +584,7 @@ static int __init add_huge_page_size(unsigned long long size) if (!arch_hugetlb_valid_size(size)) return -EINVAL; - if (!size_to_hstate(size)) - hugetlb_add_hstate(shift - PAGE_SHIFT); + hugetlb_add_hstate(shift - PAGE_SHIFT); return 0; } diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index bdf89d7eb714..beaa91941db8 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -26,7 +26,7 @@ bool __init arch_hugetlb_valid_size(unsigned long long size) static __init int gigantic_pages_init(void) { /* With CONTIG_ALLOC, we can allocate gigantic pages at 
runtime */ - if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT)) + if (IS_ENABLED(CONFIG_64BIT)) hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); return 0; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 5c29203fd460..8f619edc8f8c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -325,23 +325,12 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde } #ifdef CONFIG_HUGETLB_PAGE -static void __init add_huge_page_size(unsigned long size) -{ - unsigned int order; - - if (size_to_hstate(size)) - return; - - order = ilog2(size) - PAGE_SHIFT; - hugetlb_add_hstate(order); -} - static int __init hugetlbpage_init(void) { - add_huge_page_size(1UL << HPAGE_64K_SHIFT); - add_huge_page_size(1UL << HPAGE_SHIFT); - add_huge_page_size(1UL << HPAGE_256MB_SHIFT); - add_huge_page_size(1UL << HPAGE_2GB_SHIFT); + hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT); return 0; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index dd3ed09f6c23..8a3f586e1217 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -195,7 +195,7 @@ bool __init arch_hugetlb_valid_size(unsigned long long size) static __init int gigantic_pages_init(void) { /* With compaction or CMA we can allocate gigantic pages at runtime */ - if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(