Add a kernel parameter hugetlb_free_vmemmap to disable the feature of
freeing unused vmemmap pages associated with each hugetlb page on boot.

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 .../admin-guide/kernel-parameters.txt         |  9 ++++++++
 Documentation/admin-guide/mm/hugetlbpage.rst  |  3 +++
 mm/hugetlb.c                                  | 23 +++++++++++++++++++
 3 files changed, 35 insertions(+)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5debfe238027..ccf07293cb63 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1551,6 +1551,15 @@
                        Documentation/admin-guide/mm/hugetlbpage.rst.
                        Format: size[KMG]
 
+       hugetlb_free_vmemmap=
+                       [KNL] When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
+                       this controls freeing unused vmemmap pages associated
+                       with each HugeTLB page.
+                       Format: { on (default) | off }
+
+                       on:  enable the feature
+                       off: disable the feature
+
        hung_task_panic=
                        [KNL] Should the hung task detector generate panics.
                        Format: 0 | 1
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index f7b1c7462991..7d6129ee97dd 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -145,6 +145,9 @@ default_hugepagesz
 
        will all result in 256 2M huge pages being allocated.  Valid default
        huge page size is architecture dependent.
+hugetlb_free_vmemmap
+	When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this controls freeing
+	unused vmemmap pages associated with each HugeTLB page.
 
 When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
 indicates the current number of pre-allocated huge pages of the default size.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 509de0732d9f..82467d573fee 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1310,6 +1310,8 @@ static void __free_hugepage(struct hstate *h, struct page *page);
        (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
 })
 
+static bool hugetlb_free_vmemmap_disabled __initdata;
+
 static inline unsigned int nr_free_vmemmap(struct hstate *h)
 {
        return h->nr_free_vmemmap_pages;
@@ -1457,6 +1459,13 @@ static void __init hugetlb_vmemmap_init(struct hstate *h)
        unsigned int order = huge_page_order(h);
        unsigned int vmemmap_pages;
 
+       if (hugetlb_free_vmemmap_disabled) {
+               h->nr_free_vmemmap_pages = 0;
+               pr_info("HugeTLB: disable free vmemmap pages for %s\n",
+                       h->name);
+               return;
+       }
+
        vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
        /*
         * The head page and the first tail page not free to buddy system,
@@ -1826,6 +1835,20 @@ static inline void clear_subpage_hwpoison(struct page *head)
 {
        set_page_private(head + 4, 0);
 }
+
+static int __init early_hugetlb_free_vmemmap_param(char *buf)
+{
+       if (!buf)
+               return -EINVAL;
+
+       if (!strcmp(buf, "off"))
+               hugetlb_free_vmemmap_disabled = true;
+       else if (strcmp(buf, "on"))
+               return -EINVAL;
+
+       return 0;
+}
+early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
-- 
2.20.1

Reply via email to