Re: [External] Re: [PATCH v5 17/21] mm/hugetlb: Add a kernel parameter hugetlb_free_vmemmap

2020-11-20 Thread Muchun Song
On Fri, Nov 20, 2020 at 4:22 PM Michal Hocko  wrote:
>
> On Fri 20-11-20 14:43:21, Muchun Song wrote:
> > Add a kernel parameter hugetlb_free_vmemmap to disable the feature of
> > freeing unused vmemmap pages associated with each hugetlb page on boot.
>
> As replied to the config patch. This is fine but I would argue that the
> default should be flipped. Saving memory is nice but it comes with
> overhead and therefore should be an opt-in. The config option should
> only guard compile time dependencies not a user choice.

Got it. The default will be flipped in the next version. Thanks.

>
> > Signed-off-by: Muchun Song 
> > ---
> >  Documentation/admin-guide/kernel-parameters.txt |  9 +
> >  Documentation/admin-guide/mm/hugetlbpage.rst|  3 +++
> >  mm/hugetlb_vmemmap.c| 21 +
> >  3 files changed, 33 insertions(+)
> >
> > diff --git a/Documentation/admin-guide/kernel-parameters.txt 
> > b/Documentation/admin-guide/kernel-parameters.txt
> > index 5debfe238027..ccf07293cb63 100644
> > --- a/Documentation/admin-guide/kernel-parameters.txt
> > +++ b/Documentation/admin-guide/kernel-parameters.txt
> > @@ -1551,6 +1551,15 @@
> >   Documentation/admin-guide/mm/hugetlbpage.rst.
> >   Format: size[KMG]
> >
> > + hugetlb_free_vmemmap=
> > + [KNL] When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
> > + this controls freeing unused vmemmap pages associated
> > + with each HugeTLB page.
> > + Format: { on (default) | off }
> > +
> > + on:  enable the feature
> > + off: disable the feature
> > +
> >   hung_task_panic=
> >   [KNL] Should the hung task detector generate panics.
> >   Format: 0 | 1
> > diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst 
> > b/Documentation/admin-guide/mm/hugetlbpage.rst
> > index f7b1c7462991..7d6129ee97dd 100644
> > --- a/Documentation/admin-guide/mm/hugetlbpage.rst
> > +++ b/Documentation/admin-guide/mm/hugetlbpage.rst
> > @@ -145,6 +145,9 @@ default_hugepagesz
> >
> >   will all result in 256 2M huge pages being allocated.  Valid default
> >   huge page size is architecture dependent.
> > +hugetlb_free_vmemmap
> > + When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this disables freeing
> > + unused vmemmap pages associated with each HugeTLB page.
> >
> >  When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
> >  indicates the current number of pre-allocated huge pages of the default 
> > size.
> > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> > index 3629165d8158..c958699d1393 100644
> > --- a/mm/hugetlb_vmemmap.c
> > +++ b/mm/hugetlb_vmemmap.c
> > @@ -144,6 +144,22 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
> >  }
> >  #endif
> >
> > +static bool hugetlb_free_vmemmap_disabled __initdata;
> > +
> > +static int __init early_hugetlb_free_vmemmap_param(char *buf)
> > +{
> > + if (!buf)
> > + return -EINVAL;
> > +
> > + if (!strcmp(buf, "off"))
> > + hugetlb_free_vmemmap_disabled = true;
> > + else if (strcmp(buf, "on"))
> > + return -EINVAL;
> > +
> > + return 0;
> > +}
> > +early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
> > +
> >  static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
> >  {
> >   return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
> > @@ -541,6 +557,11 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
> >   unsigned int order = huge_page_order(h);
> >   unsigned int vmemmap_pages;
> >
> > + if (hugetlb_free_vmemmap_disabled) {
> > + pr_info("disable free vmemmap pages for %s\n", h->name);
> > + return;
> > + }
> > +
> >   vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
> >   /*
> >* The head page and the first tail page are not to be freed to buddy
> > --
> > 2.11.0
>
> --
> Michal Hocko
> SUSE Labs



-- 
Yours,
Muchun


Re: [PATCH v5 17/21] mm/hugetlb: Add a kernel parameter hugetlb_free_vmemmap

2020-11-20 Thread Michal Hocko
On Fri 20-11-20 14:43:21, Muchun Song wrote:
> Add a kernel parameter hugetlb_free_vmemmap to disable the feature of
> freeing unused vmemmap pages associated with each hugetlb page on boot.

As replied to the config patch. This is fine but I would argue that the
default should be flipped. Saving memory is nice but it comes with
overhead and therefore should be an opt-in. The config option should
only guard compile time dependencies not a user choice.

> Signed-off-by: Muchun Song 
> ---
>  Documentation/admin-guide/kernel-parameters.txt |  9 +
>  Documentation/admin-guide/mm/hugetlbpage.rst|  3 +++
>  mm/hugetlb_vmemmap.c| 21 +
>  3 files changed, 33 insertions(+)
> 
> diff --git a/Documentation/admin-guide/kernel-parameters.txt 
> b/Documentation/admin-guide/kernel-parameters.txt
> index 5debfe238027..ccf07293cb63 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -1551,6 +1551,15 @@
>   Documentation/admin-guide/mm/hugetlbpage.rst.
>   Format: size[KMG]
>  
> + hugetlb_free_vmemmap=
> + [KNL] When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
> + this controls freeing unused vmemmap pages associated
> + with each HugeTLB page.
> + Format: { on (default) | off }
> +
> + on:  enable the feature
> + off: disable the feature
> +
>   hung_task_panic=
>   [KNL] Should the hung task detector generate panics.
>   Format: 0 | 1
> diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst 
> b/Documentation/admin-guide/mm/hugetlbpage.rst
> index f7b1c7462991..7d6129ee97dd 100644
> --- a/Documentation/admin-guide/mm/hugetlbpage.rst
> +++ b/Documentation/admin-guide/mm/hugetlbpage.rst
> @@ -145,6 +145,9 @@ default_hugepagesz
>  
>   will all result in 256 2M huge pages being allocated.  Valid default
>   huge page size is architecture dependent.
> +hugetlb_free_vmemmap
> + When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this disables freeing
> + unused vmemmap pages associated with each HugeTLB page.
>  
>  When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
>  indicates the current number of pre-allocated huge pages of the default size.
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index 3629165d8158..c958699d1393 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -144,6 +144,22 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
>  }
>  #endif
>  
> +static bool hugetlb_free_vmemmap_disabled __initdata;
> +
> +static int __init early_hugetlb_free_vmemmap_param(char *buf)
> +{
> + if (!buf)
> + return -EINVAL;
> +
> + if (!strcmp(buf, "off"))
> + hugetlb_free_vmemmap_disabled = true;
> + else if (strcmp(buf, "on"))
> + return -EINVAL;
> +
> + return 0;
> +}
> +early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
> +
>  static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
>  {
>   return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
> @@ -541,6 +557,11 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
>   unsigned int order = huge_page_order(h);
>   unsigned int vmemmap_pages;
>  
> + if (hugetlb_free_vmemmap_disabled) {
> + pr_info("disable free vmemmap pages for %s\n", h->name);
> + return;
> + }
> +
>   vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
>   /*
>* The head page and the first tail page are not to be freed to buddy
> -- 
> 2.11.0

-- 
Michal Hocko
SUSE Labs


[PATCH v5 17/21] mm/hugetlb: Add a kernel parameter hugetlb_free_vmemmap

2020-11-19 Thread Muchun Song
Add a kernel parameter hugetlb_free_vmemmap to disable the feature of
freeing unused vmemmap pages associated with each hugetlb page on boot.

Signed-off-by: Muchun Song 
---
 Documentation/admin-guide/kernel-parameters.txt |  9 +
 Documentation/admin-guide/mm/hugetlbpage.rst|  3 +++
 mm/hugetlb_vmemmap.c| 21 +
 3 files changed, 33 insertions(+)

diff --git a/Documentation/admin-guide/kernel-parameters.txt 
b/Documentation/admin-guide/kernel-parameters.txt
index 5debfe238027..ccf07293cb63 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1551,6 +1551,15 @@
Documentation/admin-guide/mm/hugetlbpage.rst.
Format: size[KMG]
 
+   hugetlb_free_vmemmap=
+   [KNL] When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
+   this controls freeing unused vmemmap pages associated
+   with each HugeTLB page.
+   Format: { on (default) | off }
+
+   on:  enable the feature
+   off: disable the feature
+
hung_task_panic=
[KNL] Should the hung task detector generate panics.
Format: 0 | 1
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst 
b/Documentation/admin-guide/mm/hugetlbpage.rst
index f7b1c7462991..7d6129ee97dd 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -145,6 +145,9 @@ default_hugepagesz
 
will all result in 256 2M huge pages being allocated.  Valid default
huge page size is architecture dependent.
+hugetlb_free_vmemmap
+   When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this disables freeing
+   unused vmemmap pages associated with each HugeTLB page.
 
 When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
 indicates the current number of pre-allocated huge pages of the default size.
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 3629165d8158..c958699d1393 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -144,6 +144,22 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 }
 #endif
 
+static bool hugetlb_free_vmemmap_disabled __initdata;
+
+static int __init early_hugetlb_free_vmemmap_param(char *buf)
+{
+   if (!buf)
+   return -EINVAL;
+
+   if (!strcmp(buf, "off"))
+   hugetlb_free_vmemmap_disabled = true;
+   else if (strcmp(buf, "on"))
+   return -EINVAL;
+
+   return 0;
+}
+early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
+
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
@@ -541,6 +557,11 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
unsigned int order = huge_page_order(h);
unsigned int vmemmap_pages;
 
+   if (hugetlb_free_vmemmap_disabled) {
+   pr_info("disable free vmemmap pages for %s\n", h->name);
+   return;
+   }
+
vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
/*
 * The head page and the first tail page are not to be freed to buddy
-- 
2.11.0