On Wed, 15 Nov 2017, Michal Hocko wrote:

> > > >         if (!hugepages_supported())
> > > >                 return;
> > > >         seq_printf(m,
> > > > @@ -2987,6 +2989,11 @@ void hugetlb_report_meminfo(struct seq_file *m)
> > > >                         h->resv_huge_pages,
> > > >                         h->surplus_huge_pages,
> > > >                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
> > > > +
> > > > +       for_each_hstate(h)
> > > > +       for_each_hstate(h)
> > > > +               total += (PAGE_SIZE << huge_page_order(h)) * h->nr_huge_pages;
> > > 
> > > Please keep the total calculation consistent with what we have there
> > > already.
> > > 
> > 
> > Yeah, and I'm not sure if your comment alludes to this being racy, but it 
> > would be better to store the default size for default_hstate during the 
> > iteration to total the size for all hstates.
> 
> I just meant to have the code consistent. I do not prefer one or other
> option.

It's always nice when HugePages_Total * Hugepagesize cannot become greater 
than Hugetlb.  Roman, could you factor something like this into your 
change accompanied with a documentation update as suggested by Dave?

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2975,20 +2975,33 @@ int hugetlb_overcommit_handler(struct ctl_table *table, 
int write,
 
 void hugetlb_report_meminfo(struct seq_file *m)
 {
-       struct hstate *h = &default_hstate;
+       struct hstate *h;
+       unsigned long total = 0;
+
        if (!hugepages_supported())
                return;
-       seq_printf(m,
-                       "HugePages_Total:   %5lu\n"
-                       "HugePages_Free:    %5lu\n"
-                       "HugePages_Rsvd:    %5lu\n"
-                       "HugePages_Surp:    %5lu\n"
-                       "Hugepagesize:   %8lu kB\n",
-                       h->nr_huge_pages,
-                       h->free_huge_pages,
-                       h->resv_huge_pages,
-                       h->surplus_huge_pages,
-                       1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+
+       for_each_hstate(h) {
+               unsigned long nr_huge_pages = h->nr_huge_pages;
+
+               total += nr_huge_pages <<
+                        (huge_page_order(h) + PAGE_SHIFT - 10);
+
+               if (h == &default_hstate) {
+                       seq_printf(m,
+                               "HugePages_Total:   %5lu\n"
+                               "HugePages_Free:    %5lu\n"
+                               "HugePages_Rsvd:    %5lu\n"
+                               "HugePages_Surp:    %5lu\n"
+                               "Hugepagesize:   %8lu kB\n",
+                               nr_huge_pages,
+                               h->free_huge_pages,
+                               h->resv_huge_pages,
+                               h->surplus_huge_pages,
+                               1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+               }
+       }
+       seq_printf(m, "Hugetlb:            %5lu kB\n", total);
 }
 
 int hugetlb_report_node_meminfo(int nid, char *buf)

Reply via email to