Re: [PATCH v9 03/12] mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal

2019-06-18 Thread Dan Williams
On Mon, Jun 17, 2019 at 6:42 PM Wei Yang wrote:
>
> On Wed, Jun 05, 2019 at 02:58:04PM -0700, Dan Williams wrote:
> >Sub-section hotplug support reduces the unit of operation of hotplug
> >from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
> >(PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
> >PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
> >valid_section(), can toggle.
> >
> >Cc: Michal Hocko 
> >Cc: Vlastimil Babka 
> >Cc: Logan Gunthorpe 
> >Reviewed-by: Pavel Tatashin 
> >Reviewed-by: Oscar Salvador 
> >Signed-off-by: Dan Williams 
> >---
> > mm/memory_hotplug.c |   29 ++++++++---------------------
> > 1 file changed, 8 insertions(+), 21 deletions(-)
> >
> >diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> >index 7b963c2d3a0d..647859a1d119 100644
> >--- a/mm/memory_hotplug.c
> >+++ b/mm/memory_hotplug.c
> >@@ -318,12 +318,8 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
> >unsigned long start_pfn,
> >unsigned long end_pfn)
> > {
> >-  struct mem_section *ms;
> >-
> >-  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
> >-  ms = __pfn_to_section(start_pfn);
> >-
> >-  if (unlikely(!valid_section(ms)))
> >+  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
> >+  if (unlikely(!pfn_valid(start_pfn)))
> >   continue;
>
> Hmm, this changes the granularity of the validity check from SECTION to
> SUBSECTION, but the node id and zone information are still tracked at
> section granularity.
>
> For example, when we find that the node id of a pfn mismatches, we could
> skip the whole section instead of just one subsection.
>
> Maybe this is not a big deal.

I don't see a problem.
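
For context, the sub-section granularity discussed above comes from
constants introduced earlier in this series. A condensed sketch of the
relevant definitions (from the v9 posting; the 2 MiB figure assumes
x86_64 with 4K pages, and the merged code may differ):

    /* include/linux/mmzone.h (sketch) */
    #define SUBSECTION_SHIFT 21                                 /* 2 MiB */
    #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
    #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)  /* 512 pages */
    #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION - 1))

With 4K pages a subsection is 512 pages (2 MiB), so a 128 MiB x86_64
section (PAGES_PER_SECTION == 32768) holds 64 subsections; the shrink
loops can therefore take up to 64x more iterations per section, which is
the cost behind Wei Yang's question.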


Re: [PATCH v9 03/12] mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal

2019-06-17 Thread Wei Yang
On Wed, Jun 05, 2019 at 02:58:04PM -0700, Dan Williams wrote:
>Sub-section hotplug support reduces the unit of operation of hotplug
>from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
>(PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
>PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
>valid_section(), can toggle.
>
>Cc: Michal Hocko 
>Cc: Vlastimil Babka 
>Cc: Logan Gunthorpe 
>Reviewed-by: Pavel Tatashin 
>Reviewed-by: Oscar Salvador 
>Signed-off-by: Dan Williams 
>---
> mm/memory_hotplug.c |   29 ++++++++---------------------
> 1 file changed, 8 insertions(+), 21 deletions(-)
>
>diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>index 7b963c2d3a0d..647859a1d119 100644
>--- a/mm/memory_hotplug.c
>+++ b/mm/memory_hotplug.c
>@@ -318,12 +318,8 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
>unsigned long start_pfn,
>unsigned long end_pfn)
> {
>-  struct mem_section *ms;
>-
>-  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
>-  ms = __pfn_to_section(start_pfn);
>-
>-  if (unlikely(!valid_section(ms)))
>+  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
>+  if (unlikely(!pfn_valid(start_pfn)))
>   continue;

Hmm, this changes the granularity of the validity check from SECTION to
SUBSECTION, but the node id and zone information are still tracked at
section granularity.

For example, when we find that the node id of a pfn mismatches, we could
skip the whole section instead of just one subsection.

Maybe this is not a big deal.

> 
>   if (unlikely(pfn_to_nid(start_pfn) != nid))
>@@ -343,15 +339,12 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
>   unsigned long start_pfn,
>   unsigned long end_pfn)
> {
>-  struct mem_section *ms;
>   unsigned long pfn;
> 
>   /* pfn is the end pfn of a memory section. */
>   pfn = end_pfn - 1;
>-  for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
>-  ms = __pfn_to_section(pfn);
>-
>-  if (unlikely(!valid_section(ms)))
>+  for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
>+  if (unlikely(!pfn_valid(pfn)))
>   continue;
> 
>   if (unlikely(pfn_to_nid(pfn) != nid))
>@@ -373,7 +366,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>   unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
>   unsigned long zone_end_pfn = z;
>   unsigned long pfn;
>-  struct mem_section *ms;
>   int nid = zone_to_nid(zone);
> 
>   zone_span_writelock(zone);
>@@ -410,10 +402,8 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>* it check the zone has only hole or not.
>*/
>   pfn = zone_start_pfn;
>-  for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
>-  ms = __pfn_to_section(pfn);
>-
>-  if (unlikely(!valid_section(ms)))
>+  for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
>+  if (unlikely(!pfn_valid(pfn)))
>   continue;
> 
>   if (page_zone(pfn_to_page(pfn)) != zone)
>@@ -441,7 +431,6 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
>   unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
>   unsigned long pgdat_end_pfn = p;
>   unsigned long pfn;
>-  struct mem_section *ms;
>   int nid = pgdat->node_id;
> 
>   if (pgdat_start_pfn == start_pfn) {
>@@ -478,10 +467,8 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
>* has only hole or not.
>*/
>   pfn = pgdat_start_pfn;
>-  for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
>-  ms = __pfn_to_section(pfn);
>-
>-  if (unlikely(!valid_section(ms)))
>+  for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
>+  if (unlikely(!pfn_valid(pfn)))
>   continue;
> 
>   if (pfn_to_nid(pfn) != nid)
>

-- 
Wei Yang
Help you, Help me
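
For illustration, the section-skip optimization Wei Yang suggests could
look roughly like the following hypothetical sketch against
find_smallest_section_pfn() (not part of the posted patch; the zone check
is omitted for brevity). On a node-id mismatch it jumps ahead so the loop
increment lands on the next section boundary, since the node id cannot
change within a section:

    for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
        if (unlikely(!pfn_valid(start_pfn)))
            continue;

        if (unlikely(pfn_to_nid(start_pfn) != nid)) {
            /*
             * The node id is constant across a section, so step
             * to the last subsection of this section; the loop
             * increment then moves to the next section boundary.
             */
            start_pfn = ALIGN(start_pfn + 1, PAGES_PER_SECTION) -
                        PAGES_PER_SUBSECTION;
            continue;
        }

        return start_pfn;
    }

Dan's reply above suggests the extra iterations were judged cheap enough
to keep the simpler uniform stride.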


[PATCH v9 03/12] mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal

2019-06-05 Thread Dan Williams
Sub-section hotplug support reduces the unit of operation of hotplug
from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
(PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
valid_section(), can toggle.

Cc: Michal Hocko 
Cc: Vlastimil Babka 
Cc: Logan Gunthorpe 
Reviewed-by: Pavel Tatashin 
Reviewed-by: Oscar Salvador 
Signed-off-by: Dan Williams 
---
 mm/memory_hotplug.c |   29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7b963c2d3a0d..647859a1d119 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -318,12 +318,8 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 unsigned long start_pfn,
 unsigned long end_pfn)
 {
-   struct mem_section *ms;
-
-   for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
-   ms = __pfn_to_section(start_pfn);
-
-   if (unlikely(!valid_section(ms)))
+   for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
+   if (unlikely(!pfn_valid(start_pfn)))
continue;
 
if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -343,15 +339,12 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn)
 {
-   struct mem_section *ms;
unsigned long pfn;
 
/* pfn is the end pfn of a memory section. */
pfn = end_pfn - 1;
-   for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
-   ms = __pfn_to_section(pfn);
-
-   if (unlikely(!valid_section(ms)))
+   for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
+   if (unlikely(!pfn_valid(pfn)))
continue;
 
if (unlikely(pfn_to_nid(pfn) != nid))
@@ -373,7 +366,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
unsigned long zone_end_pfn = z;
unsigned long pfn;
-   struct mem_section *ms;
int nid = zone_to_nid(zone);
 
zone_span_writelock(zone);
@@ -410,10 +402,8 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 * it check the zone has only hole or not.
 */
pfn = zone_start_pfn;
-   for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
-   ms = __pfn_to_section(pfn);
-
-   if (unlikely(!valid_section(ms)))
+   for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+   if (unlikely(!pfn_valid(pfn)))
continue;
 
if (page_zone(pfn_to_page(pfn)) != zone)
@@ -441,7 +431,6 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
unsigned long pgdat_end_pfn = p;
unsigned long pfn;
-   struct mem_section *ms;
int nid = pgdat->node_id;
 
if (pgdat_start_pfn == start_pfn) {
@@ -478,10 +467,8 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
 * has only hole or not.
 */
pfn = pgdat_start_pfn;
-   for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
-   ms = __pfn_to_section(pfn);
-
-   if (unlikely(!valid_section(ms)))
+   for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+   if (unlikely(!pfn_valid(pfn)))
continue;
 
if (pfn_to_nid(pfn) != nid)
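
The commit message's point that pfn_valid(), not valid_section(), is now
where validity can toggle follows from the pfn_valid() rework elsewhere
in this series: with CONFIG_SPARSEMEM_VMEMMAP, pfn_valid() additionally
consults a per-section bitmap of active subsections. A rough sketch of
its post-series shape (condensed; details may differ from the merged
code):

    static inline int pfn_section_valid(struct mem_section *ms,
                                        unsigned long pfn)
    {
        int idx = subsection_map_index(pfn);

        return test_bit(idx, ms->usage->subsection_map);
    }

    static inline int pfn_valid(unsigned long pfn)
    {
        struct mem_section *ms;

        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
            return 0;
        ms = __nr_to_section(pfn_to_section_nr(pfn));
        if (!valid_section(ms))
            return 0;
        /* early (boot) sections are always fully populated */
        return early_section(ms) || pfn_section_valid(ms, pfn);
    }

Hot-removing a 2 MiB subsection clears its bit in subsection_map, so
pfn_valid() can flip at PAGES_PER_SUBSECTION boundaries while
valid_section() still reports the containing section as present; a
section-granularity scan would treat a partially removed section as
fully valid.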