Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Mon, Mar 12, 2018 at 02:22:28PM +0100, Vlastimil Babka wrote: > On 03/01/2018 07:28 AM, Aaron Lu wrote: > > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > > update pcp->count immediately after so it's natural to do it inside > > free_pcppages_bulk(). > > > > No functionality or performance change is expected from this patch. > > Well, it's N decrements instead of one decrement by N / assignment of > zero. But I assume the difference is negligible anyway, right? Yes. > > > Suggested-by: Matthew Wilcox > > Signed-off-by: Aaron Lu > > Acked-by: Vlastimil Babka Thanks! > > --- > > mm/page_alloc.c | 10 +++--- > > 1 file changed, 3 insertions(+), 7 deletions(-) > > > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > > index cb416723538f..faa33eac1635 100644 > > --- a/mm/page_alloc.c > > +++ b/mm/page_alloc.c > > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > > count, > > page = list_last_entry(list, struct page, lru); > > /* must delete as __free_one_page list manipulates */ > > list_del(&page->lru); > > + pcp->count--; > > > > mt = get_pcppage_migratetype(page); > > /* MIGRATE_ISOLATE page should not go to pcplists */ > > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > > per_cpu_pages *pcp) > > local_irq_save(flags); > > batch = READ_ONCE(pcp->batch); > > to_drain = min(pcp->count, batch); > > - if (to_drain > 0) { > > + if (to_drain > 0) > > free_pcppages_bulk(zone, to_drain, pcp); > > - pcp->count -= to_drain; > > - } > > local_irq_restore(flags); > > } > > #endif > > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, > > struct zone *zone) > > pset = per_cpu_ptr(zone->pageset, cpu); > > > > pcp = &pset->pcp; > > - if (pcp->count) { > > + if (pcp->count) > > free_pcppages_bulk(zone, pcp->count, pcp); > > - pcp->count = 0; > > - } > > local_irq_restore(flags); > > } > > > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > > unsigned long pfn) > > if (pcp->count >= pcp->high) { > > unsigned long batch = READ_ONCE(pcp->batch); > > free_pcppages_bulk(zone, batch, pcp); > > - pcp->count -= batch; > > } > > } > > >
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Mon, Mar 12, 2018 at 02:22:28PM +0100, Vlastimil Babka wrote: > On 03/01/2018 07:28 AM, Aaron Lu wrote: > > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > > update pcp->count immediately after so it's natural to do it inside > > free_pcppages_bulk(). > > > > No functionality or performance change is expected from this patch. > > Well, it's N decrements instead of one decrement by N / assignment of > zero. But I assume the difference is negligible anyway, right? Yes. > > > Suggested-by: Matthew Wilcox > > Signed-off-by: Aaron Lu > > Acked-by: Vlastimil Babka Thanks! > > --- > > mm/page_alloc.c | 10 +++--- > > 1 file changed, 3 insertions(+), 7 deletions(-) > > > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > > index cb416723538f..faa33eac1635 100644 > > --- a/mm/page_alloc.c > > +++ b/mm/page_alloc.c > > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > > count, > > page = list_last_entry(list, struct page, lru); > > /* must delete as __free_one_page list manipulates */ > > list_del(&page->lru); > > + pcp->count--; > > > > mt = get_pcppage_migratetype(page); > > /* MIGRATE_ISOLATE page should not go to pcplists */ > > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > > per_cpu_pages *pcp) > > local_irq_save(flags); > > batch = READ_ONCE(pcp->batch); > > to_drain = min(pcp->count, batch); > > - if (to_drain > 0) { > > + if (to_drain > 0) > > free_pcppages_bulk(zone, to_drain, pcp); > > - pcp->count -= to_drain; > > - } > > local_irq_restore(flags); > > } > > #endif > > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, > > struct zone *zone) > > pset = per_cpu_ptr(zone->pageset, cpu); > > > > pcp = &pset->pcp; > > - if (pcp->count) { > > + if (pcp->count) > > free_pcppages_bulk(zone, pcp->count, pcp); > > - pcp->count = 0; > > - } > > local_irq_restore(flags); > > } > > > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > > unsigned long pfn) > > if (pcp->count >= pcp->high) { > > unsigned long batch = READ_ONCE(pcp->batch); > > free_pcppages_bulk(zone, batch, pcp); > > - pcp->count -= batch; > > } > > } > > >
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On 03/01/2018 07:28 AM, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. Well, it's N decrements instead of one decrement by N / assignment of zero. But I assume the difference is negligible anyway, right? > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Acked-by: Vlastimil Babka > --- > mm/page_alloc.c | 10 +++--- > 1 file changed, 3 insertions(+), 7 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index cb416723538f..faa33eac1635 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > count, > page = list_last_entry(list, struct page, lru); > /* must delete as __free_one_page list manipulates */ > list_del(&page->lru); > + pcp->count--; > > mt = get_pcppage_migratetype(page); > /* MIGRATE_ISOLATE page should not go to pcplists */ > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > per_cpu_pages *pcp) > local_irq_save(flags); > batch = READ_ONCE(pcp->batch); > to_drain = min(pcp->count, batch); > - if (to_drain > 0) { > + if (to_drain > 0) > free_pcppages_bulk(zone, to_drain, pcp); > - pcp->count -= to_drain; > - } > local_irq_restore(flags); > } > #endif > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct > zone *zone) > pset = per_cpu_ptr(zone->pageset, cpu); > > pcp = &pset->pcp; > - if (pcp->count) { > + if (pcp->count) > free_pcppages_bulk(zone, pcp->count, pcp); > - pcp->count = 0; > - } > local_irq_restore(flags); > } > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > unsigned long pfn) > if (pcp->count >= pcp->high) { > unsigned long batch = READ_ONCE(pcp->batch); > free_pcppages_bulk(zone, batch, pcp); > - pcp->count -= batch; > } > } > >
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On 03/01/2018 07:28 AM, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. Well, it's N decrements instead of one decrement by N / assignment of zero. But I assume the difference is negligible anyway, right? > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Acked-by: Vlastimil Babka > --- > mm/page_alloc.c | 10 +++--- > 1 file changed, 3 insertions(+), 7 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index cb416723538f..faa33eac1635 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > count, > page = list_last_entry(list, struct page, lru); > /* must delete as __free_one_page list manipulates */ > list_del(&page->lru); > + pcp->count--; > > mt = get_pcppage_migratetype(page); > /* MIGRATE_ISOLATE page should not go to pcplists */ > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > per_cpu_pages *pcp) > local_irq_save(flags); > batch = READ_ONCE(pcp->batch); > to_drain = min(pcp->count, batch); > - if (to_drain > 0) { > + if (to_drain > 0) > free_pcppages_bulk(zone, to_drain, pcp); > - pcp->count -= to_drain; > - } > local_irq_restore(flags); > } > #endif > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct > zone *zone) > pset = per_cpu_ptr(zone->pageset, cpu); > > pcp = &pset->pcp; > - if (pcp->count) { > + if (pcp->count) > free_pcppages_bulk(zone, pcp->count, pcp); > - pcp->count = 0; > - } > local_irq_restore(flags); > } > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > unsigned long pfn) > if (pcp->count >= pcp->high) { > unsigned long batch = READ_ONCE(pcp->batch); > free_pcppages_bulk(zone, batch, pcp); > - pcp->count -= batch; > } > } > >
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Thu 01-03-18 14:28:43, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. > > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Makes a lot of sense to me. Acked-by: Michal Hocko > --- > mm/page_alloc.c | 10 +++--- > 1 file changed, 3 insertions(+), 7 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index cb416723538f..faa33eac1635 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > count, > page = list_last_entry(list, struct page, lru); > /* must delete as __free_one_page list manipulates */ > list_del(&page->lru); > + pcp->count--; > > mt = get_pcppage_migratetype(page); > /* MIGRATE_ISOLATE page should not go to pcplists */ > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > per_cpu_pages *pcp) > local_irq_save(flags); > batch = READ_ONCE(pcp->batch); > to_drain = min(pcp->count, batch); > - if (to_drain > 0) { > + if (to_drain > 0) > free_pcppages_bulk(zone, to_drain, pcp); > - pcp->count -= to_drain; > - } > local_irq_restore(flags); > } > #endif > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct > zone *zone) > pset = per_cpu_ptr(zone->pageset, cpu); > > pcp = &pset->pcp; > - if (pcp->count) { > + if (pcp->count) > free_pcppages_bulk(zone, pcp->count, pcp); > - pcp->count = 0; > - } > local_irq_restore(flags); > } > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > unsigned long pfn) > if (pcp->count >= pcp->high) { > unsigned long batch = READ_ONCE(pcp->batch); > free_pcppages_bulk(zone, batch, pcp); > - pcp->count -= batch; > } > } > > -- > 2.14.3 > -- Michal Hocko SUSE Labs
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Thu 01-03-18 14:28:43, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. > > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Makes a lot of sense to me. Acked-by: Michal Hocko > --- > mm/page_alloc.c | 10 +++--- > 1 file changed, 3 insertions(+), 7 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index cb416723538f..faa33eac1635 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int > count, > page = list_last_entry(list, struct page, lru); > /* must delete as __free_one_page list manipulates */ > list_del(&page->lru); > + pcp->count--; > > mt = get_pcppage_migratetype(page); > /* MIGRATE_ISOLATE page should not go to pcplists */ > @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct > per_cpu_pages *pcp) > local_irq_save(flags); > batch = READ_ONCE(pcp->batch); > to_drain = min(pcp->count, batch); > - if (to_drain > 0) { > + if (to_drain > 0) > free_pcppages_bulk(zone, to_drain, pcp); > - pcp->count -= to_drain; > - } > local_irq_restore(flags); > } > #endif > @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct > zone *zone) > pset = per_cpu_ptr(zone->pageset, cpu); > > pcp = &pset->pcp; > - if (pcp->count) { > + if (pcp->count) > free_pcppages_bulk(zone, pcp->count, pcp); > - pcp->count = 0; > - } > local_irq_restore(flags); > } > > @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, > unsigned long pfn) > if (pcp->count >= pcp->high) { > unsigned long batch = READ_ONCE(pcp->batch); > free_pcppages_bulk(zone, batch, pcp); > - pcp->count -= batch; > } > } > > -- > 2.14.3 > -- Michal Hocko SUSE Labs
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Thu, 1 Mar 2018, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. > > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Acked-by: David Rientjes
Re: [PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
On Thu, 1 Mar 2018, Aaron Lu wrote: > Matthew Wilcox found that all callers of free_pcppages_bulk() currently > update pcp->count immediately after so it's natural to do it inside > free_pcppages_bulk(). > > No functionality or performance change is expected from this patch. > > Suggested-by: Matthew Wilcox > Signed-off-by: Aaron Lu Acked-by: David Rientjes
[PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
Matthew Wilcox found that all callers of free_pcppages_bulk() currently update pcp->count immediately after so it's natural to do it inside free_pcppages_bulk(). No functionality or performance change is expected from this patch. Suggested-by: Matthew Wilcox Signed-off-by: Aaron Lu --- mm/page_alloc.c | 10 +++--- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cb416723538f..faa33eac1635 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, page = list_last_entry(list, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); + pcp->count--; mt = get_pcppage_migratetype(page); /* MIGRATE_ISOLATE page should not go to pcplists */ @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) local_irq_save(flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); - if (to_drain > 0) { + if (to_drain > 0) free_pcppages_bulk(zone, to_drain, pcp); - pcp->count -= to_drain; - } local_irq_restore(flags); } #endif @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - if (pcp->count) { + if (pcp->count) free_pcppages_bulk(zone, pcp->count, pcp); - pcp->count = 0; - } local_irq_restore(flags); } @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); free_pcppages_bulk(zone, batch, pcp); - pcp->count -= batch; } } -- 2.14.3
[PATCH v4 1/3] mm/free_pcppages_bulk: update pcp->count inside
Matthew Wilcox found that all callers of free_pcppages_bulk() currently update pcp->count immediately after so it's natural to do it inside free_pcppages_bulk(). No functionality or performance change is expected from this patch. Suggested-by: Matthew Wilcox Signed-off-by: Aaron Lu --- mm/page_alloc.c | 10 +++--- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cb416723538f..faa33eac1635 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1148,6 +1148,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, page = list_last_entry(list, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); + pcp->count--; mt = get_pcppage_migratetype(page); /* MIGRATE_ISOLATE page should not go to pcplists */ @@ -2416,10 +2417,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) local_irq_save(flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); - if (to_drain > 0) { + if (to_drain > 0) free_pcppages_bulk(zone, to_drain, pcp); - pcp->count -= to_drain; - } local_irq_restore(flags); } #endif @@ -2441,10 +2440,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - if (pcp->count) { + if (pcp->count) free_pcppages_bulk(zone, pcp->count, pcp); - pcp->count = 0; - } local_irq_restore(flags); } @@ -2668,7 +2665,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); free_pcppages_bulk(zone, batch, pcp); - pcp->count -= batch; } } -- 2.14.3