On Fri, Jul 27, 2012 at 04:23:47PM +0100, Mel Gorman wrote:
> > > --- a/mm/slub.c
> > > +++ b/mm/slub.c
> > > @@ -1457,6 +1457,7 @@ static struct page *get_any_partial(stru
> > >   struct zone *zone;
> > >   enum zone_type high_zoneidx = gfp_zone(flags);
> > >   struct page *page;
> > > + unsigned int cpuset_mems_cookie;
> > >  
> > >   /*
> > >    * The defrag ratio allows a configuration of the tradeoffs between
> > > @@ -1480,22 +1481,32 @@ static struct page *get_any_partial(stru
> > >                   get_cycles() % 1024 > s->remote_node_defrag_ratio)
> > >           return NULL;
> > >  
> > > - get_mems_allowed();
> > > - zonelist = node_zonelist(slab_node(current->mempolicy), flags);
> > > - for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
> > > -         struct kmem_cache_node *n;
> > > + do {
> > > +         cpuset_mems_cookie = get_mems_allowed();
> > > +         zonelist = node_zonelist(slab_node(current->mempolicy), flags);
> > > +         for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
> > > +                 struct kmem_cache_node *n;
> > >  
> > > -         n = get_node(s, zone_to_nid(zone));
> > > +                 n = get_node(s, zone_to_nid(zone));
> > >  
> > > -         if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
> > > -                         n->nr_partial > s->min_partial) {
> > > -                 page = get_partial_node(n);
> > > -                 if (page) {
> > > -                         put_mems_allowed();
> > > -                         return page;
> > > +                 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
> > > +                                 n->nr_partial > s->min_partial) {
> > > +                         page = get_partial_node(n);
> > > +                         if (page) {
> > > +                                 /*
> > > +                                  * Return the object even if
> > > +                                  * put_mems_allowed indicated that
> > > +                                  * the cpuset mems_allowed was
> > > +                                  * updated in parallel. It's a
> > > +                                  * harmless race between the alloc
> > > +                                  * and the cpuset update.
> > > +                                  */
> > > +                                 put_mems_allowed(cpuset_mems_cookie);
> > > +                                 return page;
> > > +                         }
> > >                   }
> > >           }
> > > - }
> > > + } while (!put_mems_allowed(cpuset_mems_cookie));
> > >   put_mems_allowed();
> > 
> > This doesn't build on 3.0; the backport left the stray put_mems_allowed()
> > call above:
> > 
> > linux-stable/mm/slub.c: In function 'get_any_partial':
> > linux-stable/mm/slub.c:1510:2: error: too few arguments to function 'put_mems_allowed'
> > linux-stable/include/linux/cpuset.h:108:20: note: declared here
> > 
> 
> That line should have been deleted; the tests were based on slab, so the
> slub build failure was missed. My apologies.
> 
> ---8<---
> cpuset: mm: Reduce large amounts of memory barrier related damage fix
> 
> linux-stable/mm/slub.c: In function 'get_any_partial':
> linux-stable/mm/slub.c:1510:2: error: too few arguments to function 'put_mems_allowed'
> linux-stable/include/linux/cpuset.h:108:20: note: declared here
> 
> Reported-by: Herton Ronaldo Krzesinski <herton.krzesin...@canonical.com>
> Signed-off-by: Mel Gorman <mgor...@suse.de>
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 00ccf2c..ae6e80e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1507,7 +1507,6 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
>                       }
>               }
>       } while (!put_mems_allowed(cpuset_mems_cookie));
> -     put_mems_allowed();
>  #endif
>       return NULL;
>  }

Thanks, I've merged this with the "original" in the tree, so all should
be good now.
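
For anyone following along, the fix relies on the seqcount-style cookie API:
get_mems_allowed() samples a sequence counter and put_mems_allowed(cookie)
reports whether mems_allowed changed underneath the reader, so callers loop
until they observe a consistent view. A minimal user-space sketch of that
pattern follows (names such as mems_seq, read_mems_cookie and
cookie_still_valid are hypothetical stand-ins for illustration, not the
kernel API):

/*
 * Sketch of the seqcount-style cookie pattern used by the cookie-based
 * get_mems_allowed()/put_mems_allowed() API. User-space toy, single
 * threaded; all names here are made up for illustration.
 */
#include <stdio.h>

static unsigned int mems_seq;		/* bumped around each writer update */
static int mems_allowed = 1;		/* state readers want a stable view of */

/* get_mems_allowed() analogue: sample the sequence counter */
static unsigned int read_mems_cookie(void)
{
	return mems_seq;
}

/* put_mems_allowed() analogue: true if no update raced with the reader */
static int cookie_still_valid(unsigned int cookie)
{
	return mems_seq == cookie;
}

/* writer side: bump the counter around the update */
static void update_mems_allowed(int new_value)
{
	mems_seq++;
	mems_allowed = new_value;
	mems_seq++;
}

int main(void)
{
	unsigned int cookie;
	int seen;

	/* reader: retry until a consistent snapshot is observed */
	do {
		cookie = read_mems_cookie();
		seen = mems_allowed;
	} while (!cookie_still_valid(cookie));

	printf("stable view of mems_allowed: %d\n", seen);
	update_mems_allowed(0);
	return 0;
}

The one twist in get_any_partial() is that a page taken from a partial list
is returned even when the cookie check would have failed, since racing with
a cpuset update at that point is harmless, as the comment in the patch notes.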

greg k-h