From: Joonsoo Kim <iamjoonsoo....@lge.com>

It's not good practice to modify user input. Instead of modifying the
caller-provided gfp_mask to build the mask that the APIs use internally,
this patch introduces a separate field, __gfp_mask, for internal usage.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/hugetlb.c  | 19 ++++++++++---------
 mm/internal.h |  2 ++
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e465582..4757e72 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1068,15 +1068,15 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h,
        struct zoneref *z;
        int node = NUMA_NO_NODE;
 
-       zonelist = node_zonelist(ac->nid, ac->gfp_mask);
+       zonelist = node_zonelist(ac->nid, ac->__gfp_mask);
 
 retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                       gfp_zone(ac->gfp_mask), ac->nmask) {
+                       gfp_zone(ac->__gfp_mask), ac->nmask) {
                struct page *page;
 
-               if (!cpuset_zone_allowed(zone, ac->gfp_mask))
+               if (!cpuset_zone_allowed(zone, ac->__gfp_mask))
                        continue;
                /*
                 * no need to ask again on the same node. Pool is node rather than
@@ -1127,8 +1127,8 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;
 
-       ac.gfp_mask = htlb_alloc_mask(h);
-       ac.nid = huge_node(vma, address, ac.gfp_mask, &mpol, &ac.nmask);
+       ac.__gfp_mask = htlb_alloc_mask(h);
+       ac.nid = huge_node(vma, address, ac.__gfp_mask, &mpol, &ac.nmask);
 
        page = dequeue_huge_page_nodemask(h, &ac);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
@@ -1951,7 +1951,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h,
        if (hstate_is_gigantic(h))
                return NULL;
 
-       page = alloc_fresh_huge_page(h, ac->gfp_mask,
+       page = alloc_fresh_huge_page(h, ac->__gfp_mask,
                                ac->nid, ac->nmask, NULL);
        if (!page)
                return NULL;
@@ -1989,9 +1989,10 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 struct page *alloc_huge_page_nodemask(struct hstate *h,
                                struct alloc_control *ac)
 {
-       ac->gfp_mask |= htlb_alloc_mask(h);
+       ac->__gfp_mask = htlb_alloc_mask(h);
+       ac->__gfp_mask |= ac->gfp_mask;
        if (ac->nid == NUMA_NO_NODE)
-               ac->gfp_mask &= ~__GFP_THISNODE;
+               ac->__gfp_mask &= ~__GFP_THISNODE;
 
        spin_lock(&hugetlb_lock);
        if (h->free_huge_pages - h->resv_huge_pages > 0) {
@@ -2010,7 +2011,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h,
         * will not come from CMA area
         */
        if (ac->skip_cma)
-               ac->gfp_mask &= ~__GFP_MOVABLE;
+               ac->__gfp_mask &= ~__GFP_MOVABLE;
 
        return alloc_migrate_huge_page(h, ac);
 }
diff --git a/mm/internal.h b/mm/internal.h
index 159cfd6..2dc0268 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -619,6 +619,8 @@ struct alloc_control {
        nodemask_t *nmask;
        gfp_t gfp_mask;
        bool skip_cma;
+
+       gfp_t __gfp_mask;       /* Used internally in API implementation */
 };
 
 #endif /* __MM_INTERNAL_H */
-- 
2.7.4

Reply via email to