From: Joonsoo Kim <iamjoonsoo....@lge.com>

The gfp_mask handling in alloc_huge_page_(node|nodemask) is
slightly changed, from plain assignment to bitwise OR. This is safe
since the callers of these functions don't pass any extra gfp_mask
other than htlb_alloc_mask().

This is a preparation step for the following patches.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/hugetlb.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 453ba94..dabe460 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@ struct page *alloc_huge_page_node(struct hstate *h,
 {
        struct page *page = NULL;
 
-       ac->gfp_mask = htlb_alloc_mask(h);
+       ac->gfp_mask |= htlb_alloc_mask(h);
        if (ac->nid != NUMA_NO_NODE)
                ac->gfp_mask |= __GFP_THISNODE;
 
@@ -2004,7 +2004,7 @@ struct page *alloc_huge_page_node(struct hstate *h,
 struct page *alloc_huge_page_nodemask(struct hstate *h,
                                struct alloc_control *ac)
 {
-       ac->gfp_mask = htlb_alloc_mask(h);
+       ac->gfp_mask |= htlb_alloc_mask(h);
 
        spin_lock(&hugetlb_lock);
        if (h->free_huge_pages - h->resv_huge_pages > 0) {
-- 
2.7.4

Reply via email to