From: Joonsoo Kim <iamjoonsoo....@lge.com>

Currently, preventing the cma area from being used for page allocation is
implemented by using current_gfp_context(). However, this implementation
has two problems.

First, it doesn't work for the allocation fastpath. The fastpath uses the
original gfp_mask, because current_gfp_context() was introduced to control
reclaim and is only applied on the slowpath.
Second, clearing __GFP_MOVABLE has the side effect of also excluding the
memory on ZONE_MOVABLE from the allocation targets.
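
To make the second problem concrete, here is a sketch (example mask only,
not code from this patch) of how both the zone and the migratetype are
derived from the same __GFP_MOVABLE bit:

	gfp_t mask = GFP_HIGHUSER_MOVABLE;	/* __GFP_HIGHMEM | __GFP_MOVABLE | ... */
	enum zone_type zone;
	int mt;

	zone = gfp_zone(mask);			/* ZONE_MOVABLE */

	mask &= ~__GFP_MOVABLE;			/* what PF_MEMALLOC_NOCMA used to do */
	mt = gfp_migratetype(mask);		/* not MIGRATE_MOVABLE -> no CMA fallback... */
	zone = gfp_zone(mask);			/* ...but ZONE_MOVABLE is also dropped */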

To fix these problems, this patch changes how the cma area is excluded
from page allocation. The main point of this change is to use alloc_flags:
alloc_flags is what actually controls the allocation, so it is the right
place to exclude the cma area.
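
With this change, a nocma section like the following (illustrative caller,
not part of this patch) avoids MIGRATE_CMA pageblocks on both the fastpath
and the slowpath, while ZONE_MOVABLE remains an allocation target:

	unsigned int noncma_flag;
	struct page *page;

	noncma_flag = memalloc_nocma_save();

	/*
	 * current_alloc_flags() sees PF_MEMALLOC_NOCMA and does not set
	 * ALLOC_CMA, so __rmqueue() never falls back to MIGRATE_CMA
	 * pageblocks, regardless of which allocation path is taken.
	 */
	page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);

	memalloc_nocma_restore(noncma_flag);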

Fixes: d7fefcc8de91 ("mm/cma: add PF flag to force non cma alloc")
Cc: <sta...@vger.kernel.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 include/linux/sched/mm.h |  8 +-------
 mm/page_alloc.c          | 37 ++++++++++++++++++++++++-------------
 2 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 44ad5b7..6c652ec 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -175,14 +175,12 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
        unsigned int pflags = READ_ONCE(current->flags);
 
-       if (unlikely(pflags &
-                    (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+       if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it makes precedence
@@ -191,10 +189,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
-               if (pflags & PF_MEMALLOC_NOCMA)
-                       flags &= ~__GFP_MOVABLE;
-#endif
        }
        return flags;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6416d08..b529220 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2791,7 +2791,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
         * allocating from CMA when over half of the zone's free memory
         * is in the CMA area.
         */
-       if (migratetype == MIGRATE_MOVABLE &&
+       if (alloc_flags & ALLOC_CMA &&
            zone_page_state(zone, NR_FREE_CMA_PAGES) >
            zone_page_state(zone, NR_FREE_PAGES) / 2) {
                page = __rmqueue_cma_fallback(zone, order);
@@ -2802,7 +2802,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
-               if (migratetype == MIGRATE_MOVABLE)
+               if (alloc_flags & ALLOC_CMA)
                        page = __rmqueue_cma_fallback(zone, order);
 
                if (!page && __rmqueue_fallback(zone, order, migratetype,
@@ -3502,11 +3502,9 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
        if (likely(!alloc_harder))
                unusable_free += z->nr_reserved_highatomic;
 
-#ifdef CONFIG_CMA
        /* If allocation can't use CMA areas don't use free CMA pages */
-       if (!(alloc_flags & ALLOC_CMA))
+       if (IS_ENABLED(CONFIG_CMA) && !(alloc_flags & ALLOC_CMA))
                unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
        return unusable_free;
 }
@@ -3693,6 +3691,20 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
        return alloc_flags;
 }
 
+static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
+                                       unsigned int alloc_flags)
+{
+#ifdef CONFIG_CMA
+       unsigned int pflags = current->flags;
+
+       if (!(pflags & PF_MEMALLOC_NOCMA) &&
+               gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+               alloc_flags |= ALLOC_CMA;
+
+#endif
+       return alloc_flags;
+}
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -4339,10 +4351,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
        } else if (unlikely(rt_task(current)) && !in_interrupt())
                alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-       if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-               alloc_flags |= ALLOC_CMA;
-#endif
+       alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+
        return alloc_flags;
 }
 
@@ -4642,8 +4652,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                wake_all_kswapds(order, gfp_mask, ac);
 
        reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
-       if (reserve_flags)
+       if (reserve_flags) {
                alloc_flags = reserve_flags;
+               alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+       }
 
        /*
         * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4720,7 +4732,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
        /* Avoid allocations with no watermarks from looping endlessly */
        if (tsk_is_oom_victim(current) &&
-           (alloc_flags == ALLOC_OOM ||
+           (alloc_flags & ALLOC_OOM ||
             (gfp_mask & __GFP_NOMEMALLOC)))
                goto nopage;
 
@@ -4808,8 +4820,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
        if (should_fail_alloc_page(gfp_mask, order))
                return false;
 
-       if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-               *alloc_flags |= ALLOC_CMA;
+       *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
 
        return true;
 }
-- 
2.7.4
