From: Joonsoo Kim <iamjoonsoo....@lge.com>

Now, all pages reserved for the CMA region belong to ZONE_CMA, and
that zone only serves GFP_HIGHUSER_MOVABLE allocations. Therefore, we
no longer need to consider ALLOC_CMA at all.
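
For illustration only, a minimal stand-alone sketch of the order-0
fast path that this patch simplifies. The helpers below are
hypothetical (they are not kernel APIs); they only mirror the logic of
zone_watermark_fast() before and after the change. With ZONE_CMA, the
per-zone NR_FREE_CMA_PAGES count is zero for every zone other than
ZONE_CMA, so the old ALLOC_CMA correction has nothing left to
subtract.

#include <stdbool.h>

/* Hypothetical sketch: old fast check, with the ALLOC_CMA correction. */
static bool watermark_fast_old(long free_pages, long free_cma_pages,
                               long mark, long lowmem_reserve,
                               bool alloc_cma)
{
        /* Requests that cannot use CMA areas must ignore free CMA pages. */
        if (!alloc_cma)
                free_pages -= free_cma_pages;

        return free_pages > mark + lowmem_reserve;
}

/* Hypothetical sketch: new fast check; CMA pages live only in ZONE_CMA. */
static bool watermark_fast_new(long free_pages, long mark,
                               long lowmem_reserve)
{
        return free_pages > mark + lowmem_reserve;
}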

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/internal.h   |  3 +--
 mm/page_alloc.c | 27 +++------------------------
 2 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 4c37234..04b75d6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -468,8 +468,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER           0x10 /* try to alloc harder */
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
-#define ALLOC_CMA              0x80 /* allow allocations from CMA areas */
-#define ALLOC_FAIR             0x100 /* fair zone allocation */
+#define ALLOC_FAIR             0x80 /* fair zone allocation */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 796b271..bab3698 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2798,12 +2798,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
        else
                min -= min / 4;
 
-#ifdef CONFIG_CMA
-       /* If allocation can't use CMA areas don't use free CMA pages */
-       if (!(alloc_flags & ALLOC_CMA))
-               free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
        /*
         * Check watermarks for an order-0 allocation request. If these
         * are not met, then a high-order request also cannot go ahead
@@ -2833,10 +2827,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
                }
 
 #ifdef CONFIG_CMA
-               if ((alloc_flags & ALLOC_CMA) &&
-                   !list_empty(&area->free_list[MIGRATE_CMA])) {
+               if (!list_empty(&area->free_list[MIGRATE_CMA]))
                        return true;
-               }
 #endif
        }
        return false;
@@ -2853,13 +2845,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
                unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
        long free_pages = zone_page_state(z, NR_FREE_PAGES);
-       long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-       /* If allocation can't use CMA areas don't use free CMA pages */
-       if (!(alloc_flags & ALLOC_CMA))
-               cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
        /*
         * Fast check for order-0 only. If this fails then the reserves
@@ -2868,7 +2853,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
         * the caller is !atomic then it'll uselessly search the free
         * list. That corner case is then slower but it is harmless.
         */
-       if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+       if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
                return true;
 
        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
@@ -3475,10 +3460,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                                 unlikely(test_thread_flag(TIF_MEMDIE))))
                        alloc_flags |= ALLOC_NO_WATERMARKS;
        }
-#ifdef CONFIG_CMA
-       if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-               alloc_flags |= ALLOC_CMA;
-#endif
+
        return alloc_flags;
 }
 
@@ -3833,9 +3815,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
-       if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-               alloc_flags |= ALLOC_CMA;
-
 retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
 
-- 
1.9.1
