Re: [PATCH v10 07/14] mm: honor PF_MEMALLOC_PIN for all movable pages

2021-02-11  kernel test robot
Hi Pavel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on kselftest/next]
[also build test ERROR on tip/sched/core tip/perf/core linux/master linus/master v5.11-rc7 next-20210211]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Pavel-Tatashin/prohibit-pinning-pages-in-ZONE_MOVABLE/20210212-002530
base:   https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git next
config: i386-randconfig-m021-20210209 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/44c3952d1b1e3998d6fa67b610bf24fa4a4b134c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Pavel-Tatashin/prohibit-pinning-pages-in-ZONE_MOVABLE/20210212-002530
        git checkout 44c3952d1b1e3998d6fa67b610bf24fa4a4b134c
        # save the attached .config to linux build tree
        make W=1 ARCH=i386

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot 

All errors (new ones prefixed by >>):

   In file included from arch/x86/include/asm/page.h:76,
from arch/x86/include/asm/thread_info.h:12,
from include/linux/thread_info.h:58,
from arch/x86/include/asm/preempt.h:7,
from include/linux/preempt.h:78,
from include/linux/spinlock.h:51,
from include/linux/mmzone.h:8,
from include/linux/gfp.h:6,
from include/linux/slab.h:15,
from include/linux/crypto.h:20,
from arch/x86/kernel/asm-offsets.c:9:
   include/linux/mm.h: In function 'is_pinnable_page':
   include/asm-generic/memory_model.h:64:14: error: implicit declaration of function 'page_to_section'; did you mean 'present_section'? [-Werror=implicit-function-declaration]
      64 |  int __sec = page_to_section(__pg);   \
         |              ^~~~~~~~~~~~~~~
   include/asm-generic/memory_model.h:81:21: note: in expansion of macro '__page_to_pfn'
      81 | #define page_to_pfn __page_to_pfn
         |                     ^~~~~~~~~~~~~
   include/linux/mmzone.h:94:32: note: in expansion of macro 'page_to_pfn'
      94 |  get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
         |                                ^~~~~~~~~~~
   include/linux/mmzone.h:74:39: note: in expansion of macro 'get_pageblock_migratetype'
      74 | #  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
         |                                       ^~~~~~~~~~~~~~~~~~~~~~~~~
   include/linux/mm.h:1128:41: note: in expansion of macro 'is_migrate_cma_page'
    1128 |  return !is_zone_movable_page(page) && !is_migrate_cma_page(page);
         |                                         ^~~~~~~~~~~~~~~~~~~
   In file included from include/linux/kallsyms.h:12,
from include/linux/bpf.h:21,
from include/linux/bpf-cgroup.h:5,
from include/linux/cgroup-defs.h:22,
from include/linux/cgroup.h:28,
from include/linux/memcontrol.h:13,
from include/linux/swap.h:9,
from include/linux/suspend.h:5,
from arch/x86/kernel/asm-offsets.c:13:
   include/linux/mm.h: At top level:
>> include/linux/mm.h:1502:29: error: conflicting types for 'page_to_section'
    1502 | static inline unsigned long page_to_section(const struct page *page)
         |                             ^~~~~~~~~~~~~~~
   In file included from arch/x86/include/asm/page.h:76,
from arch/x86/include/asm/thread_info.h:12,
from include/linux/thread_info.h:58,
from arch/x86/include/asm/preempt.h:7,
from include/linux/preempt.h:78,
from include/linux/spinlock.h:51,
from include/linux/mmzone.h:8,
from include/linux/gfp.h:6,
from include/linux/slab.h:15,
from include/linux/crypto.h:20,
from arch/x86/kernel/asm-offsets.c:9:
   include/asm-generic/memory_model.h:64:14: note: previous implicit declaration of 'page_to_section' was here
      64 |  int __sec = page_to_section(__pg);   \
         |              ^~~~~~~~~~~~~~~
   include/asm-generic/memory_model.h:81:21: note: in expansion of macro '__page_to_pfn'
      81 | #define page_to_pfn __page_to_pfn
         |                     ^~~~~~~~~~~~~
   include/linux/mmzone.h:94:32: note: in expansion of macro 'page_to_pfn'
      94 |  get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
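
The failure mode is plain C use-before-declaration rather than anything specific to the pinning logic: on this SPARSEMEM config, is_pinnable_page() sits near the top of include/linux/mm.h and expands is_migrate_cma_page() -> page_to_pfn() -> page_to_section(), while page_to_section() is only defined further down in the same header. A stand-alone sketch (hypothetical names, not kernel code) reproduces both diagnostics when built with 'gcc -std=gnu89 -Werror=implicit-function-declaration -c repro.c':

	/* repro.c -- use before declaration inside a static inline */
	static inline int is_pinnable(int pg)
	{
		/*
		 * gnu89: 'section_of' is implicitly declared as
		 * 'int section_of()' here, triggering
		 * -Werror=implicit-function-declaration.
		 */
		return section_of(pg);
	}

	/*
	 * The real definition then disagrees with the implicit
	 * declaration above, giving "conflicting types for 'section_of'".
	 */
	unsigned long section_of(int pg)
	{
		return (unsigned long)pg;
	}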

[PATCH v10 07/14] mm: honor PF_MEMALLOC_PIN for all movable pages

2021-02-11  Pavel Tatashin
PF_MEMALLOC_PIN is currently only honored for CMA pages; extend the
flag to cover any allocation from ZONE_MOVABLE by removing
__GFP_MOVABLE from the gfp_mask when the flag is set in the current
context.

Add is_pinnable_page(), which returns true if a page is pinnable. A
pinnable page is neither in ZONE_MOVABLE nor of MIGRATE_CMA type.
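
For illustration only, a pinning user is expected to bracket its allocation with the memalloc_pin_save()/memalloc_pin_restore() helpers (the renamed memalloc_nocma_save/restore from earlier in this series); the sketch below is not part of this patch:

	unsigned int flags;
	struct page *page;

	flags = memalloc_pin_save();	/* sets PF_MEMALLOC_PIN on current */
	/*
	 * With PF_MEMALLOC_PIN set, current_gfp_context() strips
	 * __GFP_MOVABLE and ALLOC_CMA is not set, so the page should come
	 * from neither ZONE_MOVABLE nor a CMA pageblock, i.e.
	 * is_pinnable_page(page) should hold.
	 */
	page = alloc_page(GFP_HIGHUSER_MOVABLE);
	memalloc_pin_restore(flags);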

Signed-off-by: Pavel Tatashin 
Acked-by: Michal Hocko 
---
 include/linux/mm.h       | 18 ++++++++++++++++++
 include/linux/sched/mm.h |  6 +++++-
 mm/hugetlb.c             |  2 +-
 mm/page_alloc.c          | 20 +++++++-------------
 4 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 89fca443e6f1..9a31b2298c1d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1122,6 +1122,24 @@ static inline bool is_zone_device_page(const struct page *page)
 }
 #endif
 
+static inline bool is_zone_movable_page(const struct page *page)
+{
+   return page_zonenum(page) == ZONE_MOVABLE;
+}
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
+#ifdef CONFIG_MIGRATION
+static inline bool is_pinnable_page(struct page *page)
+{
+   return !is_zone_movable_page(page) && !is_migrate_cma_page(page);
+}
+#else
+static inline bool is_pinnable_page(struct page *page)
+{
+   return true;
+}
+#endif
+
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 void free_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 5f4dd3274734..a55277b0d475 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -150,12 +150,13 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
+ * PF_MEMALLOC_PIN  implies !__GFP_MOVABLE
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
unsigned int pflags = READ_ONCE(current->flags);
 
-   if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+   if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
/*
 * NOIO implies both NOIO and NOFS and it is a weaker context
 * so always make sure it takes precedence
@@ -164,6 +165,9 @@ static inline gfp_t current_gfp_context(gfp_t flags)
flags &= ~(__GFP_IO | __GFP_FS);
else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
+
+   if (pflags & PF_MEMALLOC_PIN)
+   flags &= ~__GFP_MOVABLE;
}
return flags;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1d909879c1b4..90c4d279dec4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1047,7 +1047,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-   if (pin && is_migrate_cma_page(page))
+   if (pin && !is_pinnable_page(page))
continue;
 
if (PageHWPoison(page))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 92f1741285c1..d21d3c12aa31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3808,16 +3808,13 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
return alloc_flags;
 }
 
-static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
-   unsigned int alloc_flags)
+/* Must be called after current_gfp_context() which can change gfp_mask */
+static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
+ unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-   unsigned int pflags = current->flags;
-
-   if (!(pflags & PF_MEMALLOC_PIN) &&
-   gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+   if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
-
 #endif
return alloc_flags;
 }
@@ -4473,7 +4470,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
} else if (unlikely(rt_task(current)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;
 
-   alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+   alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
 
return alloc_flags;
 }
@@ -4775,7 +4772,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
if (reserve_flags)
-   alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
+   alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
 
/*
 * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4944,7 +4941,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
	if (should_fail_alloc_page(gfp_mask, order))