From: Michal Hocko <mho...@suse.com>

kmem_zalloc_large and _xfs_buf_map_pages use the
memalloc_noio_{save,restore} API to prevent reclaim recursion into the
fs, because vmalloc can invoke unconditional GFP_KERNEL allocations and
these functions might be called from NOFS contexts. memalloc_noio_save
enforces a GFP_NOIO context, which is even more restrictive than
GFP_NOFS, and that is unnecessary here. Let's use
memalloc_nofs_{save,restore} instead, as it provides exactly what we
need here - an implicit GFP_NOFS context.
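
For reference, the conversion keeps the usual save/restore pattern
around the allocation that must not recurse into the filesystem; a
minimal sketch of that pattern (mirroring the kmem_zalloc_large hunk
below, not a verbatim excerpt):

	unsigned int nofs_flag;

	/*
	 * Enter a scoped NOFS section: reclaim triggered by any
	 * allocation below behaves as if GFP_NOFS had been passed,
	 * including the implicit GFP_KERNEL allocations done by
	 * vmalloc for page tables and other auxiliary structures.
	 */
	nofs_flag = memalloc_nofs_save();
	ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	memalloc_nofs_restore(nofs_flag);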

Changes since v1:
- s@memalloc_noio_restore@memalloc_nofs_restore@ in _xfs_buf_map_pages
  as per Brian Foster

Acked-by: Vlastimil Babka <vba...@suse.cz>
Reviewed-by: Brian Foster <bfos...@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.w...@oracle.com>
Signed-off-by: Michal Hocko <mho...@suse.com>
---
 fs/xfs/kmem.c    | 12 ++++++------
 fs/xfs/xfs_buf.c |  8 ++++----
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index a76a05dae96b..0c9f94f41b6c 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -65,7 +65,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
-       unsigned noio_flag = 0;
+       unsigned nofs_flag = 0;
        void    *ptr;
        gfp_t   lflags;
 
@@ -77,17 +77,17 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
         * __vmalloc() will allocate data pages and auxillary structures (e.g.
         * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
         * here. Hence we need to tell memory reclaim that we are in such a
-        * context via PF_MEMALLOC_NOIO to prevent memory reclaim re-entering
+        * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
         * the filesystem here and potentially deadlocking.
         */
-       if ((current->flags & PF_MEMALLOC_NOFS) || (flags & KM_NOFS))
-               noio_flag = memalloc_noio_save();
+       if (flags & KM_NOFS)
+               nofs_flag = memalloc_nofs_save();
 
        lflags = kmem_flags_convert(flags);
        ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 
-       if ((current->flags & PF_MEMALLOC_NOFS) || (flags & KM_NOFS))
-               memalloc_noio_restore(noio_flag);
+       if (flags & KM_NOFS)
+               memalloc_nofs_restore(nofs_flag);
 
        return ptr;
 }
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 8c7d01b75922..676a9ae75b9a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -442,17 +442,17 @@ _xfs_buf_map_pages(
                bp->b_addr = NULL;
        } else {
                int retried = 0;
-               unsigned noio_flag;
+               unsigned nofs_flag;
 
                /*
                 * vm_map_ram() will allocate auxillary structures (e.g.
                 * pagetables) with GFP_KERNEL, yet we are likely to be under
                 * GFP_NOFS context here. Hence we need to tell memory reclaim
-                * that we are in such a context via PF_MEMALLOC_NOIO to prevent
+                * that we are in such a context via PF_MEMALLOC_NOFS to prevent
                 * memory reclaim re-entering the filesystem here and
                 * potentially deadlocking.
                 */
-               noio_flag = memalloc_noio_save();
+               nofs_flag = memalloc_nofs_save();
                do {
                        bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
                                                -1, PAGE_KERNEL);
@@ -460,7 +460,7 @@ _xfs_buf_map_pages(
                                break;
                        vm_unmap_aliases();
                } while (retried++ <= 1);
-               memalloc_noio_restore(noio_flag);
+               memalloc_nofs_restore(nofs_flag);
 
                if (!bp->b_addr)
                        return -ENOMEM;
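
For illustration, a caller that already holds filesystem locks would
request the allocation with KM_NOFS, which now enters the scoped NOFS
section instead of the stricter NOIO one (hypothetical caller, not part
of this patch):

	/* fs locks held: reclaim must not re-enter XFS */
	ptr = kmem_zalloc_large(size, KM_NOFS);
	if (!ptr)
		return -ENOMEM;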
-- 
2.11.0
