Move the logic related to pages array creation into a separate helper,
alloc_vm_area_array(), so that it can also be used by the vrealloc()
call, whose implementation will follow in a later patch.
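
A minimal sketch of how such a caller might use the new helper (the
surrounding function, its name and its error handling are illustrative
assumptions, not part of this patch):

	/* Hypothetical caller: set up the pages array for a fresh area. */
	static int example_prepare_area(struct vm_struct *area,
					gfp_t gfp_mask, int node)
	{
		int err;

		/* Fills area->pages and area->nr_pages, or fails cleanly. */
		err = alloc_vm_area_array(area, gfp_mask, node);
		if (err)
			return err;	/* -EINVAL or -ENOMEM */

		/* The pages themselves still have to be allocated and mapped. */
		return 0;
	}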

Signed-off-by: Roman Penyaev <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Joe Perches <[email protected]>
Cc: "Luis R. Rodriguez" <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
 mm/vmalloc.c | 36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4851b4a67f55..ad6cd807f6db 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1662,21 +1662,26 @@ EXPORT_SYMBOL(vmap);
 static void *__vmalloc_node(unsigned long size, unsigned long align,
                            gfp_t gfp_mask, pgprot_t prot,
                            int node, const void *caller);
-static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                pgprot_t prot, int node)
+
+static int alloc_vm_area_array(struct vm_struct *area, gfp_t gfp_mask, int node)
 {
+       unsigned int nr_pages, array_size;
        struct page **pages;
-       unsigned int nr_pages, array_size, i;
+
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-       const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
        const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
                                        0 :
                                        __GFP_HIGHMEM;
 
+       if (WARN_ON(area->pages))
+               return -EINVAL;
+
        nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+       if (!nr_pages)
+               return -EINVAL;
+
        array_size = (nr_pages * sizeof(struct page *));
 
-       area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -1684,8 +1689,25 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
        }
+       if (!pages)
+               return -ENOMEM;
+
+       area->nr_pages = nr_pages;
        area->pages = pages;
-       if (!area->pages) {
+
+       return 0;
+}
+
+static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+                                pgprot_t prot, int node)
+{
+       const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+       const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
+                                       0 :
+                                       __GFP_HIGHMEM;
+       unsigned int i;
+
+       if (alloc_vm_area_array(area, gfp_mask, node)) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
@@ -1709,7 +1731,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        cond_resched();
        }
 
-       if (map_vm_area(area, prot, pages))
+       if (map_vm_area(area, prot, area->pages))
                goto fail;
        return area->addr;
 
-- 
2.19.1
