On Mon, 2016-11-21 at 05:32 -0800, Eric Dumazet wrote:

> 
> Oh, this was definitely my intent of course, thanks for noticing this
> typo ;)

V2 fixes this and brings back NUMA spreading
(e.g. alloc_large_system_hash() allocations done at boot time).
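
In short, __vmalloc_area_node() now tries to grab high-order chunks
first, using __GFP_NORETRY with __GFP_DIRECT_RECLAIM masked out so
that failed attempts stay cheap, split_page()s each chunk into
order-0 pages, and falls back to single pages with the original gfp
mask. Under MPOL_INTERLEAVE the chunk order is capped so that every
online node still gets its share, which is what keeps the N0=/N1=
counts balanced below: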


lpaa24:~# grep alloc_large /proc/vmallocinfo 
0xffffc90000009000-0xffffc9000000c000   12288 alloc_large_system_hash+0x178/0x238 pages=2 vmalloc N0=1 N1=1
0xffffc9000000c000-0xffffc9000000f000   12288 alloc_large_system_hash+0x178/0x238 pages=2 vmalloc N0=1 N1=1
0xffffc9000001e000-0xffffc9000009f000  528384 alloc_large_system_hash+0x178/0x238 pages=128 vmalloc N0=64 N1=64
0xffffc9000009f000-0xffffc900000e0000  266240 alloc_large_system_hash+0x178/0x238 pages=64 vmalloc N0=32 N1=32
0xffffc900001d3000-0xffffc900101d4000 268439552 alloc_large_system_hash+0x178/0x238 pages=65536 vmalloc vpages N0=32768 N1=32768
0xffffc900101d4000-0xffffc900181d5000 134221824 alloc_large_system_hash+0x178/0x238 pages=32768 vmalloc vpages N0=16384 N1=16384
0xffffc900181d5000-0xffffc900185d6000 4198400 alloc_large_system_hash+0x178/0x238 pages=1024 vmalloc vpages N0=512 N1=512
0xffffc900185d6000-0xffffc900189d7000 4198400 alloc_large_system_hash+0x178/0x238 pages=1024 vmalloc vpages N0=512 N1=512
0xffffc9001b271000-0xffffc9001b672000 4198400 alloc_large_system_hash+0x178/0x238 pages=1024 vmalloc vpages N0=512 N1=512
0xffffc9001b672000-0xffffc9001b675000   12288 alloc_large_system_hash+0x178/0x238 pages=2 vmalloc N0=1 N1=1
0xffffc9001b675000-0xffffc9001b776000 1052672 alloc_large_system_hash+0x178/0x238 pages=256 vmalloc N0=128 N1=128
0xffffc9001b776000-0xffffc9001b977000 2101248 alloc_large_system_hash+0x178/0x238 pages=512 vmalloc N0=256 N1=256
0xffffc9001b977000-0xffffc9001bb78000 2101248 alloc_large_system_hash+0x178/0x238 pages=512 vmalloc N0=256 N1=256
0xffffc9001c075000-0xffffc9001c176000 1052672 alloc_large_system_hash+0x178/0x238 pages=256 vmalloc N0=128 N1=128


 mm/vmalloc.c |   47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f2481cb4e6b2..f4b9c9238f86 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -21,6 +21,7 @@
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/list.h>
+#include <linux/mempolicy.h>
 #include <linux/notifier.h>
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
@@ -1602,9 +1603,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, int node)
 {
        struct page **pages;
-       unsigned int nr_pages, array_size, i;
+       unsigned int nr_pages, array_size, i, j;
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
        const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+       const gfp_t multi_alloc_mask = (alloc_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_NORETRY;
+       int max_node_order = MAX_ORDER - 1;
 
        nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
@@ -1624,20 +1627,48 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                return NULL;
        }
 
-       for (i = 0; i < area->nr_pages; i++) {
-               struct page *page;
+       if (IS_ENABLED(CONFIG_NUMA) && nr_online_nodes > 1) {
+               struct mempolicy *policy = current->mempolicy;
+               int pages_per_node;
 
-               if (node == NUMA_NO_NODE)
-                       page = alloc_page(alloc_mask);
-               else
-                       page = alloc_pages_node(node, alloc_mask, 0);
+               if (policy && policy->mode == MPOL_INTERLEAVE) {
+                       pages_per_node = DIV_ROUND_UP(nr_pages,
+                                                     nr_online_nodes);
+                       max_node_order = min(max_node_order,
+                                            ilog2(pages_per_node));
+               }
+       }
+
+       for (i = 0; i < area->nr_pages;) {
+               unsigned int chunk_order = min(ilog2(area->nr_pages - i),
+                                              max_node_order);
+               struct page *page = NULL;
+
+               while (chunk_order) {
+                       if (node == NUMA_NO_NODE)
+                               page = alloc_pages(multi_alloc_mask, chunk_order);
+                       else
+                               page = alloc_pages_node(node, multi_alloc_mask, chunk_order);
+                       if (page) {
+                               split_page(page, chunk_order);
+                               break;
+                       }
+                       chunk_order--;
+               }
+               if (!page) {
+                       if (node == NUMA_NO_NODE)
+                               page = alloc_pages(alloc_mask, 0);
+                       else
+                               page = alloc_pages_node(node, alloc_mask, 0);
+               }
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
-               area->pages[i] = page;
+               for (j = 0; j < (1U << chunk_order); j++)
+                       area->pages[i++] = page++;
                if (gfpflags_allow_blocking(gfp_mask))
                        cond_resched();
        }
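
For anyone who wants to poke at the two pieces of logic without
booting a kernel, here is a minimal userspace sketch. ilog2() and
DIV_ROUND_UP() are re-implemented from their kernel counterparts,
try_alloc_chunk() is a made-up stand-in for
alloc_pages(multi_alloc_mask, order), and the "nothing above order 3"
fragmentation threshold is invented purely for the demo:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE               4096UL
#define MAX_ORDER               11      /* x86-64 default */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* floor(log2(n)) for runtime values, like the kernel's ilog2() */
static unsigned int ilog2(unsigned long n)
{
        unsigned int log = 0;

        while (n >>= 1)
                log++;
        return log;
}

/*
 * The MPOL_INTERLEAVE cap from the first hunk: refuse chunk orders
 * so large that some online node would get no chunks at all.
 */
static unsigned int max_chunk_order(unsigned long nr_pages, int nr_online_nodes)
{
        unsigned int order = MAX_ORDER - 1;
        unsigned long pages_per_node = DIV_ROUND_UP(nr_pages, nr_online_nodes);

        if (ilog2(pages_per_node) < order)
                order = ilog2(pages_per_node);
        return order;
}

/*
 * Made-up stand-in for alloc_pages(multi_alloc_mask, order): pretend
 * the buddy lists are fragmented and nothing above order 3 is free.
 */
static void *try_alloc_chunk(unsigned int order)
{
        return order > 3 ? NULL : malloc(PAGE_SIZE << order);
}

int main(void)
{
        /* the pages=65536 entry from the vmallocinfo dump above */
        unsigned int chunk_order = max_chunk_order(65536, 2);
        void *chunk = NULL;

        printf("interleave cap: order %u\n", chunk_order);

        /* same shape as the patch's inner loop: cheap attempts at
         * decreasing orders, never sleeping in direct reclaim */
        while (chunk_order) {
                chunk = try_alloc_chunk(chunk_order);
                if (chunk)
                        break;
                chunk_order--;
        }
        /* final order-0 fallback, with the original gfp mask */
        if (!chunk)
                chunk = try_alloc_chunk(0);

        printf("got a %lu-page chunk\n", chunk ? 1UL << chunk_order : 0UL);
        free(chunk);
        return 0;
}

In the real kernel path a successful high-order allocation is then
split_page()ed into order-0 pages, so area->pages[] and the freeing
side in __vunmap() keep seeing plain order-0 pages.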

