When DEBUG_PAGEALLOC is not set, there is no need for the complicated
cpa pool, because no recursion is possible in the memory allocator.

So compile this code out with an #ifdef in that case. This saves some
memory because the pool won't be needed for normal operation.

Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>

---
 arch/x86/mm/pageattr.c |   27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

Index: linux/arch/x86/mm/pageattr.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr.c
+++ linux/arch/x86/mm/pageattr.c
@@ -345,6 +345,8 @@ out_unlock:
        return do_split;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+
 static LIST_HEAD(page_pool);
 static unsigned long pool_size, pool_pages, pool_low;
 static unsigned long pool_used, pool_failed, pool_refill;
@@ -365,14 +367,12 @@ static void cpa_fill_pool(void)
        if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
                return;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * We could do:
         * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
         * but this fails on !PREEMPT kernels
         */
        gfp =  GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-#endif
 
        while (pool_pages < pool_size) {
                p = alloc_pages(gfp, 0);
@@ -416,6 +416,16 @@ void __init cpa_init(void)
               pool_pages, pool_size);
 }
 
+#else
+void __init cpa_init(void)
+{
+}
+
+static inline void cpa_fill_pool(void)
+{
+}
+#endif
+
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
        unsigned long flags, pfn, pfninc = 1;
@@ -424,12 +434,19 @@ static int split_large_page(pte_t *kpte,
        pgprot_t ref_prot;
        struct page *base;
 
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       base = alloc_page(GFP_KERNEL);
+       if (!base)
+               return -ENOMEM;
+#endif
+
        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
+#ifdef CONFIG_DEBUG_PAGEALLOC
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                return -ENOMEM;
@@ -441,6 +458,7 @@ static int split_large_page(pte_t *kpte,
 
        if (pool_pages < pool_low)
                pool_low = pool_pages;
+#endif
 
        /*
         * Check for races, another CPU might have split this page
@@ -486,6 +504,7 @@ static int split_large_page(pte_t *kpte,
        base = NULL;
 
 out_unlock:
+#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
@@ -495,6 +514,10 @@ out_unlock:
                pool_pages++;
        } else
                pool_used++;
+#else
+       if (base)
+               __free_page(base);
+#endif
        spin_unlock_irqrestore(&pgd_lock, flags);
 
        return 0;
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to