From: Peter Zijlstra <pet...@infradead.org>

Use the other page_table_lock pattern; this removes the free from
under the lock, reducing worst case hold times and makes it a leaf
lock.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/20171127085906.uth5hldrtbbqs...@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/mm/kaiser.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 0282169ede18..0ff502fa655b 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -196,11 +196,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (p4d_none(*p4d))
+               if (p4d_none(*p4d)) {
                        set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
-               else
-                       free_page(new_pud_page);
+                       new_pud_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pud_page)
+                       free_page(new_pud_page);
        }
 
        pud = pud_offset(p4d, address);
@@ -215,11 +217,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (pud_none(*pud))
+               if (pud_none(*pud)) {
                        set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
-               else
-                       free_page(new_pmd_page);
+                       new_pmd_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pmd_page)
+                       free_page(new_pmd_page);
        }
 
        pmd = pmd_offset(pud, address);
@@ -234,11 +238,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (pmd_none(*pmd))
+               if (pmd_none(*pmd)) {
                        set_pmd(pmd, __pmd(_KERNPG_TABLE  | __pa(new_pte_page)));
-               else
-                       free_page(new_pte_page);
+                       new_pte_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pte_page)
+                       free_page(new_pte_page);
        }
 
        pte = pte_offset_kernel(pmd, address);
-- 
2.14.1

Reply via email to