From: Peter Zijlstra <[email protected]>

Use the other page_table_lock pattern; this moves the free out from
under the lock, reducing worst-case hold times and making it a leaf
lock.
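
A minimal sketch of the pattern in isolation (illustrative only, not
part of the patch; 'table_lock', 'slot' and 'install_entry()' are
hypothetical stand-ins for shadow_table_allocation_lock and the
p4d/pud/pmd entries touched below):

  #include <linux/gfp.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(table_lock);
  static unsigned long slot;      /* stand-in for a page-table entry */

  static void install_entry(void)
  {
          /* Allocate before taking the lock. */
          unsigned long new_page = get_zeroed_page(GFP_KERNEL);

          if (!new_page)
                  return;

          spin_lock(&table_lock);
          if (!slot) {
                  slot = new_page;        /* still empty: publish ours */
                  new_page = 0;           /* ownership transferred */
          }
          spin_unlock(&table_lock);

          /* Lost the race: free outside the lock, keeping hold times short. */
          if (new_page)
                  free_page(new_page);
  }

Since free_page() can itself take page-allocator locks, calling it
outside table_lock means no other lock is ever taken while table_lock
is held, which is what makes it a leaf lock.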

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/mm/kaiser.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 0282169ede18..0ff502fa655b 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -196,11 +196,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (p4d_none(*p4d))
+               if (p4d_none(*p4d)) {
                        set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
-               else
-                       free_page(new_pud_page);
+                       new_pud_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pud_page)
+                       free_page(new_pud_page);
        }
 
        pud = pud_offset(p4d, address);
@@ -215,11 +217,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (pud_none(*pud))
+               if (pud_none(*pud)) {
                        set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
-               else
-                       free_page(new_pmd_page);
+                       new_pmd_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pmd_page)
+                       free_page(new_pmd_page);
        }
 
        pmd = pmd_offset(pud, address);
@@ -234,11 +238,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
                        return NULL;
 
                spin_lock(&shadow_table_allocation_lock);
-               if (pmd_none(*pmd))
+               if (pmd_none(*pmd)) {
                        set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
-               else
-                       free_page(new_pte_page);
+                       new_pte_page = 0;
+               }
                spin_unlock(&shadow_table_allocation_lock);
+               if (new_pte_page)
+                       free_page(new_pte_page);
        }
 
        pte = pte_offset_kernel(pmd, address);
-- 
2.14.1
