The 32-bit x86 PAT code uses __set_pmd_pte() to update pmds. It currently iterates the global pgd_list, but we don't need that list: we can walk the task list under RCU instead. (This code already holds the pgd_lock.)
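
For reference, the locking pattern being adopted looks roughly like the
sketch below. This is a minimal illustration only: the for_each_user_pgd()
helper name and its callback parameter are made up for this example and are
not part of the patch, which open-codes the walk in __set_pmd_pte().

	/* Illustrative helper, assuming the caller already holds pgd_lock: */
	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/rcupdate.h>
	#include <asm/pgtable.h>

	static void for_each_user_pgd(void (*fn)(pgd_t *pgd, unsigned long addr),
				      unsigned long address)
	{
		struct task_struct *g, *p;

		rcu_read_lock();			/* pins the task list */
		for_each_process_thread(g, p) {
			if (!p->mm)			/* kernel threads have no mm */
				continue;

			/* serialize against this mm's pagetable setup/teardown */
			spin_lock(&p->mm->page_table_lock);
			fn(p->mm->pgd + pgd_index(address), address);
			spin_unlock(&p->mm->page_table_lock);
		}
		rcu_read_unlock();
	}

The RCU read-side section keeps the task list stable during the walk, and
each mm's page_table_lock keeps its pgd from going away under us. Threads
sharing an mm will revisit the same pgd, which should be harmless here since
the store is a (idempotent) atomic PTE update.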
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Waiman Long <waiman.l...@hp.com>
Cc: linux...@vger.kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/mm/pageattr.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 997fc97e9072..93c134fdb398 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -438,18 +438,31 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	set_pte_atomic(kpte, pte);
 #ifdef CONFIG_X86_32
 	if (!SHARED_KERNEL_PMD) {
-		struct page *page;
+		struct task_struct *g, *p;
 
-		list_for_each_entry(page, &pgd_list, lru) {
+		rcu_read_lock();
+
+		for_each_process_thread(g, p) {
+			spinlock_t *pgt_lock;
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
 
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (!p->mm)
+				continue;
+
+			pgt_lock = &p->mm->page_table_lock;
+			spin_lock(pgt_lock);
+
+			pgd = p->mm->pgd + pgd_index(address);
 			pud = pud_offset(pgd, address);
 			pmd = pmd_offset(pud, address);
 			set_pte_atomic((pte_t *)pmd, pte);
+
+			spin_unlock(pgt_lock);
 		}
+
+		rcu_read_unlock();
 	}
 #endif
 }
-- 
2.1.4