From: Joerg Roedel <jroe...@suse.de>

The walk_pte_level() function uses pmd_page_vaddr(), i.e. __va, to get
the virtual address of the PTE page. That breaks when the PTE page is
in highmem and thus not part of the direct mapping, which can happen
with CONFIG_HIGHPTE=y.

The result is an unhandled kernel paging request at some random
address when reading the current_kernel or current_user debugfs
file.

Use the correct API to access PTE pages nd fix the oops.
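
For reference, a minimal sketch of the access pattern the patch switches
to (this is not the patched function; the helper name walk_one_pte_page,
the start_vaddr parameter and the pr_info() output are illustrative
only). With CONFIG_HIGHPTE=y the PTE page may live in highmem, so it has
to be temporarily mapped with pte_offset_map() and released again with
pte_unmap() instead of being dereferenced through a direct-mapping
address:

  #include <linux/kernel.h>	/* pr_info() */
  #include <linux/highmem.h>	/* pte_offset_map()/pte_unmap() */
  #include <asm/pgtable.h>

  /* Dump the flags of every PTE in the page-table page behind @pmd. */
  static void walk_one_pte_page(pmd_t pmd, unsigned long start_vaddr)
  {
  	int i;

  	for (i = 0; i < PTRS_PER_PTE; i++) {
  		unsigned long vaddr = start_vaddr + i * PAGE_SIZE;
  		pte_t *pte;

  		/*
  		 * Broken with CONFIG_HIGHPTE=y: pmd_page_vaddr() assumes
  		 * the PTE page has a direct-mapping (__va) address:
  		 *
  		 *	pte = (pte_t *)pmd_page_vaddr(pmd) + i;
  		 */

  		/* Correct: temporarily map the PTE page if needed. */
  		pte = pte_offset_map(&pmd, vaddr);
  		pr_info("0x%lx: flags 0x%llx\n", vaddr,
  			(u64)pte_flags(*pte));
  		pte_unmap(pte);
  	}
  }

Without CONFIG_HIGHPTE, pte_offset_map() collapses to the same
direct-mapping arithmetic as before, so the change costs nothing in the
common case.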

Fixes: fe770bf0310d ('x86: clean up the page table dumper and add 32-bit support')
Signed-off-by: Joerg Roedel <jroe...@suse.de>
---
 arch/x86/mm/dump_pagetables.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 956c886f5dff..db1d7a0e6335 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 
@@ -344,16 +345,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
 {
        int i;
-       pte_t *start;
+       pte_t *pte;
        pgprotval_t prot, eff;
 
-       start = (pte_t *)pmd_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PTE; i++) {
-               prot = pte_flags(*start);
-               eff = effective_prot(eff_in, prot);
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
+               pte = pte_offset_map(&addr, st->current_address);
+               prot = pte_flags(*pte);
+               eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
-               start++;
+               pte_unmap(pte);
        }
 }
 #ifdef CONFIG_KASAN
-- 
2.13.6
