At 02/15/2012 01:07 AM, Jan Kiszka Wrote:
> On 2012-02-09 04:21, Wen Congyang wrote:
>> Walk cpu's page table and collect all virtual address and physical address
>> mapping.
>> Then, add these mapping into memory mapping list.
>>
>> Signed-off-by: Wen Congyang <we...@cn.fujitsu.com>
>> ---
>>  Makefile.target         |    2 +-
>>  cpu-all.h               |    7 ++
>>  target-i386/arch-dump.c |  254 +++++++++++++++++++++++++++++++++++++++++++++++
>>  3 files changed, 262 insertions(+), 1 deletions(-)
>>  create mode 100644 target-i386/arch-dump.c
>>
>> diff --git a/Makefile.target b/Makefile.target
>> index e35e464..d6e5684 100644
>> --- a/Makefile.target
>> +++ b/Makefile.target
>> @@ -75,7 +75,7 @@ libobj-$(CONFIG_TCG_INTERPRETER) += tci.o
>>  libobj-y += fpu/softfloat.o
>>  libobj-y += op_helper.o helper.o
>>  ifeq ($(TARGET_BASE_ARCH), i386)
>> -libobj-y += cpuid.o
>> +libobj-y += cpuid.o arch-dump.o
>>  endif
>>  libobj-$(TARGET_SPARC64) += vis_helper.o
>>  libobj-$(CONFIG_NEED_MMU) += mmu.o
>> diff --git a/cpu-all.h b/cpu-all.h
>> index e2c3c49..4cd7fbb 100644
>> --- a/cpu-all.h
>> +++ b/cpu-all.h
>> @@ -22,6 +22,7 @@
>>  #include "qemu-common.h"
>>  #include "qemu-tls.h"
>>  #include "cpu-common.h"
>> +#include "memory_mapping.h"
>>
>>  /* some important defines:
>>   *
>> @@ -523,4 +524,10 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
>>  int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
>>                          uint8_t *buf, int len, int is_write);
>>
>> +#if defined(TARGET_I386)
>
> Instead of collecting archs here, you could introduce some
> HAVE_GET_MEMORY_MAPPING and let the targets that support that define it.

OK
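Just to make sure I understand the idea - something along these lines, I
suppose? (Only a rough sketch, not compiled; I am assuming the define can
live in the per-target header, e.g. target-i386/cpu.h.)

    /* target-i386/cpu.h (sketch) */
    #define HAVE_GET_MEMORY_MAPPING 1

    /* cpu-all.h (sketch) */
    #if defined(HAVE_GET_MEMORY_MAPPING)
    void cpu_get_memory_mapping(MemoryMappingList *list, CPUState *env);
    #else
    #define cpu_get_memory_mapping(list, env)
    #endif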
>
>> +void cpu_get_memory_mapping(MemoryMappingList *list, CPUState *env);
>> +#else
>> +#define cpu_get_memory_mapping(list, env)
>
> Better return an error from cpu_get_memory_mapping (and use static
> inline) so that the caller can find out and report that dumping is not
> supported for the current target.

OK, I will fix it.
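Something like this, then (again only a sketch, untested; I am guessing at
the exact return convention here - a plain -1, it could just as well be a
bool or -ENOSYS):

    #if defined(HAVE_GET_MEMORY_MAPPING)
    int cpu_get_memory_mapping(MemoryMappingList *list, CPUState *env);
    #else
    static inline int cpu_get_memory_mapping(MemoryMappingList *list,
                                             CPUState *env)
    {
        /* dumping guest memory mappings is not supported for this target */
        return -1;
    }
    #endif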
Thanks
Wen Congyang

>
>> +#endif
>> +
>>  #endif /* CPU_ALL_H */
>> diff --git a/target-i386/arch-dump.c b/target-i386/arch-dump.c
>> new file mode 100644
>> index 0000000..2e921c7
>> --- /dev/null
>> +++ b/target-i386/arch-dump.c
>> @@ -0,0 +1,254 @@
>> +/*
>> + * i386 dump
>> + *
>> + * Copyright Fujitsu, Corp. 2011
>> + *
>> + * Authors:
>> + *     Wen Congyang <we...@cn.fujitsu.com>
>> + *
>> + * This work is licensed under the terms of the GNU GPL, version 2. See
>> + * the COPYING file in the top-level directory.
>> + *
>> + */
>> +
>> +#include "cpu.h"
>> +#include "cpu-all.h"
>> +
>> +/* PAE Paging or IA-32e Paging */
>> +static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
>> +                     int32_t a20_mask, target_ulong start_line_addr)
>> +{
>> +    target_phys_addr_t pte_addr, start_paddr;
>> +    uint64_t pte;
>> +    target_ulong start_vaddr;
>> +    int i;
>> +
>> +    for (i = 0; i < 512; i++) {
>> +        pte_addr = (pte_start_addr + i * 8) & a20_mask;
>> +        pte = ldq_phys(pte_addr);
>> +        if (!(pte & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
>> +        if (is_io_addr(start_paddr)) {
>> +            /* I/O region */
>> +            continue;
>> +        }
>> +
>> +        start_vaddr = start_line_addr | ((i & 0x1fff) << 12);
>> +        add_to_memory_mapping(list, start_paddr, start_vaddr, 1 << 12);
>> +    }
>> +}
>> +
>> +/* 32-bit Paging */
>> +static void walk_pte2(MemoryMappingList *list,
>> +                      target_phys_addr_t pte_start_addr, int32_t a20_mask,
>> +                      target_ulong start_line_addr)
>> +{
>> +    target_phys_addr_t pte_addr, start_paddr;
>> +    uint32_t pte;
>> +    target_ulong start_vaddr;
>> +    int i;
>> +
>> +    for (i = 0; i < 1024; i++) {
>> +        pte_addr = (pte_start_addr + i * 4) & a20_mask;
>> +        pte = ldl_phys(pte_addr);
>> +        if (!(pte & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        start_paddr = pte & ~0xfff;
>> +        if (is_io_addr(start_paddr)) {
>> +            /* I/O region */
>> +            continue;
>> +        }
>> +
>> +        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
>> +        add_to_memory_mapping(list, start_paddr, start_vaddr, 1 << 12);
>> +    }
>> +}
>> +
>> +/* PAE Paging or IA-32e Paging */
>> +static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
>> +                     int32_t a20_mask, target_ulong start_line_addr)
>> +{
>> +    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
>> +    uint64_t pde;
>> +    target_ulong line_addr, start_vaddr;
>> +    int i;
>> +
>> +    for (i = 0; i < 512; i++) {
>> +        pde_addr = (pde_start_addr + i * 8) & a20_mask;
>> +        pde = ldq_phys(pde_addr);
>> +        if (!(pde & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        line_addr = start_line_addr | ((i & 0x1ff) << 21);
>> +        if (pde & PG_PSE_MASK) {
>> +            /* 2 MB page */
>> +            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
>> +            if (is_io_addr(start_paddr)) {
>> +                /* I/O region */
>> +                continue;
>> +            }
>> +            start_vaddr = line_addr;
>> +            add_to_memory_mapping(list, start_paddr, start_vaddr, 1 << 21);
>> +            continue;
>> +        }
>> +
>> +        pte_start_addr = (pde & ~0xfff) & a20_mask;
>> +        walk_pte(list, pte_start_addr, a20_mask, line_addr);
>> +    }
>> +}
>> +
>> +/* 32-bit Paging */
>> +static void walk_pde2(MemoryMappingList *list,
>> +                      target_phys_addr_t pde_start_addr, int32_t a20_mask,
>> +                      bool pse)
>> +{
>> +    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
>> +    uint32_t pde;
>> +    target_ulong line_addr, start_vaddr;
>> +    int i;
>> +
>> +    for (i = 0; i < 1024; i++) {
>> +        pde_addr = (pde_start_addr + i * 4) & a20_mask;
>> +        pde = ldl_phys(pde_addr);
>> +        if (!(pde & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        line_addr = (((unsigned int)i & 0x3ff) << 22);
>> +        if ((pde & PG_PSE_MASK) && pse) {
>> +            /* 4 MB page */
>> +            start_paddr = (pde & ~0x3fffff) | ((pde & 0x1fe000) << 19);
>> +            if (is_io_addr(start_paddr)) {
>> +                /* I/O region */
>> +                continue;
>> +            }
>> +            start_vaddr = line_addr;
>> +            add_to_memory_mapping(list, start_paddr, start_vaddr, 1 << 22);
>> +            continue;
>> +        }
>> +
>> +        pte_start_addr = (pde & ~0xfff) & a20_mask;
>> +        walk_pte2(list, pte_start_addr, a20_mask, line_addr);
>> +    }
>> +}
>> +
>> +/* PAE Paging */
>> +static void walk_pdpe2(MemoryMappingList *list,
>> +                       target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
>> +{
>> +    target_phys_addr_t pdpe_addr, pde_start_addr;
>> +    uint64_t pdpe;
>> +    target_ulong line_addr;
>> +    int i;
>> +
>> +    for (i = 0; i < 4; i++) {
>> +        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
>> +        pdpe = ldq_phys(pdpe_addr);
>> +        if (!(pdpe & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        line_addr = (((unsigned int)i & 0x3) << 30);
>> +        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
>> +        walk_pde(list, pde_start_addr, a20_mask, line_addr);
>> +    }
>> +}
>> +
>> +#ifdef TARGET_X86_64
>> +/* IA-32e Paging */
>> +static void walk_pdpe(MemoryMappingList *list,
>> +                      target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
>> +                      target_ulong start_line_addr)
>> +{
>> +    target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
>> +    uint64_t pdpe;
>> +    target_ulong line_addr, start_vaddr;
>> +    int i;
>> +
>> +    for (i = 0; i < 512; i++) {
>> +        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
>> +        pdpe = ldq_phys(pdpe_addr);
>> +        if (!(pdpe & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
>> +        if (pdpe & PG_PSE_MASK) {
>> +            /* 1 GB page */
>> +            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
>> +            if (is_io_addr(start_paddr)) {
>> +                /* I/O region */
>> +                continue;
>> +            }
>> +            start_vaddr = line_addr;
>> +            add_to_memory_mapping(list, start_paddr, start_vaddr, 1 << 30);
>> +            continue;
>> +        }
>> +
>> +        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
>> +        walk_pde(list, pde_start_addr, a20_mask, line_addr);
>> +    }
>> +}
>> +
>> +/* IA-32e Paging */
>> +static void walk_pml4e(MemoryMappingList *list,
>> +                       target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
>> +{
>> +    target_phys_addr_t pml4e_addr, pdpe_start_addr;
>> +    uint64_t pml4e;
>> +    target_ulong line_addr;
>> +    int i;
>> +
>> +    for (i = 0; i < 512; i++) {
>> +        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
>> +        pml4e = ldq_phys(pml4e_addr);
>> +        if (!(pml4e & PG_PRESENT_MASK)) {
>> +            /* not present */
>> +            continue;
>> +        }
>> +
>> +        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
>> +        pdpe_start_addr = (pml4e & ~0xfff) & a20_mask;
>> +        walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
>> +    }
>> +}
>> +#endif
>> +
>> +void cpu_get_memory_mapping(MemoryMappingList *list, CPUState *env)
>> +{
>> +    if (env->cr[4] & CR4_PAE_MASK) {
>> +#ifdef TARGET_X86_64
>> +        if (env->hflags & HF_LMA_MASK) {
>> +            target_phys_addr_t pml4e_addr;
>> +
>> +            pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
>> +            walk_pml4e(list, pml4e_addr, env->a20_mask);
>> +        } else
>> +#endif
>> +        {
>> +            target_phys_addr_t pdpe_addr;
>> +
>> +            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
>> +            walk_pdpe2(list, pdpe_addr, env->a20_mask);
>> +        }
>> +    } else {
>> +        target_phys_addr_t pde_addr;
>> +        bool pse;
>> +
>> +        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
>> +        pse = !!(env->cr[4] & CR4_PSE_MASK);
>> +        walk_pde2(list, pde_addr, env->a20_mask, pse);
>> +    }
>> +}
>
> I haven't checked all paging details, but it looks good otherwise.
>
> Jan
>