Re: [Qemu-devel] [PATCH -V6 2/3] target-ppc: Fix page table lookup with kvm enabled

2013-10-27 Thread Alexander Graf

[Qemu-devel] [PATCH -V6 2/3] target-ppc: Fix page table lookup with kvm enabled

2013-10-15 Thread Aneesh Kumar K.V
From: "Aneesh Kumar K.V" 

With kvm enabled, the hash page table is stored in the hypervisor. Use an
ioctl to read the htab contents. Without this, we get the error below when
trying to read a guest address:

 (gdb) x/10 do_fork
 0xc0098660 <do_fork>:   Cannot access memory at address 0xc0098660
 (gdb)
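
(For context: the session above is gdb attached to QEMU's gdbstub, which has
to walk the guest hash page table to translate do_fork's address. Assuming a
guest vmlinux with symbols and the default gdbstub port, something along
these lines reproduces it; the "..." stands for the rest of the machine
options.)

 qemu-system-ppc64 ... -s          # -s is shorthand for -gdb tcp::1234
 gdb vmlinux
 (gdb) target remote :1234
 (gdb) x/10 do_fork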

Signed-off-by: Aneesh Kumar K.V 
---
Changes from V5:

* Added two new patches
* Address review comments

 hw/ppc/spapr_hcall.c    | 47
 target-ppc/kvm.c        | 53
 target-ppc/kvm_ppc.h    | 19
 target-ppc/mmu-hash64.c | 77
 target-ppc/mmu-hash64.h | 23
 5 files changed, 181 insertions(+), 38 deletions(-)
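
The hunks below replace direct ppc_hash64_load_hpte0(env, hpte) reads with a
token-based pattern: ppc_hash64_start_access() returns an opaque token plus an
htab_fd flag, ppc_hash64_load_hpte0/1() read HPTEs by index off that token,
and ppc_hash64_stop_access(token, htab_fd) releases it. As a rough standalone
model of that pattern -- not QEMU code, every name and type below is made up
for illustration:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define HPTES_PER_GROUP 8

/* Toy HPTE: two 64-bit words, like pte0/pte1 in the patch. */
typedef struct { uint64_t pte0, pte1; } hpte_t;

/* Stand-in for a hash table QEMU can touch directly (the non-KVM case). */
static hpte_t local_htab[64];

/* Pretend the hypervisor owns the hash table. */
static bool kvm_managed_htab = true;

/* Stand-in for pulling one PTE group out of the hypervisor; in the real
 * patch this is where the htab-fd read would happen. */
static hpte_t *fetch_remote_pteg(unsigned long pte_index)
{
    hpte_t *buf = calloc(HPTES_PER_GROUP, sizeof(*buf));
    /* ... the kernel would fill buf here ... */
    (void)pte_index;
    return buf;
}

static void *start_access(unsigned long pte_index, bool *from_fd)
{
    if (kvm_managed_htab) {
        *from_fd = true;
        return fetch_remote_pteg(pte_index);  /* a copy; must be freed */
    }
    *from_fd = false;
    return &local_htab[pte_index];            /* direct pointer, no copy */
}

static uint64_t load_hpte0(void *token, int index)
{
    return ((hpte_t *)token)[index].pte0;
}

static void stop_access(void *token, bool from_fd)
{
    if (from_fd) {
        free(token);                          /* only the copy is freed */
    }
}

int main(void)
{
    bool from_fd;
    void *token = start_access(0, &from_fd);

    for (int i = 0; i < HPTES_PER_GROUP; i++) {
        printf("slot %d: pte0 = 0x%" PRIx64 "\n", i, load_hpte0(token, i));
    }
    stop_access(token, from_fd);
    return 0;
}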

diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index f10ba8a..e04bf6c 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -52,6 +52,8 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 target_ulong raddr;
 target_ulong i;
 hwaddr hpte;
+void *token;
+bool htab_fd;
 
 /* only handle 4k and 16M pages for now */
 if (pteh & HPTE64_V_LARGE) {
@@ -94,25 +96,32 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
 return H_PARAMETER;
 }
+
+i = 0;
+hpte = pte_index * HASH_PTE_SIZE_64;
 if (likely((flags & H_EXACT) == 0)) {
 pte_index &= ~7ULL;
-hpte = pte_index * HASH_PTE_SIZE_64;
-for (i = 0; ; ++i) {
+token = ppc_hash64_start_access(cpu, pte_index, &htab_fd);
+do {
 if (i == 8) {
+ppc_hash64_stop_access(token, htab_fd);
 return H_PTEG_FULL;
 }
-if ((ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) == 0) {
+if ((ppc_hash64_load_hpte0(env, token, i) & HPTE64_V_VALID) == 0) {
 break;
 }
-hpte += HASH_PTE_SIZE_64;
-}
+} while (i++);
+ppc_hash64_stop_access(token, htab_fd);
 } else {
-i = 0;
-hpte = pte_index * HASH_PTE_SIZE_64;
-if (ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) {
+token = ppc_hash64_start_access(cpu, pte_index, &htab_fd);
+if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
+ppc_hash64_stop_access(token, htab_fd);
 return H_PTEG_FULL;
 }
+ppc_hash64_stop_access(token, htab_fd);
 }
+hpte += i * HASH_PTE_SIZE_64;
+
 ppc_hash64_store_hpte1(env, hpte, ptel);
 /* eieio();  FIXME: need some sort of barrier for smp? */
 ppc_hash64_store_hpte0(env, hpte, pteh | HPTE64_V_HPTE_DIRTY);
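
A side note on the slot search in the hunk just above: with i starting at 0,
"do { ... } while (i++);" tests the old value of i, so when slot 0 is valid
the condition is 0 on the first pass and the loop stops with i == 1 without
looking at slots 1-7 (or ever returning H_PTEG_FULL), whereas the replaced
for (i = 0; ; ++i) loop walked the whole group. A standalone snippet showing
just that loop-shape difference -- nothing here is QEMU code, valid[] merely
stands in for the HPTE64_V_VALID checks:

#include <stdio.h>
#include <stdbool.h>

/* valid[i] stands in for "HPTE64_V_VALID is set in slot i". */
static bool valid[8] = { true, true, false };   /* slots 0 and 1 occupied */

/* The do/while shape from the hunk above: exits after one pass when
 * slot 0 is occupied, because i++ yields the old value 0. */
static int search_do_while(void)
{
    int i = 0;
    do {
        if (i == 8) {
            return -1;              /* would be H_PTEG_FULL */
        }
        if (!valid[i]) {
            break;                  /* free slot found */
        }
    } while (i++);
    return i;
}

/* The for-loop shape the code had before the patch: scans the whole group. */
static int search_for(void)
{
    int i;
    for (i = 0; ; ++i) {
        if (i == 8) {
            return -1;              /* would be H_PTEG_FULL */
        }
        if (!valid[i]) {
            break;                  /* free slot found */
        }
    }
    return i;
}

int main(void)
{
    /* Prints "do/while: 1, for: 2"; the do/while variant settles on an
     * occupied slot. */
    printf("do/while: %d, for: %d\n", search_do_while(), search_for());
    return 0;
}
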
@@ -134,16 +143,18 @@ static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
 target_ulong *vp, target_ulong *rp)
 {
 hwaddr hpte;
+void *token;
+bool htab_fd;
 target_ulong v, r, rb;
 
 if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) {
 return REMOVE_PARM;
 }
 
-hpte = ptex * HASH_PTE_SIZE_64;
-
-v = ppc_hash64_load_hpte0(env, hpte);
-r = ppc_hash64_load_hpte1(env, hpte);
+token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex, &htab_fd);
+v = ppc_hash64_load_hpte0(env, token, 0);
+r = ppc_hash64_load_hpte1(env, token, 0);
+ppc_hash64_stop_access(token, htab_fd);
 
 if ((v & HPTE64_V_VALID) == 0 ||
 ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
@@ -152,6 +163,7 @@ static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
 }
 *vp = v;
 *rp = r;
+hpte = ptex * HASH_PTE_SIZE_64;
 ppc_hash64_store_hpte0(env, hpte, HPTE64_V_HPTE_DIRTY);
 rb = compute_tlbie_rb(v, r, ptex);
 ppc_tlb_invalidate_one(env, rb);
@@ -260,16 +272,18 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 target_ulong pte_index = args[1];
 target_ulong avpn = args[2];
 hwaddr hpte;
+void *token;
+bool htab_fd;
 target_ulong v, r, rb;
 
 if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
 return H_PARAMETER;
 }
 
-hpte = pte_index * HASH_PTE_SIZE_64;
-
-v = ppc_hash64_load_hpte0(env, hpte);
-r = ppc_hash64_load_hpte1(env, hpte);
+token = ppc_hash64_start_access(cpu, pte_index, &htab_fd);
+v = ppc_hash64_load_hpte0(env, token, 0);
+r = ppc_hash64_load_hpte1(env, token, 0);
+ppc_hash64_stop_access(token, htab_fd);
 
 if ((v & HPTE64_V_VALID) == 0 ||
 ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
@@ -282,6 +296,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 r |= (flags << 48) & HPTE64_R_KEY_HI;
 r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
 rb = compute_tlbie_rb(v, r, pte_index);
+hpte = pte_index * HASH_PTE_SIZE_64;
 ppc_hash64_store_hpte0(env, hpte, (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY);