Fix large page support in TCG. The old code would overwrite the large page table entry with the fake 4 KB one generated here whenever the referenced/changed bits were updated, causing it to point to the wrong area of memory. Instead of creating a fake PTE, just update the real address (ctx->raddr) at the end of the lookup.
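
For illustration, here is a minimal standalone sketch (not QEMU code; the *_SKETCH names and split_large_page() are hypothetical stand-ins for the QEMU definitions) of the address math this patch moves to the end of _find_pte(): the TLB only holds 4 KB entries, so a large page is split into 4 KB chunks by folding the effective-address bits between the 4 KB boundary and the large-page boundary into the real address.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TARGET_PAGE_BITS_SKETCH 12   /* TLB granule: 4 KB pages */
#define TARGET_PAGE_MASK_SKETCH (~(((uint64_t)1 << TARGET_PAGE_BITS_SKETCH) - 1))

/* Fold the sub-large-page offset of eaddr into the 4 KB-aligned raddr,
 * as the patch now does once with ctx->raddr instead of faking a PTE. */
static uint64_t split_large_page(uint64_t raddr, uint64_t eaddr,
                                 int target_page_bits)
{
    if (target_page_bits != TARGET_PAGE_BITS_SKETCH)
        raddr |= (eaddr & (((uint64_t)1 << target_page_bits) - 1))
                 & TARGET_PAGE_MASK_SKETCH;
    return raddr;
}

int main(void)
{
    /* A 16 MB large page (target_page_bits = 24) backed at real address
     * 0x2000000; an access at effective address 0x10345678 falls in the
     * 4 KB chunk at offset 0x345000, so the TLB entry maps 0x2345000. */
    printf("0x%" PRIx64 "\n", split_large_page(0x2000000, 0x10345678, 24));
    return 0;
}

The old code applied the same OR to pte1 itself, so when the referenced/changed bits were updated and pte1 was written back, the faked bits landed in the real hash table entry.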

Signed-off-by: Nathan Whitehorn <nwhiteh...@freebsd.org>
---
 target-ppc/helper.c |   11 +++++------
 1 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/target-ppc/helper.c b/target-ppc/helper.c
index 928fbcf..0f5ad2e 100644
--- a/target-ppc/helper.c
+++ b/target-ppc/helper.c
@@ -597,12 +597,6 @@ static inline int _find_pte(CPUState *env, mmu_ctx_t *ctx, int is_64b, int h,
                 pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
             }

-            /* We have a TLB that saves 4K pages, so let's
-             * split a huge page to 4k chunks */
-            if (target_page_bits != TARGET_PAGE_BITS)
-                pte1 |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
-                    & TARGET_PAGE_MASK;
-
             r = pte64_check(ctx, pte0, pte1, h, rw, type);
             LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                     TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
@@ -678,6 +672,11 @@ static inline int _find_pte(CPUState *env, mmu_ctx_t *ctx, int is_64b, int h,
         }
     }

+    /* We have a TLB that saves 4K pages, so let's
+     * split a huge page to 4k chunks */
+    if (target_page_bits != TARGET_PAGE_BITS)
+        ctx->raddr |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
+            & TARGET_PAGE_MASK;
     return ret;
 }

--
1.7.9

