On Tue, Jun 28, 2016 at 08:48:34AM +0200, Cédric Le Goater wrote:
> From: Benjamin Herrenschmidt <b...@kernel.crashing.org>
> 
> We were always advertising only 4K & 16M. Additionally the code wasn't
> properly matching the page size with the PTE content, which meant we
> could potentially hit an incorrect PTE if the guest used multiple sizes.
> 
> Finally, honor the CPU capabilities when decoding the size from the SLB
> so we don't try to use 64K pages on 970.
> 
> This still doesn't add support for MPSS (Multiple Page Sizes per Segment)
> 
> Signed-off-by: Benjamin Herrenschmidt <b...@kernel.crashing.org>
> [clg: fixed checkpatch.pl errors
>       commits 61a36c9b5a12 and 1114e712c998 reworked the hpte code
>       doing insertion/removal in hw/ppc/spapr_hcall.c. The hunks
>       modifying these areas were removed. ]
> Signed-off-by: Cédric Le Goater <c...@kaod.org>
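For readers following along, here is a minimal standalone sketch of the size
matching described above. It is not the QEMU code from the patch; the helper
name and the sample PTE values are illustrative. The idea is that the base
page shift comes from the SLB entry, and the low bits of the PTE's second
doubleword must agree with it before the PTE is accepted as a match.

/* Illustrative only: mirrors the checks the patch performs in
 * ppc_hash64_pte_size_decode(), outside of QEMU. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
{
    switch (slb_pshift) {
    case 12:                          /* 4K: no size bits to check */
        return 12;
    case 16:                          /* 64K: size bits must read 0x1 */
        return (pte1 & 0xf000) == 0x1000 ? 16 : 0;
    case 24:                          /* 16M: low size bits must be clear */
        return (pte1 & 0xff000) == 0 ? 24 : 0;
    default:                          /* unknown segment size */
        return 0;
    }
}

int main(void)
{
    /* A 64K segment whose PTE carries the 64K encoding matches... */
    printf("64K segment, 64K PTE -> shift %u\n", pte_size_decode(0x1000, 16));
    /* ...but a PTE without that encoding is rejected (shift 0). */
    printf("64K segment, 4K PTE  -> shift %u\n", pte_size_decode(0x0000, 16));
    return 0;
}

With the old code a mismatched PTE in that situation could be returned as a
hit; with the patch the PTEG search simply moves on to the next PTE in the
group.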
Applied to ppc-for-2.7.

> ---
>  target-ppc/cpu-qom.h        |  3 +++
>  target-ppc/mmu-hash64.c     | 39 +++++++++++++++++++++++++++++++++++----
>  target-ppc/translate_init.c | 22 +++++++++++++++++++---
>  3 files changed, 57 insertions(+), 7 deletions(-)
> 
> diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
> index 0fad2def0a94..286410502f6d 100644
> --- a/target-ppc/cpu-qom.h
> +++ b/target-ppc/cpu-qom.h
> @@ -70,18 +70,21 @@ enum powerpc_mmu_t {
>  #define POWERPC_MMU_64       0x00010000
>  #define POWERPC_MMU_1TSEG    0x00020000
>  #define POWERPC_MMU_AMR      0x00040000
> +#define POWERPC_MMU_64K      0x00080000
>      /* 64 bits PowerPC MMU */
>      POWERPC_MMU_64B        = POWERPC_MMU_64 | 0x00000001,
>      /* Architecture 2.03 and later (has LPCR) */
>      POWERPC_MMU_2_03       = POWERPC_MMU_64 | 0x00000002,
>      /* Architecture 2.06 variant */
>      POWERPC_MMU_2_06       = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
> +                             | POWERPC_MMU_64K
>                               | POWERPC_MMU_AMR | 0x00000003,
>      /* Architecture 2.06 "degraded" (no 1T segments) */
>      POWERPC_MMU_2_06a      = POWERPC_MMU_64 | POWERPC_MMU_AMR
>                               | 0x00000003,
>      /* Architecture 2.07 variant */
>      POWERPC_MMU_2_07       = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
> +                             | POWERPC_MMU_64K
>                               | POWERPC_MMU_AMR | 0x00000004,
>      /* Architecture 2.07 "degraded" (no 1T segments) */
>      POWERPC_MMU_2_07a      = POWERPC_MMU_64 | POWERPC_MMU_AMR
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index ed353b2d1539..fa26ad2e875b 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -450,9 +450,31 @@ void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
>      }
>  }
>  
> +/* Returns the effective page shift or 0. MPSS isn't supported yet so
> + * this will always be the slb_pshift or 0
> + */
> +static uint32_t ppc_hash64_pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
> +{
> +    switch (slb_pshift) {
> +    case 12:
> +        return 12;
> +    case 16:
> +        if ((pte1 & 0xf000) == 0x1000) {
> +            return 16;
> +        }
> +        return 0;
> +    case 24:
> +        if ((pte1 & 0xff000) == 0) {
> +            return 24;
> +        }
> +        return 0;
> +    }
> +    return 0;
> +}
> +
>  static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
> -                                     bool secondary, target_ulong ptem,
> -                                     ppc_hash_pte64_t *pte)
> +                                     uint32_t slb_pshift, bool secondary,
> +                                     target_ulong ptem, ppc_hash_pte64_t *pte)
>  {
>      CPUPPCState *env = &cpu->env;
>      int i;
> @@ -472,6 +494,13 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
>          if ((pte0 & HPTE64_V_VALID)
>              && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
>              && HPTE64_V_COMPARE(pte0, ptem)) {
> +            uint32_t pshift = ppc_hash64_pte_size_decode(pte1, slb_pshift);
> +            if (pshift == 0) {
> +                continue;
> +            }
> +            /* We don't do anything with pshift yet as qemu TLB only deals
> +             * with 4K pages anyway
> +             */
>              pte->pte0 = pte0;
>              pte->pte1 = pte1;
>              ppc_hash64_stop_access(cpu, token);
> @@ -525,7 +554,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>              " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
>              " hash=" TARGET_FMT_plx "\n",
>              env->htab_base, env->htab_mask, vsid, ptem, hash);
> -    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
> +    pte_offset = ppc_hash64_pteg_search(cpu, hash, slb->sps->page_shift,
> +                                        0, ptem, pte);
>  
>      if (pte_offset == -1) {
>          /* Secondary PTEG lookup */
> @@ -535,7 +565,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>              " hash=" TARGET_FMT_plx "\n", env->htab_base,
>              env->htab_mask, vsid, ptem, ~hash);
>  
> -        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
> +        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb->sps->page_shift, 1,
> +                                            ptem, pte);
>      }
>  
>      return pte_offset;
> diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
> index 4820c0bc99fb..d7860fd7f8ee 100644
> --- a/target-ppc/translate_init.c
> +++ b/target-ppc/translate_init.c
> @@ -10301,8 +10301,8 @@ static void ppc_cpu_initfn(Object *obj)
>      if (pcc->sps) {
>          env->sps = *pcc->sps;
>      } else if (env->mmu_model & POWERPC_MMU_64) {
> -        /* Use default sets of page sizes */
> -        static const struct ppc_segment_page_sizes defsps = {
> +        /* Use default sets of page sizes. We don't support MPSS */
> +        static const struct ppc_segment_page_sizes defsps_4k = {
>              .sps = {
>                  { .page_shift = 12, /* 4K */
>                    .slb_enc = 0,
> @@ -10314,7 +10314,23 @@ static void ppc_cpu_initfn(Object *obj)
>                  },
>              },
>          };
> -        env->sps = defsps;
> +        static const struct ppc_segment_page_sizes defsps_64k = {
> +            .sps = {
> +                { .page_shift = 12, /* 4K */
> +                  .slb_enc = 0,
> +                  .enc = { { .page_shift = 12, .pte_enc = 0 } }
> +                },
> +                { .page_shift = 16, /* 64K */
> +                  .slb_enc = 0x110,
> +                  .enc = { { .page_shift = 16, .pte_enc = 1 } }
> +                },
> +                { .page_shift = 24, /* 16M */
> +                  .slb_enc = 0x100,
> +                  .enc = { { .page_shift = 24, .pte_enc = 0 } }
> +                },
> +            },
> +        };
> +        env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
>      }
>  #endif /* defined(TARGET_PPC64) */

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson