On Wed, 2016-01-27 at 21:13 +1100, David Gibson wrote:
> Currently, the ppc_hash64_page_shift() function looks up a page size
> based on information in an SLB entry.  It open codes the bit
> translation for existing CPUs, but different CPU models can have
> different SLB encodings.  We already store those encodings in the
> 'sps' table in CPUPPCState, but we don't currently enforce that the
> table actually matches the logic in ppc_hash64_page_shift().
> 
> This patch reworks the lookup of page size from the SLB in several
> ways:
>   * ppc_store_slb() will now fail (triggering an illegal instruction
>     exception) if given a bad SLB page size encoding
>   * On success, ppc_store_slb() stores a pointer to the relevant entry
>     in the page size table in the SLB entry.  This is looked up
>     directly from the published table of page size encodings, so it
>     can't get out of sync.
>   * ppc_hash64_htab_lookup() and others now use this precached page
>     size information rather than decoding the SLB values
>   * Now that callers have easy access to the page_shift,
>     ppc_hash64_pte_raddr() amounts to just a deposit64(), so remove it
>     and have the callers use deposit64() directly
> 
> Signed-off-by: David Gibson <da...@gibson.dropbear.id.au>

Acked-by: Benjamin Herrenschmidt <b...@kernel.crashing.org>
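
For reference, on the deposit64() change in the last bullet: QEMU's
deposit64(value, 0, shift, field) (from qemu/bitops.h) overwrites bits
[0, shift) of value with the low bits of field, which is exactly the
mask-and-or that ppc_hash64_pte_raddr() open-coded.  A minimal sketch
of the equivalence, using made-up stand-in values:

    /* deposit64() comes from "qemu/bitops.h" */
    uint64_t rpn = 0x12340000, eaddr = 0x56789abc;
    int page_shift = 16;                        /* e.g. a 64K page */
    uint64_t mask = (1ULL << page_shift) - 1;
    /* The old open-coded form ... */
    uint64_t raddr_old = (rpn & ~mask) | (eaddr & mask);
    /* ... and the deposit64() form give the same result, 0x12349abc */
    uint64_t raddr_new = deposit64(rpn, 0, page_shift, eaddr);
    assert(raddr_old == raddr_new);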

> ---
>  target-ppc/cpu.h        |  1 +
>  target-ppc/machine.c    | 20 +++++++++++++
>  target-ppc/mmu-hash64.c | 74 +++++++++++++++++++++++--------------------------
>  3 files changed, 56 insertions(+), 39 deletions(-)
> 
> diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
> index 2bc96b4..0820390 100644
> --- a/target-ppc/cpu.h
> +++ b/target-ppc/cpu.h
> @@ -419,6 +419,7 @@ typedef struct ppc_slb_t ppc_slb_t;
>  struct ppc_slb_t {
>      uint64_t esid;
>      uint64_t vsid;
> +    const struct ppc_one_seg_page_size *sps;
>  };
>  
>  #define MAX_SLB_ENTRIES         64
> diff --git a/target-ppc/machine.c b/target-ppc/machine.c
> index b61c060..ca62d3e 100644
> --- a/target-ppc/machine.c
> +++ b/target-ppc/machine.c
> @@ -2,6 +2,7 @@
>  #include "hw/boards.h"
>  #include "sysemu/kvm.h"
>  #include "helper_regs.h"
> +#include "mmu-hash64.h"
>  
>  static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
>  {
> @@ -352,11 +353,30 @@ static bool slb_needed(void *opaque)
>      return (cpu->env.mmu_model & POWERPC_MMU_64);
>  }
>  
> +static int slb_post_load(void *opaque, int version_id)
> +{
> +    PowerPCCPU *cpu = opaque;
> +    CPUPPCState *env = &cpu->env;
> +    int i;
> +
> +    /* We've pulled in the raw esid and vsid values from the migration
> +     * stream, but we need to recompute the page size pointers */
> +    for (i = 0; i < env->slb_nr; i++) {
> +        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
> +            /* Migration source had bad values in its SLB */
> +            return -1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
>  static const VMStateDescription vmstate_slb = {
>      .name = "cpu/slb",
>      .version_id = 1,
>      .minimum_version_id = 1,
>      .needed = slb_needed,
> +    .post_load = slb_post_load,
>      .fields = (VMStateField[]) {
>          VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
>          VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
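
A nice property of doing this in a post_load hook: a negative return
from post_load makes the incoming migration fail, so a source that
somehow sent invalid SLB contents is rejected outright rather than
imported with a NULL sps pointer.
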
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 6e05643..b784791 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -19,6 +19,7 @@
>   */
>  #include "cpu.h"
>  #include "exec/helper-proto.h"
> +#include "qemu/error-report.h"
>  #include "sysemu/kvm.h"
>  #include "kvm_ppc.h"
>  #include "mmu-hash64.h"
> @@ -140,6 +141,8 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
>  {
>      CPUPPCState *env = &cpu->env;
>      ppc_slb_t *slb = &env->slb[slot];
> +    const struct ppc_one_seg_page_size *sps = NULL;
> +    int i;
>  
>      if (slot >= env->slb_nr) {
>          return -1; /* Bad slot number */
> @@ -154,8 +157,29 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
>          return -1; /* 1T segment on MMU that doesn't support it */
>      }
>  
> +    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> +        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
> +
> +        if (!sps1->page_shift) {
> +            break;
> +        }
> +
> +        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
> +            sps = sps1;
> +            break;
> +        }
> +    }
> +
> +    if (!sps) {
> +        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
> +                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
> +                     slot, esid, vsid);
> +        return -1;
> +    }
> +
>      slb->esid = esid;
>      slb->vsid = vsid;
> +    slb->sps = sps;
>  
>      LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
>              " %016" PRIx64 "\n", __func__, slot, esid, vsid,
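
For anyone cross-checking the matching loop above: each
ppc_one_seg_page_size entry pairs a base page shift with the SLB
encoding that selects it.  Roughly what a 64K entry might look like in
a POWER7-style sps table (the slb_enc/pte_enc values here are
illustrative and vary per CPU model, so don't take them as gospel):

    /* Hypothetical table entry: 64K base pages, matched when
     * (vsid & SLB_VSID_LLP_MASK) == 0x110, i.e. L=1, LP=01. */
    static const struct ppc_one_seg_page_size sps_64k = {
        .page_shift = 16,      /* 2^16 = 64K base page size */
        .slb_enc    = 0x110,   /* compared against the vsid LLP bits */
        .enc = {
            { .page_shift = 16, .pte_enc = 1 },   /* 64K real pages */
        },
    };
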
> @@ -394,24 +418,6 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
>      return -1;
>  }
>  
> -static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
> -{
> -    uint64_t epnshift;
> -
> -    /* Page size according to the SLB, which we use to generate the
> -     * EPN for hash table lookup..  When we implement more recent MMU
> -     * extensions this might be different from the actual page size
> -     * encoded in the PTE */
> -    if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
> -        epnshift = TARGET_PAGE_BITS;
> -    } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
> -        epnshift = TARGET_PAGE_BITS_64K;
> -    } else {
> -        epnshift = TARGET_PAGE_BITS_16M;
> -    }
> -    return epnshift;
> -}
> -
>  static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>                                       ppc_slb_t *slb, target_ulong eaddr,
>                                       ppc_hash_pte64_t *pte)
> @@ -419,21 +425,24 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>      CPUPPCState *env = &cpu->env;
>      hwaddr pte_offset;
>      hwaddr hash;
> -    uint64_t vsid, epnshift, epnmask, epn, ptem;
> +    uint64_t vsid, epnmask, epn, ptem;
> +
> +    /* The SLB store path should prevent any bad page size encodings
> +     * getting in there, so: */
> +    assert(slb->sps);
>  
> -    epnshift = ppc_hash64_page_shift(slb);
> -    epnmask = ~((1ULL << epnshift) - 1);
> +    epnmask = ~((1ULL << slb->sps->page_shift) - 1);
>  
>      if (slb->vsid & SLB_VSID_B) {
>          /* 1TB segment */
>          vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
>          epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
> -        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
> +        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
>      } else {
>          /* 256M segment */
>          vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
>          epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
> -        hash = vsid ^ (epn >> epnshift);
> +        hash = vsid ^ (epn >> slb->sps->page_shift);
>      }
>      ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
>  
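As a toy example of the primary hash computed above (invented numbers:
a 256M segment with 4K base pages, so page_shift is 12):

    uint64_t vsid = 0x12345, epn = 0x6000;
    uint64_t hash = vsid ^ (epn >> 12);    /* 0x12345 ^ 0x6 == 0x12343 */
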
> @@ -465,20 +474,6 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>      return pte_offset;
>  }
>  
> -static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
> -                                   target_ulong eaddr)
> -{
> -    hwaddr mask;
> -    int target_page_bits;
> -    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
> -    /*
> -     * We support 4K, 64K and 16M now
> -     */
> -    target_page_bits = ppc_hash64_page_shift(slb);
> -    mask = (1ULL << target_page_bits) - 1;
> -    return (rpn & ~mask) | (eaddr & mask);
> -}
> -
>  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
>                                  int rwx, int mmu_idx)
>  {
> @@ -600,7 +595,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
>  
>      /* 7. Determine the real address from the PTE */
>  
> -    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
> +    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, slb->sps->page_shift, eaddr);
>  
>      tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
>                   prot, mmu_idx, TARGET_PAGE_SIZE);
> @@ -630,7 +625,8 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
>          return -1;
>      }
>  
> -    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
> +    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, slb->sps->page_shift, addr)
> +        & TARGET_PAGE_MASK;
>  }
>  
>  void ppc_hash64_store_hpte(PowerPCCPU *cpu,
