On 06.08.2013, at 06:19, Paul Mackerras wrote:

> Currently, PR KVM uses 4k pages for the host-side mappings of guest
> memory, regardless of the host page size.  When the host page size is
> 64kB, we might as well use 64k host page mappings for guest mappings
> of 64kB and larger pages and for guest real-mode mappings.  However,
> the magic page has to remain a 4k page.
> 
> To implement this, we first add another flag bit to the guest VSID
> values we use, to indicate that this segment is one where host pages
> should be mapped using 64k pages.  For segments with this bit set
> we set the bits in the shadow SLB entry to indicate a 64k base page
> size.  When faulting in host HPTEs for this segment, we make them
> 64k HPTEs instead of 4k.  We record the pagesize in struct hpte_cache
> for use when invalidating the HPTE.
> 
> Signed-off-by: Paul Mackerras <pau...@samba.org>
> ---
> arch/powerpc/include/asm/kvm_book3s.h |  6 ++++--
> arch/powerpc/kvm/book3s_32_mmu.c      |  1 +
> arch/powerpc/kvm/book3s_64_mmu.c      | 35 ++++++++++++++++++++++++++++++-----
> arch/powerpc/kvm/book3s_64_mmu_host.c | 27 +++++++++++++++++++++------
> arch/powerpc/kvm/book3s_pr.c          |  1 +
> 5 files changed, 57 insertions(+), 13 deletions(-)
> 
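Before digging into the diff, here is a rough, self-contained sketch of the flow the
commit message describes: a flag bit carried in the guest VSID marks a segment as
64k-capable, and that flag later selects the host page size used for the shadow
mapping, with the magic page forced back to 4k.  Everything below -- GVSID_64K_FLAG,
pick_host_page_shift(), struct shadow_pte -- is a placeholder for illustration, not
code from the patch.

	/* Illustrative stand-ins only; not the actual KVM structures. */
	#include <stdio.h>
	#include <stdint.h>

	#define GVSID_64K_FLAG	0x0800000000000000ULL	/* plays the role of VSID_64K */

	struct shadow_pte {		/* stand-in for struct hpte_cache */
		uint64_t gvsid;
		int page_shift;		/* 12 = 4k host mapping, 16 = 64k */
	};

	/* Choose the host mapping size from the flag carried in the guest VSID. */
	static int pick_host_page_shift(uint64_t gvsid, int is_magic_page)
	{
		if ((gvsid & GVSID_64K_FLAG) && !is_magic_page)
			return 16;	/* 64k segment on a 64k host: map with 64k pages */
		return 12;		/* everything else stays 4k */
	}

	int main(void)
	{
		struct shadow_pte pte = { .gvsid = 0x1234 | GVSID_64K_FLAG };

		pte.page_shift = pick_host_page_shift(pte.gvsid, 0);
		printf("host page shift: %d\n", pte.page_shift);	/* prints 16 */

		/* The magic page stays a 4k mapping even inside a 64k segment. */
		pte.page_shift = pick_host_page_shift(pte.gvsid, 1);
		printf("host page shift: %d\n", pte.page_shift);	/* prints 12 */
		return 0;
	}
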
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index 175f876..322b539 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -66,6 +66,7 @@ struct hpte_cache {
>       u64 pfn;
>       ulong slot;
>       struct kvmppc_pte pte;
> +     int pagesize;
> };
> 
> struct kvmppc_vcpu_book3s {
> @@ -113,8 +114,9 @@ struct kvmppc_vcpu_book3s {
> #define CONTEXT_GUEST         1
> #define CONTEXT_GUEST_END     2
> 
> -#define VSID_REAL    0x0fffffffffc00000ULL
> -#define VSID_BAT     0x0fffffffffb00000ULL
> +#define VSID_REAL    0x07ffffffffc00000ULL
> +#define VSID_BAT     0x07ffffffffb00000ULL
> +#define VSID_64K     0x0800000000000000ULL
> #define VSID_1T               0x1000000000000000ULL
> #define VSID_REAL_DR  0x2000000000000000ULL
> #define VSID_REAL_IR  0x4000000000000000ULL
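
A side note on why VSID_REAL and VSID_BAT change above: the old values had bit
0x0800000000000000 set, and that bit is now claimed by the new VSID_64K flag, so the
two constants shrink to keep the flag bit unambiguous.  A quick standalone check,
with the constants copied from the hunk above:

	#include <assert.h>
	#include <stdio.h>

	#define VSID_REAL	0x07ffffffffc00000ULL
	#define VSID_BAT	0x07ffffffffb00000ULL
	#define VSID_64K	0x0800000000000000ULL
	#define VSID_1T		0x1000000000000000ULL
	#define VSID_REAL_DR	0x2000000000000000ULL
	#define VSID_REAL_IR	0x4000000000000000ULL

	int main(void)
	{
		/* The 64k flag must not collide with any other marker bit. */
		assert((VSID_64K &
			(VSID_REAL | VSID_BAT | VSID_1T | VSID_REAL_DR | VSID_REAL_IR)) == 0);
		puts("VSID_64K occupies its own bit");
		return 0;
	}
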
> diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
> index c8cefdd..af04553 100644
> --- a/arch/powerpc/kvm/book3s_32_mmu.c
> +++ b/arch/powerpc/kvm/book3s_32_mmu.c
> @@ -308,6 +308,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
>       ulong mp_ea = vcpu->arch.magic_page_ea;
> 
>       pte->eaddr = eaddr;
> +     pte->page_size = MMU_PAGE_4K;
> 
>       /* Magic page override */
>       if (unlikely(mp_ea) &&
> diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
> index d5fa26c..658ccd7 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu.c
> @@ -542,6 +542,16 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
>       kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
> }
> 
> +#ifdef CONFIG_PPC_64K_PAGES
> +static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
> +{
> +     ulong mp_ea = vcpu->arch.magic_page_ea;
> +
> +     return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
> +             (mp_ea >> SID_SHIFT) == esid;
> +}
> +#endif
> +
> static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
>                                            u64 *vsid)
> {
> @@ -549,11 +559,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
>       struct kvmppc_slb *slb;
>       u64 gvsid = esid;
>       ulong mp_ea = vcpu->arch.magic_page_ea;
> +     int pagesize = MMU_PAGE_64K;
> 
>       if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
>               slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
>               if (slb) {
>                       gvsid = slb->vsid;
> +                     pagesize = slb->base_page_size;
>                       if (slb->tb) {
>                               gvsid <<= SID_SHIFT_1T - SID_SHIFT;
>                       gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
> @@ -564,28 +576,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
> 
>       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
>       case 0:
> -             *vsid = VSID_REAL | esid;
> +             gvsid = VSID_REAL | esid;
>               break;
>       case MSR_IR:
> -             *vsid = VSID_REAL_IR | gvsid;
> +             gvsid |= VSID_REAL_IR;
>               break;
>       case MSR_DR:
> -             *vsid = VSID_REAL_DR | gvsid;
> +             gvsid |= VSID_REAL_DR;
>               break;
>       case MSR_DR|MSR_IR:
>               if (!slb)
>                       goto no_slb;
> 
> -             *vsid = gvsid;
>               break;
>       default:
>               BUG();
>               break;
>       }
> 
> +#ifdef CONFIG_PPC_64K_PAGES
> +     /*
> +      * Mark this as a 64k segment if the host is using
> +      * 64k pages, the host MMU supports 64k pages and
> +      * the guest segment page size is >= 64k,
> +      * but not if this segment contains the magic page.

What's the problem with the magic page? As long as we map the magic page as a 
host 64k page and access only the upper 4k (which we handle today already), we 
should be set, no?


Alex
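
To make the arithmetic behind that point concrete: a 4k guest page that sits inside
a 64k host mapping is reached purely through the offset within the 64k page, so the
host mapping size by itself does not get in the way.  The addresses and names below
are made up for illustration; they are not taken from the patch or from where the
magic page is actually placed.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE_4K	(1ULL << 12)
	#define PAGE_SIZE_64K	(1ULL << 16)

	int main(void)
	{
		uint64_t magic_ea  = 0x000000000000f000ULL;	/* made-up 4k-aligned EA */
		uint64_t host_base = 0x0000000040000000ULL;	/* made-up 64k-aligned host page */

		/* Offset of the 4k region inside its surrounding 64k page. */
		uint64_t off = magic_ea & (PAGE_SIZE_64K - 1);

		printf("4k sub-page #%llu of the 64k page, bytes at host 0x%llx..0x%llx\n",
		       (unsigned long long)(off / PAGE_SIZE_4K),
		       (unsigned long long)(host_base + off),
		       (unsigned long long)(host_base + off + PAGE_SIZE_4K - 1));
		return 0;
	}
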
