When in split mode, instruction relocation and data relocation are not equal:
one of MSR_IR and MSR_DR is set while the other is clear, so instruction
fetches and data accesses are translated differently.

So far we have implemented this mode by reserving special pseudo-VSIDs for the
two cases and flushing all shadow PTEs when going into split mode, which is slow.

Unfortunately, 32-bit Linux and Mac OS X use split mode extensively. So, to
avoid slowing things down too much, I came up with a different idea: mark
split mode with a bit in the VSID and then treat it like any other segment.

This means we can just flush the shadow segment cache but keep the shadow
PTEs intact. I verified that this works with ppc32 Linux and Mac OS X 10.4
guests and that it does speed them up.
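
Roughly, the idea looks like this (a sketch only; tag_vsid() is a
hypothetical helper, the VSID_* constants are the ones introduced below):

	/* Sketch: instead of replacing the guest VSID with a pseudo-VSID,
	 * keep it and only tag the translation mode in the high bits. The
	 * shadow PTEs derived from the tagged VSID thus stay valid across
	 * split mode switches; only the segment cache needs flushing. */
	static u64 tag_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
	{
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case MSR_DR:	/* split mode, only data relocated */
			gvsid |= VSID_REAL_DR;
			break;
		case MSR_IR:	/* split mode, only instructions relocated */
			gvsid |= VSID_REAL_IR;
			break;
		}
		if (vcpu->arch.msr & MSR_PR)
			gvsid |= VSID_PR;
		return gvsid;
	}

Because the mode lives in the VSID itself, split mode segments hash to their
own shadow PTEs and no longer collide with the fully relocated ones.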

Signed-off-by: Alexander Graf <ag...@suse.de>
---
 arch/powerpc/include/asm/kvm_book3s.h |    9 ++++-----
 arch/powerpc/kvm/book3s.c             |   28 ++++++++++++++--------------
 arch/powerpc/kvm/book3s_32_mmu.c      |   21 +++++++++++++--------
 arch/powerpc/kvm/book3s_64_mmu.c      |   27 +++++++++++++++------------
 4 files changed, 46 insertions(+), 39 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5d3bd0c..6f74d93 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,11 +100,10 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST          1
 #define CONTEXT_GUEST_END      2
 
-#define VSID_REAL_DR   0x7ffffffffff00000ULL
-#define VSID_REAL_IR   0x7fffffffffe00000ULL
-#define VSID_SPLIT_MASK        0x7fffffffffe00000ULL
-#define VSID_REAL      0x7fffffffffc00000ULL
-#define VSID_BAT       0x7fffffffffb00000ULL
+#define VSID_REAL      0x1fffffffffc00000ULL
+#define VSID_BAT       0x1fffffffffb00000ULL
+#define VSID_REAL_DR   0x2000000000000000ULL
+#define VSID_REAL_IR   0x4000000000000000ULL
 #define VSID_PR                0x8000000000000000ULL
 
 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a03163b..91dc42d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -147,16 +147,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                }
        }
 
-       if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
-           (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
-               bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-               bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
-
-               /* Flush split mode PTEs */
-               if (dr != ir)
-                       kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
-                                             VSID_SPLIT_MASK);
-
+       if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+                  (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
@@ -534,6 +526,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        bool is_mmio = false;
        bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+       u64 vsid;
 
        relocated = data ? dr : ir;
 
@@ -551,13 +544,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               pte.vpage |= VSID_REAL;
+               pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
-               pte.vpage |= VSID_REAL_DR;
-               break;
        case MSR_IR:
-               pte.vpage |= VSID_REAL_IR;
+               vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+               if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+                       pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+               else
+                       pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+               pte.vpage |= vsid;
+
+               if (vsid == -1)
+                       page_found = -EINVAL;
                break;
        }
 
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b7..0b10503 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
+       ulong ea = esid << SID_SHIFT;
+       struct kvmppc_sr *sr;
+       u64 gvsid = esid;
+
+       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+               sr = find_sr(to_book3s(vcpu), ea);
+               if (sr->valid)
+                       gvsid = sr->vsid;
+       }
+
        /* In case we only have one of MSR_IR or MSR_DR set, let's put
           that in the real-mode context (and hope RM doesn't access
           high memory) */
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               *vsid = (VSID_REAL >> 16) | esid;
+               *vsid = VSID_REAL | esid;
                break;
        case MSR_IR:
-               *vsid = (VSID_REAL_IR >> 16) | esid;
+               *vsid = VSID_REAL_IR | gvsid;
                break;
        case MSR_DR:
-               *vsid = (VSID_REAL_DR >> 16) | esid;
+               *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
-       {
-               ulong ea = esid << SID_SHIFT;
-               struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
                if (!sr->valid)
                        return -1;
 
                *vsid = sr->vsid;
                break;
-       }
        default:
                BUG();
        }
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e9..612de6e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
+       ulong ea = esid << SID_SHIFT;
+       struct kvmppc_slb *slb;
+       u64 gvsid = esid;
+
+       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+               slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+               if (slb)
+                       gvsid = slb->vsid;
+       }
+
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               *vsid = (VSID_REAL >> 16) | esid;
+               *vsid = VSID_REAL | esid;
                break;
        case MSR_IR:
-               *vsid = (VSID_REAL_IR >> 16) | esid;
+               *vsid = VSID_REAL_IR | gvsid;
                break;
        case MSR_DR:
-               *vsid = (VSID_REAL_DR >> 16) | esid;
+               *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
-       {
-               ulong ea;
-               struct kvmppc_slb *slb;
-               ea = esid << SID_SHIFT;
-               slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
-               if (slb)
-                       *vsid = slb->vsid;
-               else
+               if (!slb)
                        return -ENOENT;
 
+               *vsid = gvsid;
                break;
-       }
        default:
                BUG();
                break;
-- 
1.6.0.2
