We didn't make use of SLB entry 0 for no good reason. SLB entry 0 is
always occupied by the Linux linear mapping SLB entry, so the fact that
slbia does not invalidate it doesn't matter: we overwrite SLB entry 0 on
exit anyway.

Just enable use of SLB entry 0 for our shadow SLB code.
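
With slot 0 in play, the slot choice in kvmppc_mmu_next_segment() reduces
to the following (an illustrative sketch, not code from the patch;
pick_shadow_slot is a made-up name and the overflow purge via
kvmppc_mmu_flush_segments() is elided):

        #include <asm/kvm_book3s.h>  /* shadow vcpu, SLB_ESID_V, ESID_MASK */

        /* Sketch: pick a shadow SLB slot, with slot 0 now a candidate too. */
        static int pick_shadow_slot(struct kvmppc_book3s_shadow_vcpu *svcpu,
                                    ulong esid)
        {
                int i, found_inval = -1;

                for (i = 0; i < svcpu->slb_max; i++) {
                        if (!(svcpu->slb[i].esid & SLB_ESID_V))
                                found_inval = i;  /* invalidated, reusable */
                        else if ((svcpu->slb[i].esid & ESID_MASK) == esid)
                                return i;         /* segment already mapped */
                }
                if (found_inval >= 0)
                        return found_inval;       /* reuse the freed slot */
                return svcpu->slb_max++;          /* grow into a fresh slot */
        }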

Signed-off-by: Alexander Graf <ag...@suse.de>

---

v1 -> v2:

  - flush the ERAT by writing a zero entry to SLB entry 0
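
  The resulting exit-time invalidation sequence is then (a sketch that
  merely mirrors the book3s_64_slb.S hunk below, with the register use
  spelled out):

        li      r0, 0           /* all-zero entry: valid bit clear */
        slbmte  r0, r0          /* write it to SLB entry 0, which slbia
                                   leaves alone; the write flushes the ERAT */
        slbia                   /* invalidate every other entry */
        isync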
---
 arch/powerpc/kvm/book3s_64_mmu_host.c | 11 ++++-------
 arch/powerpc/kvm/book3s_64_slb.S      |  3 ++-
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e2efb85..0ac9839 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
        int found_inval = -1;
        int r;
 
-       if (!svcpu->slb_max)
-               svcpu->slb_max = 1;
-
        /* Are we overwriting? */
-       for (i = 1; i < svcpu->slb_max; i++) {
+       for (i = 0; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
        }
 
        /* Found a spare entry that was invalidated before */
-       if (found_inval > 0) {
+       if (found_inval >= 0) {
                r = found_inval;
                goto out;
        }
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
        ulong seg_mask = -seg_size;
        int i;
 
-       for (i = 1; i < svcpu->slb_max; i++) {
+       for (i = 0; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-       svcpu->slb_max = 1;
+       svcpu->slb_max = 0;
        svcpu->slb[0].esid = 0;
        svcpu_put(svcpu);
 }
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 596140e..84c52c6 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -138,7 +138,8 @@ slb_do_enter:
 
        /* Restore bolted entries from the shadow and fix it along the way */
 
-       /* We don't store anything in entry 0, so we don't need to take care of it */
+       li      r0, 0           /* all-zero entry: valid bit clear */
+       slbmte  r0, r0          /* zap entry 0, which slbia skips; flushes the ERAT too */
        slbia
        isync
 
-- 
1.8.1.4
