[PATCH 03 of 16] kvm: ppc: small cosmetic changes to Book E DTLB miss handler

2009-01-03  Hollis Blanchard
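vcpu->arch.paddr_accessed is only needed on the MMIO emulation path here, so
translate into a local and store it only when we actually punt to the
emulator. No functional change intended. Roughly, the DTLB miss path ends up
looking like this (a condensed sketch of the result, not a literal excerpt
from the file):

    gpa_t gpaddr = tlb_xlate(gtlbe, eaddr);
    gfn_t gfn = gpaddr >> PAGE_SHIFT;

    if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
        /* RAM behind the guest mapping: just refill the shadow TLB. */
        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
                       gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
        r = RESUME_GUEST;
    } else {
        /* Not RAM: only the MMIO emulator consumes paddr_accessed. */
        vcpu->arch.paddr_accessed = gpaddr;
        r = kvmppc_emulate_mmio(run, vcpu);
    }
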
Signed-off-by: Hollis Blanchard <holl...@us.ibm.com>

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -290,6 +290,7 @@ int kvmppc_handle_exit(struct kvm_run *r
struct kvmppc_44x_tlbe *gtlbe;
unsigned long eaddr = vcpu->arch.fault_dear;
int gtlb_index;
+   gpa_t gpaddr;
gfn_t gfn;
 
/* Check the guest TLB. */
@@ -305,8 +306,8 @@ int kvmppc_handle_exit(struct kvm_run *r
}
 
gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
-   vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
-   gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
+   gpaddr = tlb_xlate(gtlbe, eaddr);
+   gfn = gpaddr >> PAGE_SHIFT;
 
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
/* The guest TLB had a mapping, but the shadow TLB
@@ -315,13 +316,14 @@ int kvmppc_handle_exit(struct kvm_run *r
 * b) the guest used a large mapping which we're faking
 * Either way, we need to satisfy the fault without
 * invoking the guest. */
-   kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,
+   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
                   gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
} else {
/* Guest has mapped and accessed a page which is not
 * actually RAM. */
+   vcpu->arch.paddr_accessed = gpaddr;
r = kvmppc_emulate_mmio(run, vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}


[PATCH 04 of 16] kvm: ppc: change kvmppc_mmu_map() parameters

2009-01-03  Hollis Blanchard
Passing just the guest TLB index (instead of a decoded ASID, flags, and mapping size) will ease an e500 implementation.
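
The callee can recover everything it needs from the guest TLB entry itself.
On 44x the new entry point boils down to this (a sketch condensed from the
diff below, not a literal excerpt):

    void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                        unsigned int gtlb_index)
    {
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        u32 asid = gtlbe->tid;
        u32 flags = gtlbe->word2;
        u32 max_bytes = get_tlb_bytes(gtlbe);

        /* ... existing shadow TLB fill logic, unchanged ... */
    }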

Signed-off-by: Hollis Blanchard <holl...@us.ibm.com>

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -55,7 +55,6 @@ extern void kvmppc_emulate_dec(struct kv
 /* Core-specific hooks */
 
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
-                           u64 asid, u32 flags, u32 max_bytes,
                            unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -269,15 +269,19 @@ void kvmppc_mmu_destroy(struct kvm_vcpu 
  * Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB.
  */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
-                    u32 flags, u32 max_bytes, unsigned int gtlb_index)
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+                    unsigned int gtlb_index)
 {
struct kvmppc_44x_tlbe stlbe;
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+   struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
struct kvmppc_44x_shadow_ref *ref;
struct page *new_page;
hpa_t hpaddr;
gfn_t gfn;
+   u32 asid = gtlbe->tid;
+   u32 flags = gtlbe->word2;
+   u32 max_bytes = get_tlb_bytes(gtlbe);
unsigned int victim;
 
/* Select TLB entry to clobber. Indirectly guard against races with the TLB
@@ -448,10 +452,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcp
}
 
if (tlbe_is_host_safe(vcpu, tlbe)) {
-   u64 asid;
gva_t eaddr;
gpa_t gpaddr;
-   u32 flags;
u32 bytes;
 
eaddr = get_tlb_eaddr(tlbe);
@@ -462,10 +464,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcp
eaddr &= ~(bytes - 1);
gpaddr &= ~(bytes - 1);
 
-   asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
-   flags = tlbe->word2 & 0xffff;
-
-   kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
+   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
}
 
KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -316,8 +316,7 @@ int kvmppc_handle_exit(struct kvm_run *r
 * b) the guest used a large mapping which we're faking
 * Either way, we need to satisfy the fault without
 * invoking the guest. */
-   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
-                  gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
+   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
} else {
@@ -364,8 +363,7 @@ int kvmppc_handle_exit(struct kvm_run *r
 * b) the guest used a large mapping which we're faking
 * Either way, we need to satisfy the fault without
 * invoking the guest. */
-   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
-                  gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
+   kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
} else {
/* Guest mapped and leaped at non-RAM! */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);