This patch gives an example of the refactoring, derived from x86.
It aims at collecting comments.
I have not even tried to build it, as I have no 440 toolchain.
I can test it on E500, but it will take some time to move the E500 code base onto it.
Hollis,
1. Should we merge 44x_tlb.c into 44x.c,
so that some 44x-specific struct definitions can be moved from kvm_host.h to 44x.c/.h?
2. We could adopt an ops structure like x86 does,
so that KVM support for different cores can be dynamically inserted as modules,
or keep the static compile method the current kvmppc uses.
What do you think?
Signed-off-by: Liu Yu <[EMAIL PROTECTED]>
---
arch/powerpc/include/asm/kvm_host.h | 27 ++++++++++++-------
arch/powerpc/kvm/44x.c | 47 ++++++++++++++++++++++++++++++++
arch/powerpc/kvm/44x_tlb.c | 50 ++++++++++++++++++++--------------
arch/powerpc/kvm/44x_tlb.h | 5 +++
arch/powerpc/kvm/booke_host.c | 12 +++-----
arch/powerpc/kvm/powerpc.c | 26 +++--------------
6 files changed, 107 insertions(+), 60 deletions(-)
create mode 100644 arch/powerpc/kvm/44x.c
diff --git a/arch/powerpc/include/asm/kvm_host.h
b/arch/powerpc/include/asm/kvm_host.h
index df73351..930921c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -75,16 +75,6 @@ struct kvm_arch {
};
struct kvm_vcpu_arch {
- /* Unmodified copy of the guest's TLB. */
- struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
- /* TLB that's actually used when the guest is running. */
- struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
- /* Pages which are referenced in the shadow TLB. */
- struct page *shadow_pages[PPC44x_TLB_SIZE];
-
- /* Track which TLB entries we've modified in the current exit. */
- u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
-
u32 host_stack;
u32 host_pid;
u32 host_dbcr0;
@@ -156,6 +146,23 @@ struct kvm_vcpu_arch {
unsigned long pending_exceptions;
};
+struct vcpu_44x {
+ struct kvm_vcpu vcpu;
+ /* Unmodified copy of the guest's TLB. */
+ struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
+ /* TLB that's actually used when the guest is running. */
+ struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
+ /* Pages which are referenced in the shadow TLB. */
+ struct page *shadow_pages[PPC44x_TLB_SIZE];
+ /* Track which TLB entries we've modified in the current exit. */
+ u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+};
+
+struct kvm_powerpc_ops {
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+};
+
struct kvm_guest_debug {
int enabled;
unsigned long bp[4];
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
new file mode 100644
index 0000000..d374a8b
--- /dev/null
+++ b/arch/powerpc/kvm/44x.c
@@ -0,0 +1,47 @@
+/* 44x core-specific KVM module glue: registers the vcpu create/free
+ * callbacks for 440 cores and handles module init/exit. */
+
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+
+#include "44x_tlb.h"
+
+/* C identifiers cannot begin with a digit, so the functions and the
+ * container variable are named kvmppc_44x_*/vcpu_44x rather than 44x_*. */
+static struct kvm_vcpu *kvmppc_44x_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	struct vcpu_44x *vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	int err;
+
+	if (!vcpu_44x)
+		return ERR_PTR(-ENOMEM);
+
+	err = kvm_vcpu_init(&vcpu_44x->vcpu, kvm, id);
+	if (err)
+		goto free_vcpu;
+
+	return &vcpu_44x->vcpu;
+
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+	return ERR_PTR(err);
+}
+
+static void kvmppc_44x_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	/* Free the containing vcpu_44x, not just the embedded kvm_vcpu. */
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
+
+	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+}
+
+static struct kvm_powerpc_ops kvmppc_44x_ops = {
+	.vcpu_create = kvmppc_44x_vcpu_create,
+	.vcpu_free = kvmppc_44x_vcpu_free,
+};
+
+/* module_init requires an int (*)(void); propagate kvm_init's result. */
+static int __init booke_44x_init(void)
+{
+	kvmppc_booke_init();
+	return kvm_init(&kvmppc_44x_ops, sizeof(struct vcpu_44x), THIS_MODULE);
+}
+
+static void __exit booke_44x_exit(void)
+{
+	kvmppc_booke_exit();
+	kvm_exit();
+}
+
+module_init(booke_44x_init)
+module_exit(booke_44x_exit)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index dd873fb..1e4988e 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -35,6 +35,7 @@ static unsigned int kvmppc_tlb_44x_pos;
#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe;
int i;
@@ -43,7 +44,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
"nr", "tid", "word0", "word1", "word2");
for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- tlbe = &vcpu->arch.guest_tlb[i];
+		tlbe = &vcpu_44x->guest_tlb[i];
if (tlbe->word0 & PPC44x_TLB_VALID)
printk(" G%2d | %02X | %08X | %08X | %08X |\n",
i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -51,7 +52,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
}
for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- tlbe = &vcpu->arch.shadow_tlb[i];
+		tlbe = &vcpu_44x->shadow_tlb[i];
if (tlbe->word0 & PPC44x_TLB_VALID)
printk(" S%2d | %02X | %08X | %08X | %08X |\n",
i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -82,11 +83,12 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int
usermode)
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
unsigned int as)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
/* XXX Replace loop with fancy data structures. */
for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -114,25 +116,27 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t
eaddr, unsigned int pid,
struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
 	unsigned int index;
 	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 	if (index == -1)
 		return NULL;
-	return &vcpu->arch.guest_tlb[index];
+	return &vcpu_44x->guest_tlb[index];
}
struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
 	unsigned int index;
 	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 	if (index == -1)
 		return NULL;
-	return &vcpu->arch.guest_tlb[index];
+	return &vcpu_44x->guest_tlb[index];
}
static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
@@ -140,11 +144,11 @@ static int kvmppc_44x_tlbe_is_writable(struct
kvmppc_44x_tlbe *tlbe)
return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}
-static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
+static void kvmppc_44x_shadow_release(struct vcpu_44x *vcpu_44x,
 				      unsigned int index)
 {
-	struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
-	struct page *page = vcpu->arch.shadow_pages[index];
+	struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
+	struct page *page = vcpu_44x->shadow_pages[index];
if (get_tlb_v(stlbe)) {
if (kvmppc_44x_tlbe_is_writable(stlbe))
@@ -156,7 +160,8 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
-	vcpu->arch.shadow_tlb_mod[i] = 1;
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
+
+	vcpu_44x->shadow_tlb_mod[i] = 1;
}
/* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -164,6 +169,7 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu,
unsigned int i)
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
u32 flags)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
struct page *new_page;
struct kvmppc_44x_tlbe *stlbe;
hpa_t hpaddr;
@@ -174,7 +180,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr,
gfn_t gfn, u64 asid,
victim = kvmppc_tlb_44x_pos++;
if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
kvmppc_tlb_44x_pos = 0;
- stlbe = &vcpu->arch.shadow_tlb[victim];
+	stlbe = &vcpu_44x->shadow_tlb[victim];
/* Get reference to new page. */
new_page = gfn_to_page(vcpu->kvm, gfn);
@@ -186,9 +192,9 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr,
gfn_t gfn, u64 asid,
hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */
- kvmppc_44x_shadow_release(vcpu, victim);
+	kvmppc_44x_shadow_release(vcpu_44x, victim);
- vcpu->arch.shadow_pages[victim] = new_page;
+	vcpu_44x->shadow_pages[victim] = new_page;
/* XXX Make sure (va, size) doesn't overlap any other
* entries. 440x6 user manual says the result would be
@@ -213,7 +219,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr,
gfn_t gfn, u64 asid,
handler);
}
-static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
+static void kvmppc_mmu_invalidate(struct vcpu_44x *vcpu_44x, gva_t eaddr,
 				  gva_t eend, u32 asid)
 {
+	struct kvm_vcpu *vcpu = &vcpu_44x->vcpu;
 	unsigned int pid = !(asid & 0xff);
@@ -221,7 +227,7 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu,
gva_t eaddr,
/* XXX Replace loop with fancy data structures. */
for (i = 0; i <= tlb_44x_hwater; i++) {
- struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
unsigned int tid;
if (!get_tlb_v(stlbe))
@@ -237,7 +243,7 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu,
gva_t eaddr,
if (tid && (tid != pid))
continue;
- kvmppc_44x_shadow_release(vcpu, i);
+		kvmppc_44x_shadow_release(vcpu_44x, i);
stlbe->word0 = 0;
kvmppc_tlbe_set_modified(vcpu, i);
KVMTRACE_5D(STLB_INVAL, vcpu, i,
@@ -251,15 +257,16 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu,
gva_t eaddr,
* switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
if (vcpu->arch.swap_pid) {
/* XXX Replace loop with fancy data structures. */
for (i = 0; i <= tlb_44x_hwater; i++) {
- struct kvmppc_44x_tlbe *stlbe =
&vcpu->arch.shadow_tlb[i];
+			struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
/* Future optimization: clear only userspace mappings.
*/
- kvmppc_44x_shadow_release(vcpu, i);
+			kvmppc_44x_shadow_release(vcpu_44x, i);
stlbe->word0 = 0;
kvmppc_tlbe_set_modified(vcpu, i);
KVMTRACE_5D(STLB_INVAL, vcpu, i,
@@ -295,6 +302,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
+	struct vcpu_44x *vcpu_44x = to_44x(vcpu);
u64 eaddr;
u64 raddr;
u64 asid;
@@ -309,13 +317,13 @@ int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8
rs, u8 ws)
return EMULATE_FAIL;
}
- tlbe = &vcpu->arch.guest_tlb[index];
+	tlbe = &vcpu_44x->guest_tlb[index];
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe->word0 & PPC44x_TLB_VALID) {
eaddr = get_tlb_eaddr(tlbe);
asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
- kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
+		kvmppc_mmu_invalidate(vcpu_44x, eaddr, get_tlb_end(tlbe), asid);
}
switch (ws) {
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index e5b0a76..6d82601 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -90,4 +90,9 @@ static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe,
gva_t eaddr)
return get_tlb_raddr(tlbe) | (eaddr & pgmask);
}
+static inline struct vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct vcpu_44x, vcpu);
+}
+
#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
index b480341..6d45e7d 100644
--- a/arch/powerpc/kvm/booke_host.c
+++ b/arch/powerpc/kvm/booke_host.c
@@ -19,13 +19,12 @@
#include <linux/errno.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kvm_ppc.h>
unsigned long kvmppc_booke_handlers;
-static int kvmppc_booke_init(void)
+void kvmppc_booke_init(void)
{
unsigned long ivor[16];
unsigned long max_ivor = 0;
@@ -69,15 +68,12 @@ static int kvmppc_booke_init(void)
}
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor +
kvmppc_handler_len);
-
- return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}
+EXPORT_SYMBOL_GPL(kvmppc_booke_init);
-static void __exit kvmppc_booke_exit(void)
+void kvmppc_booke_exit(void)
{
free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
- kvm_exit();
}
+EXPORT_SYMBOL_GPL(kvmppc_booke_exit);
-module_init(kvmppc_booke_init)
-module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc4..6d0e6c5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -29,6 +29,8 @@
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
+struct kvm_powerpc_ops *kvm_powerpc_ops;
+EXPORT_SYMBOL_GPL(kvm_powerpc_ops);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
@@ -177,31 +179,12 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
- struct kvm_vcpu *vcpu;
- int err;
-
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_vcpu;
-
- return vcpu;
-
-free_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
- return ERR_PTR(err);
+ return kvm_powerpc_ops->vcpu_create(kvm, id);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, vcpu);
+ kvm_powerpc_ops->vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -542,6 +525,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
int kvm_arch_init(void *opaque)
{
+ kvm_powerpc_ops = (struct kvm_powerpc_ops *)opaque;
return 0;
}
--
1.5.4
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html