Re: [PATCH 11/27] Add book3s_64 Host MMU handling

2009-11-02 Thread Alexander Graf


On 02.11.2009 at 00:39, Michael Neuling wrote:




>> +static void invalidate_pte(struct hpte_cache *pte)
>> +{
>> +    dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n",
>> +                i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
>> +
>> +    ppc_md.hpte_invalidate(pte->slot, pte->host_va,
>> +                           MMU_PAGE_4K, MMU_SEGSIZE_256M,
>> +                           false);


> Are we assuming 256M segments here (and elsewhere)?


Yes, on the host we only create 256MB segments. What the guest uses is  
a different question.
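
A minimal sketch of how that host-side assumption could be named in one place, so a later change (1T segments, say) only touches a single spot. The helper below is hypothetical and not part of this series; the hpte_invalidate call mirrors the hunk quoted above:

    /* Hypothetical helper: the shadow MMU only ever backs guest mappings
     * with 256M host segments, so state that assumption exactly once. */
    static inline int kvmppc_shadow_segsize(void)
    {
            return MMU_SEGSIZE_256M;
    }

    /* The invalidate path would then read: */
    ppc_md.hpte_invalidate(pte->slot, pte->host_va,
                           MMU_PAGE_4K, kvmppc_shadow_segsize(),
                           false);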





>> +static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
>> +{
>> +    int i;
>> +    int max_slb_size = 64;
>> +    int found_inval = -1;
>> +    int r;
>> +
>> +    if (!get_paca()->kvm_slb_max)
>> +        get_paca()->kvm_slb_max = 1;
>> +
>> +    /* Are we overwriting? */
>> +    for (i = 1; i < get_paca()->kvm_slb_max; i++) {
>> +        if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
>> +            found_inval = i;
>> +        else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
>> +            return i;
>> +    }
>> +
>> +    /* Found a spare entry that was invalidated before */
>> +    if (found_inval > 0)
>> +        return found_inval;
>> +
>> +    /* No spare invalid entry, so create one */
>> +
>> +    if (mmu_slb_size < 64)
>> +        max_slb_size = mmu_slb_size;


> Can we just use the global mmu_slb_size and eliminate max_slb_size?


Hm, for some reason I wanted to allow at most 64 SLB entries. Maybe the
struct can't hold more? I'll check again as soon as I'm back at a
notebook.
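
For reference, a sketch of what Mikey's suggestion might look like, assuming the kvm_slb shadow array in the PACA is a plain fixed-size array so ARRAY_SIZE() applies (that assumption is exactly what needs checking):

    /* Let the global mmu_slb_size drive the limit, clamped to whatever
     * the shadow SLB array in the PACA can actually hold. */
    int max_slb_size = min_t(int, mmu_slb_size,
                             ARRAY_SIZE(get_paca()->kvm_slb));

If the array really is 64 entries, this keeps today's behaviour while dropping the magic number.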


Alex






--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 11/27] Add book3s_64 Host MMU handling

2009-11-01 Thread Michael Neuling

> +static void invalidate_pte(struct hpte_cache *pte)
> +{
> + dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n",
> + i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
> +
> + ppc_md.hpte_invalidate(pte->slot, pte->host_va,
> +MMU_PAGE_4K, MMU_SEGSIZE_256M,
> +false);

Are we assuming 256M segments here (and elsewhere)?


> +static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
> +{
> + int i;
> + int max_slb_size = 64;
> + int found_inval = -1;
> + int r;
> +
> + if (!get_paca()->kvm_slb_max)
> + get_paca()->kvm_slb_max = 1;
> +
> + /* Are we overwriting? */
> + for (i = 1; i < get_paca()->kvm_slb_max; i++) {
> + if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
> + found_inval = i;
> + else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
> + return i;
> + }
> +
> + /* Found a spare entry that was invalidated before */
> + if (found_inval > 0)
> + return found_inval;
> +
> + /* No spare invalid entry, so create one */
> +
> + if (mmu_slb_size < 64)
> + max_slb_size = mmu_slb_size;

Can we just use the global mmu_slb_size and eliminate max_slb_size?



Mikey


[PATCH 11/27] Add book3s_64 Host MMU handling

2009-10-30 Thread Alexander Graf
We designed the Book3S port of KVM to be as modular as possible. Most
of the code could easily be used on a Book3S_32 host as well.

The main difference between 32 and 64 bit cores is the MMU. To keep
things well separated, we treat the book3s_64 MMU as one possible compile
option.

This patch adds all the MMU helpers the rest of the code needs in
order to modify the host's MMU, like setting PTEs and segments.

Signed-off-by: Alexander Graf 

---

v5 -> v6

  - don't take mmap_sem
  - dprintk instead of scattered #ifdefs
  - minor cleanups
  - // -> /* */ (book3s_64_mmu_host.c)
---
 arch/powerpc/kvm/book3s_64_mmu_host.c |  408 +
 1 files changed, 408 insertions(+), 0 deletions(-)
 create mode 100644 arch/powerpc/kvm/book3s_64_mmu_host.c

diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
new file mode 100644
index 000..f2899b2
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf 
+ * Kevin Wolf 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_SLB */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+#ifdef DEBUG_SLB
+#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_slb(a, ...) do { } while(0)
+#endif
+
+static void invalidate_pte(struct hpte_cache *pte)
+{
+   dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n",
+   i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+   ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+  MMU_PAGE_4K, MMU_SEGSIZE_256M,
+  false);
+   pte->host_va = 0;
+   kvm_release_pfn_dirty(pte->pfn);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+{
+   int i;
+
+   dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+   guest_ea &= ea_mask;
+   for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+   invalidate_pte(pte);
+   }
+   }
+
+   /* Doing a complete flush -> start from scratch */
+   if (!ea_mask)
+   vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+   int i;
+
+   dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+   guest_vp &= vp_mask;
+   for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.vpage & vp_mask) == guest_vp) {
+   invalidate_pte(pte);
+   }
+   }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+{
+   int i;
+
+   dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_pa, pa_mask);
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+   for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.raddr >= pa_start) &&
+   (pte->pte.raddr < pa_end)) {
+   invalidate_pte(pte);
+   }
+   }
+}
+
+struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data
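
For orientation, the host-MMU entry points defined by the hunks above, in prototype form (names and signatures taken from this posting; a Book3S_32 host would presumably supply its own implementations of the same interface):

    void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask);
    void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask);
    void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
    struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);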

[PATCH 11/27] Add book3s_64 Host MMU handling

2009-10-21 Thread Alexander Graf
We designed the Book3S port of KVM to be as modular as possible. Most
of the code could easily be used on a Book3S_32 host as well.

The main difference between 32 and 64 bit cores is the MMU. To keep
things well separated, we treat the book3s_64 MMU as one possible compile
option.

This patch adds all the MMU helpers the rest of the code needs in
order to modify the host's MMU, like setting PTEs and segments.

Signed-off-by: Alexander Graf 
---
 arch/powerpc/kvm/book3s_64_mmu_host.c |  412 +
 1 files changed, 412 insertions(+), 0 deletions(-)
 create mode 100644 arch/powerpc/kvm/book3s_64_mmu_host.c

diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
new file mode 100644
index 000..507f770
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf 
+ * Kevin Wolf 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+
+// #define DEBUG_MMU
+// #define DEBUG_SLB
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+   guest_ea &= ea_mask;
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+#endif
+   ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+  MMU_PAGE_4K, MMU_SEGSIZE_256M,
+  false);
+   pte->host_va = 0;
+   kvm_release_pfn_dirty(pte->pfn);
+   }
+   }
+
+   /* Doing a complete flush -> start from scratch */
+   if (!ea_mask)
+   vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+   guest_vp &= vp_mask;
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.vpage & vp_mask) == guest_vp) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+#endif
+   ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+  MMU_PAGE_4K, MMU_SEGSIZE_256M,
+  false);
+   pte->host_va = 0;
+   kvm_release_pfn_dirty(pte->pfn);
+   }
+   }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_pa, pa_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.raddr >= pa_start) && (pte->pte.raddr < pa_end)) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.raddr, pte->host_va);
+#endif
+  

[PATCH 11/27] Add book3s_64 Host MMU handling

2009-09-29 Thread Alexander Graf
We designed the Book3S port of KVM to be as modular as possible. Most
of the code could easily be used on a Book3S_32 host as well.

The main difference between 32 and 64 bit cores is the MMU. To keep
things well separated, we treat the book3s_64 MMU as one possible compile
option.

This patch adds all the MMU helpers the rest of the code needs in
order to modify the host's MMU, like setting PTEs and segments.

Signed-off-by: Alexander Graf 
---
 arch/powerpc/kvm/book3s_64_mmu_host.c |  412 +
 1 files changed, 412 insertions(+), 0 deletions(-)
 create mode 100644 arch/powerpc/kvm/book3s_64_mmu_host.c

diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
new file mode 100644
index 000..507f770
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf 
+ * Kevin Wolf 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+
+// #define DEBUG_MMU
+// #define DEBUG_SLB
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+   guest_ea &= ea_mask;
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+#endif
+   ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+  MMU_PAGE_4K, MMU_SEGSIZE_256M,
+  false);
+   pte->host_va = 0;
+   kvm_release_pfn_dirty(pte->pfn);
+   }
+   }
+
+   /* Doing a complete flush -> start from scratch */
+   if (!ea_mask)
+   vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+   guest_vp &= vp_mask;
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.vpage & vp_mask) == guest_vp) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+#endif
+   ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+  MMU_PAGE_4K, MMU_SEGSIZE_256M,
+  false);
+   pte->host_va = 0;
+   kvm_release_pfn_dirty(pte->pfn);
+   }
+   }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+{
+   int i;
+
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
+   vcpu->arch.hpte_cache_offset, guest_pa, pa_mask);
+#endif
+   BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+   for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+   struct hpte_cache *pte;
+
+   pte = &vcpu->arch.hpte_cache[i];
+   if (!pte->host_va)
+   continue;
+
+   if ((pte->pte.raddr >= pa_start) && (pte->pte.raddr < pa_end)) {
+#ifdef DEBUG_MMU
+   printk(KERN_INFO "KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", i, pte->pte.eaddr, pte->pte.raddr, pte->host_va);
+#endif
+