From 720eee463536fdb6e15dcc5388754ea229c71118 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 1 Feb 2008 06:53:39 +0800
Subject: [PATCH] KVM: VMX: Add swap support for EPT
Add tlp_unmap() to kvm_x86_ops, to support swapping of EPT pages. Now EPT can
support Linux swap in conjunction with mmu_notifier.
Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/vmx.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
include/asm-x86/kvm_host.h | 1 +
2 files changed, 47 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f5b59e7..f07f161 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2889,6 +2889,51 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
}
}
+static int remove_ept_entry(struct kvm *kvm, gpa_t gpa)
+{
+ eptp_t eptp = kvm->arch.eptp;
+ hpa_t path[VMX_EPT_MAX_GAW];
+ int offsets[VMX_EPT_MAX_GAW];
+ int level, offset;
+ struct page *page, *ept_page;
+ epte_t *table;
+
+ mutex_lock(&kvm->arch.ept_mutex);
+ path[kvm->arch.eptp.fields.gaw] = eptp.fields.asr_mfn;
+ for (level = kvm->arch.eptp.fields.gaw; level >= 0; level--) {
+ ept_page = pfn_to_page(path[level]);
+ table = kmap_atomic(ept_page, KM_USER0);
+ offsets[level] = offset = VMX_GET_EPTE_OFFSET(gpa, level);
+ if (table[offset].entry != 0) {
+ if (level == 0) {
+ page = pfn_to_page(
+ table[offset].fields.addr_mfn);
+ kvm_release_page_clean(page);
+ atomic_dec(&kvm->arch.guest_npages);
+ table[offset].entry = 0;
+ } else
+ path[level - 1] =
+ table[offset].fields.addr_mfn;
+ kunmap_atomic(table, KM_USER0);
+ } else {
+ kunmap_atomic(table, KM_USER0);
+ break;
+ }
+ }
+ mutex_unlock(&kvm->arch.ept_mutex);
+ if (level >= 0)
+ return -1;
+ ept_sync_individual_addr(kvm, gpa);
+ return 0;
+}
+
+static void vmx_unmap_ept(struct kvm *kvm, gfn_t gfn)
+{
+ if (remove_ept_entry(kvm, gfn << PAGE_SHIFT) < 0)
+ printk(KERN_WARNING "EPT: Fail to unmap gfn 0x%lx\n",
+ (long unsigned int)gfn);
+}
+
static int ept_teardown(struct kvm *kvm)
{
const int entries_per_page = PAGE_SIZE / sizeof(u64);
@@ -3108,6 +3153,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_tss_addr = vmx_set_tss_addr,
.tlp_enabled = vm_need_ept,
+ .tlp_unmap = vmx_unmap_ept,
};
static int __init vmx_init(void)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index b133317..0c3b73a 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -408,6 +408,7 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*tlp_enabled)(void); /* Two Level Paging */
+ void (*tlp_unmap)(struct kvm *kvm, gfn_t gfn);
};
extern struct kvm_x86_ops *kvm_x86_ops;
--
debian.1.5.3.7.1-dirty
From 720eee463536fdb6e15dcc5388754ea229c71118 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 1 Feb 2008 06:53:39 +0800
Subject: [PATCH] KVM: VMX: Add swap support for EPT
Add tlp_unmap() to kvm_x86_ops, to support swapping of EPT pages. Now EPT can
support Linux swap in conjunction with mmu_notifier.
Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/vmx.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
include/asm-x86/kvm_host.h | 1 +
2 files changed, 47 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f5b59e7..f07f161 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2889,6 +2889,51 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
}
}
+static int remove_ept_entry(struct kvm *kvm, gpa_t gpa)
+{
+ eptp_t eptp = kvm->arch.eptp;
+ hpa_t path[VMX_EPT_MAX_GAW];
+ int offsets[VMX_EPT_MAX_GAW];
+ int level, offset;
+ struct page *page, *ept_page;
+ epte_t *table;
+
+ mutex_lock(&kvm->arch.ept_mutex);
+ path[kvm->arch.eptp.fields.gaw] = eptp.fields.asr_mfn;
+ for (level = kvm->arch.eptp.fields.gaw; level >= 0; level--) {
+ ept_page = pfn_to_page(path[level]);
+ table = kmap_atomic(ept_page, KM_USER0);
+ offsets[level] = offset = VMX_GET_EPTE_OFFSET(gpa, level);
+ if (table[offset].entry != 0) {
+ if (level == 0) {
+ page = pfn_to_page(
+ table[offset].fields.addr_mfn);
+ kvm_release_page_clean(page);
+ atomic_dec(&kvm->arch.guest_npages);
+ table[offset].entry = 0;
+ } else
+ path[level - 1] =
+ table[offset].fields.addr_mfn;
+ kunmap_atomic(table, KM_USER0);
+ } else {
+ kunmap_atomic(table, KM_USER0);
+ break;
+ }
+ }
+ mutex_unlock(&kvm->arch.ept_mutex);
+ if (level >= 0)
+ return -1;
+ ept_sync_individual_addr(kvm, gpa);
+ return 0;
+}
+
+static void vmx_unmap_ept(struct kvm *kvm, gfn_t gfn)
+{
+ if (remove_ept_entry(kvm, gfn << PAGE_SHIFT) < 0)
+ printk(KERN_WARNING "EPT: Fail to unmap gfn 0x%lx\n",
+ (long unsigned int)gfn);
+}
+
static int ept_teardown(struct kvm *kvm)
{
const int entries_per_page = PAGE_SIZE / sizeof(u64);
@@ -3108,6 +3153,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_tss_addr = vmx_set_tss_addr,
.tlp_enabled = vm_need_ept,
+ .tlp_unmap = vmx_unmap_ept,
};
static int __init vmx_init(void)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index b133317..0c3b73a 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -408,6 +408,7 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*tlp_enabled)(void); /* Two Level Paging */
+ void (*tlp_unmap)(struct kvm *kvm, gfn_t gfn);
};
extern struct kvm_x86_ops *kvm_x86_ops;
--
debian.1.5.3.7.1-dirty
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2008.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel