From cb851671421832d37c7d90976b603b59a5c75c79 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Fri, 18 Apr 2008 17:05:06 +0800
Subject: [PATCH 3/5] KVM: MMU: Add EPT support

Enable mmu_set_spte() to generate EPT entries: replace the hard-coded
IA-32 PTE bits (present, user, accessed, dirty and NX) with configurable
shadow pte masks that the backend installs at init time, and add an
execute (X) mask for formats such as EPT that grant execute permission
rather than deny it.

Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/mmu.c | 44 ++++++++++++++++++++++++++++++++++----------
arch/x86/kvm/x86.c | 3 +++
include/asm-x86/kvm_host.h | 3 +++
3 files changed, 40 insertions(+), 10 deletions(-)
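
[Note for reviewers, not for the commit message: these hooks let an
EPT-capable backend install its own spte bit layout at init time,
replacing the IA-32 defaults that kvm_arch_init() sets in the x86.c hunk
below. A minimal sketch of such a setup follows; the VMX_EPT_* constants
and the ept_set_spte_masks() helper are illustrative assumptions, not
definitions from this patch.]

/*
 * Illustrative sketch only: the VMX_EPT_* values below are assumptions
 * for this example.  EPT controls access through R/W/X permission bits,
 * has no user/supervisor or NX bits, and no architectural accessed/dirty
 * bits, so software-available bits stand in for A/D.
 */
#define VMX_EPT_READABLE_MASK		0x1ull		/* bit 0: read allowed */
#define VMX_EPT_WRITABLE_MASK		0x2ull		/* bit 1: write allowed */
#define VMX_EPT_EXECUTABLE_MASK		0x4ull		/* bit 2: execute allowed */
#define VMX_EPT_FAKE_ACCESSED_MASK	(1ull << 62)	/* software-available */
#define VMX_EPT_FAKE_DIRTY_MASK		(1ull << 63)	/* software-available */

static void ept_set_spte_masks(void)
{
	/* An EPT entry is "present" when it is readable and writable. */
	kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK);

	/*
	 * Pass 0 for the user and NX masks (EPT has neither), grant
	 * execute permission through the X mask, and use the fake A/D
	 * bits so is_dirty_pte() and rmap_remove() keep working.
	 */
	kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
			      VMX_EPT_FAKE_DIRTY_MASK, 0ull,
			      VMX_EPT_EXECUTABLE_MASK);
}
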
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 108886d..1828837 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -161,6 +161,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -169,6 +175,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
@@ -207,7 +230,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -522,7 +545,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1048,17 +1071,18 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK) {
+		if (shadow_x_mask)
+			spte |= shadow_x_mask;
+	} else if (shadow_nx_mask)
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
@@ -1164,7 +1188,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			}
 
 			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-				| PT_WRITABLE_MASK | PT_USER_MASK;
+				| PT_WRITABLE_MASK | shadow_user_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1608,7 +1632,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce5563..0735efb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 	return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 31aa7d6..9f62773 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -432,6 +432,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
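
[A second reviewer note: the rewritten execute-permission branch in
mmu_set_spte() preserves the old shadow-paging behaviour whenever
x_mask is 0. The standalone program below demonstrates the reduction;
exec_bits() is a hypothetical extract of just that branch, with mask
values mirroring the x86 defaults installed by kvm_arch_init().]

#include <stdio.h>
#include <stdint.h>

#define ACC_EXEC_MASK	1ULL		/* as in mmu.h */
#define PT64_NX_MASK	(1ULL << 63)	/* IA-32e NX bit */

/* Extract of the new execute-permission logic from mmu_set_spte(). */
static uint64_t exec_bits(uint64_t pte_access, uint64_t x_mask,
			  uint64_t nx_mask)
{
	uint64_t spte = 0;

	if (pte_access & ACC_EXEC_MASK) {
		if (x_mask)
			spte |= x_mask;		/* EPT style: grant execute */
	} else if (nx_mask)
		spte |= nx_mask;		/* IA-32 style: deny execute */
	return spte;
}

int main(void)
{
	/* Shadow-paging defaults (x_mask == 0): identical to the old code. */
	printf("exec pte:     %#llx\n",
	       (unsigned long long)exec_bits(ACC_EXEC_MASK, 0, PT64_NX_MASK));
	printf("non-exec pte: %#llx\n",
	       (unsigned long long)exec_bits(0, 0, PT64_NX_MASK));
	return 0;
}
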
--
1.5.4.5