On 07/31/2013 10:48 PM, Gleb Natapov wrote:
> From: Nadav Har'El <n...@il.ibm.com>
> 
> This is the first patch in a series which adds nested EPT support to KVM's
> nested VMX. Nested EPT means emulating EPT for an L1 guest so that L1 can use
> EPT when running a nested guest L2. When L1 uses EPT, it allows the L2 guest
> to set its own cr3 and take its own page faults without either L0 or L1
> getting involved. This often significantly improves L2's performance over the
> previous two alternatives (shadow page tables over EPT, and shadow page
> tables over shadow page tables).
> 
> This patch adds EPT support to paging_tmpl.h.
> 
> paging_tmpl.h contains the code for reading and writing page tables. The code
> for 32-bit and 64-bit tables is very similar, but not identical, so
> paging_tmpl.h is #include'd twice in mmu.c, once with PTTYPE=32 and once
> with PTTYPE=64, and this generates the two sets of similar functions.
> 
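
Not part of the patch, but for readers new to paging_tmpl.h: every function in
the template gets its name from the FNAME() macro, so each #include stamps out
a complete, independently named walker. A minimal standalone sketch of the same
trick, with made-up names (walker_tmpl.h, pte_bytes):

/* --- walker_tmpl.h (hypothetical template file), written once --- */
#if PTTYPE == 64
	#define FNAME(name) paging64_##name
#elif PTTYPE == 32
	#define FNAME(name) paging32_##name
#elif PTTYPE == PTTYPE_EPT
	#define FNAME(name) ept_##name
#else
	#error Invalid PTTYPE value
#endif

/* Every function body is written once, in terms of FNAME() and PTTYPE. */
static int FNAME(pte_bytes)(void)
{
	return PTTYPE == 32 ? 4 : 8;	/* 64-bit and EPT entries are 8 bytes */
}

#undef FNAME

/* --- the includer's side (what mmu.c does), one copy per inclusion --- */
#define PTTYPE_EPT 18			/* arbitrary, must not clash with 32/64 */

#define PTTYPE PTTYPE_EPT
#include "walker_tmpl.h"		/* emits ept_pte_bytes()      */
#undef PTTYPE

#define PTTYPE 64
#include "walker_tmpl.h"		/* emits paging64_pte_bytes() */
#undef PTTYPE

#define PTTYPE 32
#include "walker_tmpl.h"		/* emits paging32_pte_bytes() */
#undef PTTYPE
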
> There are subtle but important differences between the format of EPT tables
> and that of ordinary x86 64-bit page tables, so for nested EPT we need a
> third set of functions to read the guest EPT table and to write the shadow
> EPT table.
> 
> So this patch adds a third PTTYPE, PTTYPE_EPT, which creates functions
> (prefixed with "EPT") that correctly read and write EPT tables.
> 
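
(Aside, not part of the patch: the format differences in question are, roughly,
that an ordinary x86-64 PTE has a Present bit (bit 0), a U/S bit, Accessed and
Dirty bits, and an NX execute-disable bit, while an EPT entry has none of
those: it is "present" whenever any of its Read/Write/eXecute permission bits
(bits 0-2) is set; execute is a positive permission rather than a disable bit;
and Accessed/Dirty bits exist only when the optional EPT A/D feature is
enabled, which this patch does not rely on. Illustration only; the helper
names below are invented and u64 is the usual kernel typedef.)

static inline bool x86_gpte_present(u64 gpte)
{
	return gpte & 1;	/* bit 0: _PAGE_PRESENT */
}

static inline bool ept_gpte_present(u64 gpte)
{
	return gpte & 7;	/* any of Read/Write/eXecute (bits 0-2) */
}
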
> Signed-off-by: Nadav Har'El <n...@il.ibm.com>
> Signed-off-by: Jun Nakajima <jun.nakaj...@intel.com>
> Signed-off-by: Xinhao Xu <xinhao...@intel.com>
> Signed-off-by: Yang Zhang <yang.z.zh...@intel.com>
> Signed-off-by: Gleb Natapov <g...@redhat.com>
> ---
>  arch/x86/kvm/mmu.c         |    5 +++++
>  arch/x86/kvm/paging_tmpl.h |   37 ++++++++++++++++++++++++++++++++++++-
>  2 files changed, 41 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 4c4274d..b5273c3 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3494,6 +3494,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
>       return mmu->last_pte_bitmap & (1 << index);
>  }
> 
> +#define PTTYPE_EPT 18 /* arbitrary */
> +#define PTTYPE PTTYPE_EPT
> +#include "paging_tmpl.h"
> +#undef PTTYPE
> +
>  #define PTTYPE 64
>  #include "paging_tmpl.h"
>  #undef PTTYPE
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 2c2f635..762c904 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -23,6 +23,13 @@
>   * so the code in this file is compiled twice, once per pte size.
>   */
> 
> +/*
> + * This is used to catch non-optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
> + * uses for EPT without A/D paging type.
> + */
> +extern u64 __pure __using_nonexistent_pte_bit(void)
> +            __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
> +
>  #if PTTYPE == 64
>       #define pt_element_t u64
>       #define guest_walker guest_walker64
> @@ -58,6 +65,21 @@
>       #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
>       #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
>       #define CMPXCHG cmpxchg
> +#elif PTTYPE == PTTYPE_EPT
> +     #define pt_element_t u64
> +     #define guest_walker guest_walkerEPT
> +     #define FNAME(name) ept_##name
> +     #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
> +     #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
> +     #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
> +     #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
> +     #define PT_LEVEL_BITS PT64_LEVEL_BITS
> +     #define PT_GUEST_ACCESSED_MASK 0
> +     #define PT_GUEST_DIRTY_MASK 0
> +     #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
> +     #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
> +     #define CMPXCHG cmpxchg64
> +     #define PT_MAX_FULL_LEVELS 4
>  #else
>       #error Invalid PTTYPE value
>  #endif
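
Nice trick, by the way, and worth spelling out since it is easy to miss: with
PT_GUEST_(ACCESSED|DIRTY)_MASK defined to 0, every use of the A/D shifts in the
template is supposed to sit behind a check of the corresponding mask, so it
becomes compile-time dead code and the call to __using_nonexistent_pte_bit()
hidden inside the SHIFT macros is optimized away; any unguarded use survives
and trips the __compiletime_error(). A standalone sketch of the idiom (names
invented, gcc with optimization assumed so dead branches really are removed):

/* Declared but never defined; gcc's error attribute fires at build time
 * if a call to it survives optimization (this is what the kernel's
 * __compiletime_error() expands to on recent gcc).
 */
extern int no_such_bit(void) __attribute__((error("bit does not exist")));

#define GUEST_DIRTY_MASK	0		/* feature not available       */
#define GUEST_DIRTY_SHIFT	no_such_bit()	/* poisoned: uses must be dead */

int gpte_is_dirty(unsigned long long gpte)
{
	if (!GUEST_DIRTY_MASK)		/* constant-folds to true...          */
		return 0;		/* ...so the shift below is dead code */
	return (gpte >> GUEST_DIRTY_SHIFT) & 1;
}
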
> @@ -115,7 +137,11 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> 
>  static inline int FNAME(is_present_gpte)(unsigned long pte)
>  {
> +#if PTTYPE != PTTYPE_EPT
>       return is_present_gpte(pte);
> +#else
> +     return pte & 7;
> +#endif
>  }
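
Tiny readability nit, feel free to ignore: the magic 7 could be spelled with
the existing VMX_EPT_*_MASK bits, e.g. (untested):

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return is_present_gpte(pte);
#else
	return pte & (VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK |
		      VMX_EPT_EXECUTABLE_MASK);
#endif
}
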
> 
>  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> @@ -165,9 +191,14 @@ no_present:
>  static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
>  {
>       unsigned access;
> -
> +#if PTTYPE == PTTYPE_EPT
> +     access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
> +             ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
> +             ACC_USER_MASK;
> +#else
>       access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
>       access &= ~(gpte >> PT64_NX_SHIFT);
> +#endif

Hmm, why not use shadow_x_mask and shadow_user_mask instead? PT_WRITABLE_MASK
is also suitable for EPT, so I guess we can remove the "#if/#else/#endif" after
that.

> 
>       return access;
>  }
> @@ -369,6 +400,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
>                                       access);
>  }
> 
> +#if PTTYPE != PTTYPE_EPT
>  static int FNAME(walk_addr_nested)(struct guest_walker *walker,
>                                  struct kvm_vcpu *vcpu, gva_t addr,
>                                  u32 access)
> @@ -376,6 +408,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
>       return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
>                                       addr, access);
>  }
> +#endif
> 
>  static bool
>  FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> @@ -803,6 +836,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
>       return gpa;
>  }
> 
> +#if PTTYPE != PTTYPE_EPT
>  static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
>                                     u32 access,
>                                     struct x86_exception *exception)
> @@ -821,6 +855,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
> 
>       return gpa;
>  }
> +#endif

Can we move the two "walk" helpers together, so that one "#if" can be dropped?
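
For instance (a sketch only; both bodies stay exactly as they are in the patch,
gva_to_gpa_nested just moves up next to walk_addr_nested so that a single
guard covers both):

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	/* ...existing body, unchanged... */
}
#endif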
