On 08/01/2013 03:10 PM, Gleb Natapov wrote:
> On Thu, Aug 01, 2013 at 03:00:12PM +0800, Xiao Guangrong wrote:
>> On 07/31/2013 10:48 PM, Gleb Natapov wrote:
>>> From: Nadav Har'El <n...@il.ibm.com>
>>>
>>> This is the first patch in a series which adds nested EPT support to KVM's
>>> nested VMX. Nested EPT means emulating EPT for an L1 guest so that L1 can use
>>> EPT when running a nested guest L2. When L1 uses EPT, it allows the L2 guest
>>> to set its own cr3 and take its own page faults without either of L0 or L1
>>> getting involved. This often significantly improves L2's performance over the
>>> previous two alternatives (shadow page tables over EPT, and shadow page
>>> tables over shadow page tables).
>>>
>>> This patch adds EPT support to paging_tmpl.h.
>>>
>>> paging_tmpl.h contains the code for reading and writing page tables. The code
>>> for 32-bit and 64-bit tables is very similar, but not identical, so
>>> paging_tmpl.h is #include'd twice in mmu.c, once with PTTYPE=32 and once
>>> with PTTYPE=64, and this generates the two sets of similar functions.
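
For readers new to this trick: paging_tmpl.h deliberately has no include
guard and acts as a preprocessor "template". A minimal standalone sketch
of the same pattern (illustrative names, not the real KVM macros):

    /* template.h -- deliberately no include guard */
    #if PTTYPE == 64
        #define pt_element_t u64
        #define FNAME(name) paging64_##name
    #elif PTTYPE == 32
        #define pt_element_t u32
        #define FNAME(name) paging32_##name
    #endif

    static int FNAME(is_present)(pt_element_t pte)
    {
        return pte & 1;         /* one body, one copy per PTTYPE */
    }

    #undef pt_element_t
    #undef FNAME

    /* mmu.c */
    #define PTTYPE 64
    #include "template.h"       /* emits paging64_is_present() */
    #undef PTTYPE
    #define PTTYPE 32
    #include "template.h"       /* emits paging32_is_present() */
    #undef PTTYPE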
>>>
>>> There are subtle but important differences between the format of EPT tables
>>> and that of ordinary x86 64-bit page tables, so for nested EPT we need a
>>> third set of functions to read the guest EPT table and to write the shadow
>>> EPT table.
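
For reference, the main format differences (a summary per the Intel SDM,
not KVM code):

    /* x86-64 PTE: bit 0 = present, bit 1 = writable, bit 2 = user,
     *             bit 63 = XD, set to *forbid* execution
     * EPT entry:  bit 0 = readable, bit 1 = writable, bit 2 = executable,
     *             no user/supervisor bit; accessed/dirty bits exist only
     *             on CPUs with the EPT A/D extension
     */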
>>>
>>> So this patch adds a third PTTYPE, PTTYPE_EPT, which creates functions
>>> (prefixed with "EPT") that correctly read and write EPT tables.
>>>
>>> Signed-off-by: Nadav Har'El <n...@il.ibm.com>
>>> Signed-off-by: Jun Nakajima <jun.nakaj...@intel.com>
>>> Signed-off-by: Xinhao Xu <xinhao...@intel.com>
>>> Signed-off-by: Yang Zhang <yang.z.zh...@intel.com>
>>> Signed-off-by: Gleb Natapov <g...@redhat.com>
>>> ---
>>>  arch/x86/kvm/mmu.c         |    5 +++++
>>>  arch/x86/kvm/paging_tmpl.h |   37 ++++++++++++++++++++++++++++++++++++-
>>>  2 files changed, 41 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>> index 4c4274d..b5273c3 100644
>>> --- a/arch/x86/kvm/mmu.c
>>> +++ b/arch/x86/kvm/mmu.c
>>> @@ -3494,6 +3494,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
>>>     return mmu->last_pte_bitmap & (1 << index);
>>>  }
>>>
>>> +#define PTTYPE_EPT 18 /* arbitrary */
>>> +#define PTTYPE PTTYPE_EPT
>>> +#include "paging_tmpl.h"
>>> +#undef PTTYPE
>>> +
>>>  #define PTTYPE 64
>>>  #include "paging_tmpl.h"
>>>  #undef PTTYPE
>>> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
>>> index 2c2f635..762c904 100644
>>> --- a/arch/x86/kvm/paging_tmpl.h
>>> +++ b/arch/x86/kvm/paging_tmpl.h
>>> @@ -23,6 +23,13 @@
>>>   * so the code in this file is compiled twice, once per pte size.
>>>   */
>>>
>>> +/*
>>> + * This is used to catch non-optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
>>> + * uses for EPT without A/D paging type.
>>> + */
>>> +extern u64 __pure __using_nonexistent_pte_bit(void)
>>> +          __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
>>> +
>>>  #if PTTYPE == 64
>>>     #define pt_element_t u64
>>>     #define guest_walker guest_walker64
>>> @@ -58,6 +65,21 @@
>>>     #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
>>>     #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
>>>     #define CMPXCHG cmpxchg
>>> +#elif PTTYPE == PTTYPE_EPT
>>> +   #define pt_element_t u64
>>> +   #define guest_walker guest_walkerEPT
>>> +   #define FNAME(name) ept_##name
>>> +   #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
>>> +   #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
>>> +   #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
>>> +   #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
>>> +   #define PT_LEVEL_BITS PT64_LEVEL_BITS
>>> +   #define PT_GUEST_ACCESSED_MASK 0
>>> +   #define PT_GUEST_DIRTY_MASK 0
>>> +   #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
>>> +   #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
>>> +   #define CMPXCHG cmpxchg64
>>> +   #define PT_MAX_FULL_LEVELS 4
>>>  #else
>>>     #error Invalid PTTYPE value
>>>  #endif
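
A note on the PT_GUEST_* trick above: since PT_GUEST_ACCESSED_MASK and
PT_GUEST_DIRTY_MASK are the constant 0 for EPT, every A/D code path
guarded by them is removed as dead code, so the erroring *_SHIFT call is
never emitted; it only fires if an unguarded use slips in. A minimal
standalone sketch of the mechanism, spelling out the gcc attribute that
__compiletime_error() wraps (names made up for illustration):

    extern unsigned long __must_not_survive(void)
            __attribute__((error("no dirty bit in this PT type")));

    #define GUEST_DIRTY_MASK  0
    #define GUEST_DIRTY_SHIFT __must_not_survive()

    static inline int pte_dirty(unsigned long pte)
    {
            if (GUEST_DIRTY_MASK)   /* constant 0: branch is removed */
                    return (pte >> GUEST_DIRTY_SHIFT) & 1;
            return 0;
    }

    /* An unconditional "pte >> GUEST_DIRTY_SHIFT" would leave a live
     * call to __must_not_survive() and break the build. */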
>>> @@ -115,7 +137,11 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
>>>
>>>  static inline int FNAME(is_present_gpte)(unsigned long pte)
>>>  {
>>> +#if PTTYPE != PTTYPE_EPT
>>>     return is_present_gpte(pte);
>>> +#else
>>> +   return pte & 7;
>>> +#endif
>>>  }
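
For context: unlike x86 PTEs, EPT entries have no dedicated present bit.
Bits 0, 1 and 2 are the read, write and execute permissions, and an entry
counts as present if any of them is set, hence the "pte & 7". With named
masks (illustrative names; KVM defines these bits as
VMX_EPT_{READABLE,WRITABLE,EXECUTABLE}_MASK) the check reads:

    return pte & (EPT_READ | EPT_WRITE | EPT_EXEC);   /* same as pte & 7 */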
>>>
>>>  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>>> @@ -165,9 +191,14 @@ no_present:
>>>  static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
>>>  {
>>>     unsigned access;
>>> -
>>> +#if PTTYPE == PTTYPE_EPT
>>> +   access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
>>> +           ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
>>> +           ACC_USER_MASK;
>>> +#else
>>>     access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
>>>     access &= ~(gpte >> PT64_NX_SHIFT);
>>> +#endif
>>
>> Hmm, why not use shadow_x_mask and shadow_user_mask instead? PT_WRITABLE_MASK
>> is also suitable for EPT, so I guess we can remove the "#if/#else/#endif" after
>> that.
>>
> shadow_x_mask and shadow_user_mask do not depend on guest paging mode,
> so they cannot be used here. Since we have to use ifdefs anyway, relying on
> VMX_EPT_WRITABLE_MASK == PT_WRITABLE_MASK is not necessary, and this makes
> the code easier to read.

Oh, yes, you are right.

Reviewed-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>

