On Monday 07 December 2009 18:47:10 Avi Kivity wrote:
> Some bits of cr4 can be owned by the guest on vmx, so when we read them,
> we copy them to the vcpu structure.  In preparation for making the set of
> guest-owned bits dynamic, use helpers to access these bits so we don't need
> to know where the bit resides.
> 
> No changes to svm since all bits are host-owned there.
> 
> Signed-off-by: Avi Kivity <a...@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/kvm_cache_regs.h   |   12 ++++++++++++
>  arch/x86/kvm/mmu.h              |    5 +++--
>  arch/x86/kvm/vmx.c              |   13 ++++++++-----
>  arch/x86/kvm/x86.c              |   16 ++++++----------
>  5 files changed, 30 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h
>  b/arch/x86/include/asm/kvm_host.h index da6dee8..e9f4f12 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -272,6 +272,7 @@ struct kvm_vcpu_arch {
>       unsigned long cr2;
>       unsigned long cr3;
>       unsigned long cr4;
> +     unsigned long cr4_guest_owned_bits;
>       unsigned long cr8;
>       u32 hflags;
>       u64 pdptrs[4]; /* pae */
> diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
> index 7bcc5b6..35acc36 100644
> --- a/arch/x86/kvm/kvm_cache_regs.h
> +++ b/arch/x86/kvm/kvm_cache_regs.h
> @@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu,
>  int index) return vcpu->arch.pdptrs[index];
>  }
> 
> +static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
> +{
> +     if (mask & vcpu->arch.cr4_guest_owned_bits)
> +             kvm_x86_ops->decache_cr4_guest_bits(vcpu);
> +     return vcpu->arch.cr4 & mask;
> +}
> +
> +static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
> +{
> +     return kvm_read_cr4_bits(vcpu, ~0UL);
> +}
> +
>  #endif
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 61a1b38..4567d80 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -2,6 +2,7 @@
>  #define __KVM_X86_MMU_H
> 
>  #include <linux/kvm_host.h>
> +#include "kvm_cache_regs.h"
> 
>  #define PT64_PT_BITS 9
>  #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
> @@ -64,12 +65,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
> 
>  static inline int is_pae(struct kvm_vcpu *vcpu)
>  {
> -     return vcpu->arch.cr4 & X86_CR4_PAE;
> +     return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
>  }
> 
>  static inline int is_pse(struct kvm_vcpu *vcpu)
>  {
> -     return vcpu->arch.cr4 & X86_CR4_PSE;
> +     return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
>  }
> 
>  static inline int is_paging(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 5ef820e..ae95a0c 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1612,8 +1612,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> 
>  static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
>  {
> -     vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
> -     vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
> +     ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
> +
> +     vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
> +     vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
>  }
> 
>  static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
> @@ -1658,7 +1660,7 @@ static void ept_update_paging_mode_cr0(unsigned long
>  *hw_cr0, (CPU_BASED_CR3_LOAD_EXITING |
>                             CPU_BASED_CR3_STORE_EXITING));
>               vcpu->arch.cr0 = cr0;
> -             vmx_set_cr4(vcpu, vcpu->arch.cr4);
> +             vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
>       } else if (!is_paging(vcpu)) {
>               /* From nonpaging to paging */
>               vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
> @@ -1666,7 +1668,7 @@ static void ept_update_paging_mode_cr0(unsigned long
>  *hw_cr0, ~(CPU_BASED_CR3_LOAD_EXITING |
>                              CPU_BASED_CR3_STORE_EXITING));
>               vcpu->arch.cr0 = cr0;
> -             vmx_set_cr4(vcpu, vcpu->arch.cr4);
> +             vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
>       }

One more place still accesses cr4 directly, in ept_update_paging_mode_cr4():

>        } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))                    
>                *hw_cr4 &= ~X86_CR4_PAE;                               

The others look fine to me.

-- 
regards
Yang, Sheng
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to