Joerg Roedel wrote:
> This patch contains the changes to the KVM MMU necessary for support of the
> Nested Paging feature in AMD Barcelona and Phenom Processors.
>   

Good patch; it looks like things will be quite flexible with it.

> Signed-off-by: Joerg Roedel <[EMAIL PROTECTED]>
> ---
>  arch/x86/kvm/mmu.c |   79 ++++++++++++++++++++++++++++++++++++++++++++++++++--
>  arch/x86/kvm/mmu.h |    6 ++++
>  2 files changed, 82 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5e76963..5304d55 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1081,6 +1081,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
>       int i;
>       gfn_t root_gfn;
>       struct kvm_mmu_page *sp;
> +     int metaphysical = 0;
>  
>       root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
>  
> @@ -1089,14 +1090,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
>               hpa_t root = vcpu->arch.mmu.root_hpa;
>  
>               ASSERT(!VALID_PAGE(root));
> +             if (tdp_enabled)
> +                     metaphysical = 1;
>               sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
> -                                   PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
> +                                   PT64_ROOT_LEVEL, metaphysical,
> +                                   ACC_ALL, NULL, NULL);
>               root = __pa(sp->spt);
>               ++sp->root_count;
>               vcpu->arch.mmu.root_hpa = root;
>               return;
>       }
>  #endif
> +     metaphysical = !is_paging(vcpu);
> +     if (tdp_enabled)
> +             metaphysical = 1;
>       for (i = 0; i < 4; ++i) {
>               hpa_t root = vcpu->arch.mmu.pae_root[i];
>  
> @@ -1110,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
>               } else if (vcpu->arch.mmu.root_level == 0)
>                       root_gfn = 0;
>               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
> -                                   PT32_ROOT_LEVEL, !is_paging(vcpu),
> +                                   PT32_ROOT_LEVEL, metaphysical,
>                                     ACC_ALL, NULL, NULL);
>               root = __pa(sp->spt);
>               ++sp->root_count;
> @@ -1144,6 +1151,36 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
>                            error_code & PFERR_WRITE_MASK, gfn);
>  }
>  
> +static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
> +                             u32 error_code)
>   

You probably mean gpa_t here?
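
Something like this, I assume (just a sketch of the suggested signature; the body stays unchanged):

	static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				  u32 error_code)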

> +{
> +     struct page *page;
> +     int r;
> +
> +     ASSERT(vcpu);
> +     ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
> +
> +     r = mmu_topup_memory_caches(vcpu);
> +     if (r)
> +             return r;
> +
> +     down_read(&current->mm->mmap_sem);
> +     page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> +     if (is_error_page(page)) {
> +             kvm_release_page_clean(page);
> +             up_read(&current->mm->mmap_sem);
> +             return 1;
> +     }
>   

I don't know if it's worth checking this here; in the worst case we would map the error page and the host would still be safe.
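
i.e. the fault path could probably shrink to something like this (rough sketch of the simplification; the error-page refcount handling would still need a second look):

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	/* no is_error_page() check: in the worst case the error page
	 * gets mapped into the guest, which keeps the host safe */
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);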

> +     spin_lock(&vcpu->kvm->mmu_lock);
> +     kvm_mmu_free_some_pages(vcpu);
> +     r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
> +                      gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
> +     spin_unlock(&vcpu->kvm->mmu_lock);
> +     up_read(&current->mm->mmap_sem);
> +
> +     return r;
> +}
> +
>  static void nonpaging_free(struct kvm_vcpu *vcpu)
>  {
>       mmu_free_roots(vcpu);
> @@ -1237,7 +1274,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
>       return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
>  }
>  
> -static int init_kvm_mmu(struct kvm_vcpu *vcpu)
> +static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> +{
> +     struct kvm_mmu *context = &vcpu->arch.mmu;
> +
> +     context->new_cr3 = nonpaging_new_cr3;
> +     context->page_fault = tdp_page_fault;
> +     context->free = nonpaging_free;
> +     context->prefetch_page = nonpaging_prefetch_page;
> +     context->shadow_root_level = TDP_ROOT_LEVEL;
> +     context->root_hpa = INVALID_PAGE;
> +
> +     if (!is_paging(vcpu)) {
> +             context->gva_to_gpa = nonpaging_gva_to_gpa;
> +             context->root_level = 0;
> +     } else if (is_long_mode(vcpu)) {
> +             context->gva_to_gpa = paging64_gva_to_gpa;
> +             context->root_level = PT64_ROOT_LEVEL;
> +     } else if (is_pae(vcpu)) {
> +             context->gva_to_gpa = paging64_gva_to_gpa;
> +             context->root_level = PT32E_ROOT_LEVEL;
> +     } else {
> +             context->gva_to_gpa = paging32_gva_to_gpa;
> +             context->root_level = PT32_ROOT_LEVEL;
> +     }
> +
> +     return 0;
> +}
> +
> +static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
>  {
>       ASSERT(vcpu);
>       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
> @@ -1252,6 +1317,14 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
>               return paging32_init_context(vcpu);
>  }
>  
> +static int init_kvm_mmu(struct kvm_vcpu *vcpu)
> +{
> +     if (tdp_enabled)
> +             return init_kvm_tdp_mmu(vcpu);
> +     else
> +             return init_kvm_softmmu(vcpu);
> +}
> +
>  static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
>  {
>       ASSERT(vcpu);
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 1fce19e..e64e9f5 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -3,6 +3,12 @@
>  
>  #include <linux/kvm_host.h>
>  
> +#ifdef CONFIG_X86_64
> +#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
> +#else
> +#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
> +#endif
> +
>  static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
>  {
>       if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
>   


-- 
woof.

