On 25/03/2016 14:19, Xiao Guangrong wrote:
>       WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
> -     pfec |= PFERR_PRESENT_MASK;
> +     errcode = PFERR_PRESENT_MASK;
>  
>       if (unlikely(mmu->pkru_mask)) {
>               u32 pkru_bits, offset;
> @@ -193,11 +193,11 @@ static inline u8 permission_fault(struct kvm_vcpu 
> *vcpu, struct kvm_mmu *mmu,
>                       ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - 
> PT_USER_SHIFT));
>  
>               pkru_bits &= mmu->pkru_mask >> offset;
> -             pfec |= -pkru_bits & PFERR_PK_MASK;
> +             errcode |= -pkru_bits & PFERR_PK_MASK;
>               fault |= (pkru_bits != 0);
>       }
>  
> -     return -(uint32_t)fault & pfec;
> +     return -(uint32_t)fault & errcode;
>  }

I have another doubt here.

If you get a fault due to U=0, PFERR_PK_MASK would not be set; this is
checked implicitly through the pte_user bit, which we moved to
PFERR_RSVD_BIT.  However, if you get a fault due to W=0 _and_
PKRU.AD=1 or PKRU.WD=1 for the page's protection key, would the PK
bit be set in the error code?  If not, we would need something like
this:

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 81bffd1524c4..6835a551a5c4 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -172,12 +172,11 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, 
struct kvm_mmu *mmu,
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
-       bool fault = (mmu->permissions[index] >> pte_access) & 1;
 
        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
-       errcode = PFERR_PRESENT_MASK;
+       errcode = (mmu->permissions[index] >> pte_access) & PFERR_PRESENT_MASK;
 
-       if (unlikely(mmu->pkru_mask)) {
+       if (unlikely(-errcode & mmu->pkru_mask)) {
                u32 pkru_bits, offset;
 
                /*
@@ -188,11 +187,10 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, 
struct kvm_mmu *mmu,
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - 
PT_USER_SHIFT));
 
                pkru_bits &= mmu->pkru_mask >> offset;
-               errcode |= -pkru_bits & PFERR_PK_MASK;
-               fault |= (pkru_bits != 0);
+               errcode |= pkru_bits ? PFERR_PK_MASK | PFERR_PRESENT_MASK : 0;
        }
 
-       return -(uint32_t)fault & errcode;
+       return errcode;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);


Thanks,

Paolo

Reply via email to