Use the bitwise inverse of KVM_HPAGE_GFN_MASK() to compute the GFN offset
mask instead of open coding the equivalent in a variety of locations.
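
As a sanity check, the identity being relied on can be sketched as a
standalone C program. Note this is a minimal sketch, not kernel code: it
assumes KVM_HPAGE_GFN_MASK() is defined as the bitwise inverse of the
per-level GFN offset mask (as a prior patch in this series would
introduce) and hardcodes x86's 9-bits-per-level GFN layout.

  /* Hypothetical sketch of the mask identity; not the kernel's headers. */
  #include <assert.h>

  #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
  #define KVM_PAGES_PER_HPAGE(x) (1UL << KVM_HPAGE_GFN_SHIFT(x))
  /* Assumed definition from earlier in the series: */
  #define KVM_HPAGE_GFN_MASK(x)  (~(KVM_PAGES_PER_HPAGE(x) - 1))

  int main(void)
  {
          int level;

          /* Levels 1..3 map to 4K/2M/1G pages on x86. */
          for (level = 1; level <= 3; level++)
                  assert(~KVM_HPAGE_GFN_MASK(level) ==
                         KVM_PAGES_PER_HPAGE(level) - 1);
          return 0;
  }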

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/x86/kvm/mmu/mmu.c      | 2 +-
 arch/x86/kvm/mmu/mmutrace.h | 2 +-
 arch/x86/kvm/mmu/tdp_mmu.c  | 2 +-
 arch/x86/kvm/x86.c          | 6 +++---
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3bfc7ee44e51..9fb50c666ec5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2827,7 +2827,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
         * mmu_notifier_retry() was successful and mmu_lock is held, so
         * the pmd can't be split from under us.
         */
-       mask = KVM_PAGES_PER_HPAGE(level) - 1;
+       mask = ~KVM_HPAGE_GFN_MASK(level);
        VM_BUG_ON((gfn & mask) != (pfn & mask));
        *pfnp = pfn & ~mask;
 
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 213699b27b44..4432ca3c7e4e 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -372,7 +372,7 @@ TRACE_EVENT(
 
        TP_fast_assign(
                __entry->gfn = addr >> PAGE_SHIFT;
-               __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+               __entry->pfn = pfn | (__entry->gfn & ~KVM_HPAGE_GFN_MASK(level));
                __entry->level = level;
        ),
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 27e381c9da6c..681686608c0b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -209,7 +209,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 
        WARN_ON(level > PT64_ROOT_MAX_LEVEL);
        WARN_ON(level < PG_LEVEL_4K);
-       WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+       WARN_ON(gfn & ~KVM_HPAGE_GFN_MASK(level));
 
        /*
         * If this warning were to trigger it would indicate that there was a
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 397f599b20e5..faf4c4ddde94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10451,16 +10451,16 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
 
                slot->arch.lpage_info[i - 1] = linfo;
 
-               if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+               if (slot->base_gfn & ~KVM_HPAGE_GFN_MASK(level))
                        linfo[0].disallow_lpage = 1;
-               if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+               if ((slot->base_gfn + npages) & ~KVM_HPAGE_GFN_MASK(level))
                        linfo[lpages - 1].disallow_lpage = 1;
                ugfn = slot->userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
                 * other, disable large page support for this slot.
                 */
-               if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
+               if ((slot->base_gfn ^ ugfn) & ~KVM_HPAGE_GFN_MASK(level)) {
                        unsigned long j;
 
                        for (j = 0; j < lpages; ++j)
-- 
2.28.0
