Since pmd_large() is now always available regardless of config options, the local pmd_is_leaf() helper is redundant. Replace all of its uses with calls to pmd_large().
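
For reference, a minimal sketch of the pmd_large() helper this change relies
on (the real definition lives in the powerpc book3s64 headers and may differ
in detail); like the removed pmd_is_leaf(), it tests the _PAGE_PTE bit that
marks a leaf (huge-page) PMD:

	/*
	 * Sketch only, assuming the book3s64 definition: a leaf PMD maps a
	 * huge page directly instead of pointing to a page-table page, and
	 * is marked by _PAGE_PTE -- the same test pmd_is_leaf() performed.
	 */
	static inline int pmd_large(pmd_t pmd)
	{
		return !!(pmd_val(pmd) & _PAGE_PTE);
	}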
CC: Benjamin Herrenschmidt <b...@kernel.crashing.org>
CC: Michael Ellerman <m...@ellerman.id.au>
CC: Paul Mackerras <pau...@ozlabs.org>
CC: kvm-...@vger.kernel.org
CC: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Steven Price <steven.pr...@arm.com>
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f55ef071883f..1b57b4e3f819 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -363,12 +363,6 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
 	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
@@ -460,7 +454,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
 		if (!pmd_present(*p))
 			continue;
-		if (pmd_is_leaf(*p)) {
+		if (pmd_large(*p)) {
 			if (full) {
 				pmd_clear(p);
 			} else {
@@ -593,7 +587,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
 
-	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_large(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -662,7 +656,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_large(*pmd)) {
 		unsigned long lgpa = gpa & PMD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
-- 
2.20.1