From: David Stevens <steve...@chromium.org>

Convert usages of the deprecated gfn_to_pfn functions to the new
gfn_to_pfn_page functions.

Signed-off-by: David Stevens <steve...@chromium.org>
---
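A minimal sketch of the calling convention this conversion relies on
(review note only; the __gfn_to_pfn_page_memslot() signature is copied
from the hunks below, and its exact semantics are defined earlier in
this series):

	struct page *page;
	kvm_pfn_t pfn;

	/*
	 * Like __gfn_to_pfn_memslot(), but additionally returns the
	 * struct page backing the pfn via @page, or NULL when the pfn
	 * is not backed by a refcounted page.
	 */
	pfn = __gfn_to_pfn_page_memslot(memslot, gfn, false, NULL,
					write_fault, &writable, NULL, &page);

	/*
	 * The caller owns one page reference iff @page is non-NULL,
	 * and drops it with put_page() rather than
	 * kvm_release_pfn_clean().
	 */
	if (page) {
		kvm_set_pfn_accessed(pfn);
		put_page(page);
	}
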
 arch/arm64/kvm/mmu.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
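Along the same lines, transparent_hugepage_adjust() now moves that
reference by hand when it upgrades the fault to a block mapping;
roughly (names as in the hunk below):

	put_page(*page);		/* drop the ref on the faulted page */
	pfn &= ~(PTRS_PER_PMD - 1);	/* align pfn down to the PMD boundary */
	*pfnp = pfn;
	*page = pfn_to_page(pfn);	/* re-take a ref on the aligned base page */
	get_page(*page);
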

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c10207fed2f3..c29da690ed74 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -780,7 +780,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
 static unsigned long
 transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
                            unsigned long hva, kvm_pfn_t *pfnp,
-                           phys_addr_t *ipap)
+                           struct page **page, phys_addr_t *ipap)
 {
        kvm_pfn_t pfn = *pfnp;
 
@@ -789,7 +789,7 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
         * sure that the HVA and IPA are sufficiently aligned and that the
         * block map is contained within the memslot.
         */
-       if (kvm_is_transparent_hugepage(pfn) &&
+       if (*page && kvm_is_transparent_hugepage(pfn) &&
            fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
                /*
                 * The address we faulted on is backed by a transparent huge
@@ -810,10 +810,11 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
                 * page accordingly.
                 */
                *ipap &= PMD_MASK;
-               kvm_release_pfn_clean(pfn);
+               put_page(*page);
                pfn &= ~(PTRS_PER_PMD - 1);
-               kvm_get_pfn(pfn);
                *pfnp = pfn;
+               *page = pfn_to_page(pfn);
+               get_page(*page);
 
                return PMD_SIZE;
        }
@@ -837,6 +838,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        short vma_shift;
        gfn_t gfn;
        kvm_pfn_t pfn;
+       struct page *page;
        bool logging_active = memslot_is_logging(memslot);
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
@@ -933,8 +935,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         */
        smp_rmb();
 
-       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-                                  write_fault, &writable, NULL);
+       pfn = __gfn_to_pfn_page_memslot(memslot, gfn, false, NULL,
+                                       write_fault, &writable, NULL, &page);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
                return 0;
@@ -967,7 +969,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         */
        if (vma_pagesize == PAGE_SIZE && !force_pte)
                vma_pagesize = transparent_hugepage_adjust(memslot, hva,
-                                                          &pfn, &fault_ipa);
+                                                          &pfn, &page,
+                                                          &fault_ipa);
        if (writable)
                prot |= KVM_PGTABLE_PROT_W;
 
@@ -999,14 +1002,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        /* Mark the page dirty only if the fault is handled successfully */
        if (writable && !ret) {
-               kvm_set_pfn_dirty(pfn);
+               if (page)
+                       kvm_set_pfn_dirty(pfn);
                mark_page_dirty_in_slot(kvm, memslot, gfn);
        }
 
 out_unlock:
        spin_unlock(&kvm->mmu_lock);
-       kvm_set_pfn_accessed(pfn);
-       kvm_release_pfn_clean(pfn);
+       if (page) {
+               kvm_set_pfn_accessed(pfn);
+               put_page(page);
+       }
        return ret != -EAGAIN ? ret : 0;
 }
 
-- 
2.32.0.93.g670b81a890-goog
