The page requested for page-in can sometimes have transient
references, and hence cannot be migrated immediately. Retry a few
times before returning an error.

The same is true for non-migrated pages that get migrated in the
H_SVM_INIT_DONE handler.  Retry a few times before returning an error.

The H_SVM_PAGE_IN interface is enhanced to return H_BUSY if the page
is not in a migratable state.
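
For illustration, the retry boils down to a bounded loop around the
migration attempt. A minimal sketch follows; try_migrate_page() is a
hypothetical stand-in for kvmppc_svm_migrate_page(), and -2 marks a
transient failure, as in the patch below:

    /*
     * Illustrative sketch only; try_migrate_page() is a hypothetical
     * stand-in for kvmppc_svm_migrate_page().  A return of -2 means
     * the page had transient references and may become migratable later.
     */
    #define REPEAT_COUNT 10

    static int migrate_with_retry(unsigned long gfn)
    {
            int ret, repeat_count = REPEAT_COUNT;

            /* retry only as long as the failure is transient */
            do {
                    ret = try_migrate_page(gfn);
            } while (ret == -2 && repeat_count--);

            /* still transient after all retries: report busy */
            return (ret == -2) ? H_BUSY : ret;
    }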

Cc: Paul Mackerras <pau...@ozlabs.org>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Bharata B Rao <bhar...@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
Cc: Sukadev Bhattiprolu <suka...@linux.vnet.ibm.com>
Cc: Laurent Dufour <lduf...@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauer...@linux.ibm.com>
Cc: David Gibson <da...@gibson.dropbear.id.au>
Cc: Claudio Carvalho <cclau...@linux.ibm.com>
Cc: kvm-...@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org

Signed-off-by: Ram Pai <linux...@us.ibm.com>
---
 Documentation/powerpc/ultravisor.rst |   1 +
 arch/powerpc/kvm/book3s_hv_uvmem.c   | 106 ++++++++++++++++++++++++-----------
 2 files changed, 74 insertions(+), 33 deletions(-)

diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
index ba6b1bf..fe533ad 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -1035,6 +1035,7 @@ Return values
        * H_PARAMETER   if ``guest_pa`` is invalid.
        * H_P2          if ``flags`` is invalid.
        * H_P3          if ``order`` of page is invalid.
+       * H_BUSY        if ``page`` is not in a state to be paged in.
 
 Description
 ~~~~~~~~~~~
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 3274663..a206984 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -672,7 +672,7 @@ static int kvmppc_svm_migrate_page(struct vm_area_struct *vma,
                return ret;
 
        if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
-               ret = -1;
+               ret = -2;
                goto out_finalize;
        }
 
@@ -700,43 +700,73 @@ static int kvmppc_svm_migrate_page(struct vm_area_struct *vma,
        return ret;
 }
 
-int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
-               const struct kvm_memory_slot *memslot)
+/*
+ * Return true if some page migration failed because of a transient
+ * error, while the remaining pages migrated successfully.
+ * The caller can use this as a hint to retry.
+ *
+ * Return false otherwise. *ret indicates the success status
+ * of this call.
+ */
+static bool __kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+               const struct kvm_memory_slot *memslot, int *ret)
 {
        unsigned long gfn = memslot->base_gfn;
        struct vm_area_struct *vma;
        unsigned long start, end;
-       int ret = 0;
+       bool retry = false;
 
+       *ret = 0;
        while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
 
                mmap_read_lock(kvm->mm);
                start = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(start)) {
-                       ret = H_STATE;
+                       *ret = H_STATE;
                        goto next;
                }
 
                end = start + (1UL << PAGE_SHIFT);
                vma = find_vma_intersection(kvm->mm, start, end);
                if (!vma || vma->vm_start > start || vma->vm_end < end) {
-                       ret = H_STATE;
+                       *ret = H_STATE;
                        goto next;
                }
 
                mutex_lock(&kvm->arch.uvmem_lock);
-               ret = kvmppc_svm_migrate_page(vma, start, end,
+               *ret = kvmppc_svm_migrate_page(vma, start, end,
                                (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
                mutex_unlock(&kvm->arch.uvmem_lock);
-               if (ret)
-                       ret = H_STATE;
 
 next:
                mmap_read_unlock(kvm->mm);
+               if (*ret == -2) {
+                       retry = true;
+                       continue;
+               }
 
-               if (ret)
-                       break;
+               if (*ret)
+                       return false;
        }
+       return retry;
+}
+
+#define REPEAT_COUNT 10
+
+int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+               const struct kvm_memory_slot *memslot)
+{
+       int ret = 0, repeat_count = REPEAT_COUNT;
+
+       /*
+        * try migration of pages in the memslot 'repeat_count' number of
+        * times, provided each time migration fails because of transient
+        * errors only.
+        */
+       while (__kvmppc_uv_migrate_mem_slot(kvm, memslot, &ret) &&
+               repeat_count--)
+               ;
+
        return ret;
 }
 
@@ -812,7 +842,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
        struct vm_area_struct *vma;
        int srcu_idx;
        unsigned long gfn = gpa >> page_shift;
-       int ret;
+       int ret, repeat_count = REPEAT_COUNT;
 
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;
@@ -826,34 +856,44 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
        if (flags & H_PAGE_IN_SHARED)
                return kvmppc_share_page(kvm, gpa, page_shift);
 
-       ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       mmap_read_lock(kvm->mm);
 
-       start = gfn_to_hva(kvm, gfn);
-       if (kvm_is_error_hva(start))
-               goto out;
-
-       mutex_lock(&kvm->arch.uvmem_lock);
        /* Fail the page-in request of an already paged-in page */
-       if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
-               goto out_unlock;
+       mutex_lock(&kvm->arch.uvmem_lock);
+       ret = kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL);
+       mutex_unlock(&kvm->arch.uvmem_lock);
+       if (ret) {
+               srcu_read_unlock(&kvm->srcu, srcu_idx);
+               return H_PARAMETER;
+       }
 
-       end = start + (1UL << page_shift);
-       vma = find_vma_intersection(kvm->mm, start, end);
-       if (!vma || vma->vm_start > start || vma->vm_end < end)
-               goto out_unlock;
+       do {
+               ret = H_PARAMETER;
+               mmap_read_lock(kvm->mm);
 
-       if (kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift,
-                               true))
-               goto out_unlock;
+               start = gfn_to_hva(kvm, gfn);
+               if (kvm_is_error_hva(start)) {
+                       mmap_read_unlock(kvm->mm);
+                       break;
+               }
 
-       ret = H_SUCCESS;
+               end = start + (1UL << page_shift);
+               vma = find_vma_intersection(kvm->mm, start, end);
+               if (!vma || vma->vm_start > start || vma->vm_end < end) {
+                       mmap_read_unlock(kvm->mm);
+                       break;
+               }
+
+               mutex_lock(&kvm->arch.uvmem_lock);
+               ret = kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift, true);
+               mutex_unlock(&kvm->arch.uvmem_lock);
+
+               mmap_read_unlock(kvm->mm);
+       } while (ret == -2 && repeat_count--);
+
+       if (ret == -2)
+               ret = H_BUSY;
 
-out_unlock:
-       mutex_unlock(&kvm->arch.uvmem_lock);
-out:
-       mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
 }
-- 
1.8.3.1
