A guest cannot access quadrant 1 or 2 itself, as such an access would
result in an exception. Thus introduce the hcall H_COPY_TOFROM_GUEST,
to be used by a guest when it wants to perform an access to quadrant 1
or 2, for example when it wants to access memory for one of its nested
guests.

Also provide an implementation for the kvm-hv module.

Signed-off-by: Suraj Jitindar Singh <sjitindarsi...@gmail.com>
---
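For reviewers, a minimal caller-side sketch (not part of this patch) of how
an L1 hypervisor might issue H_COPY_TOFROM_GUEST to copy from one of its L2
guests. It assumes the in-kernel varargs helper plpar_hcall_norets() from
arch/powerpc/include/asm/hvcall.h and a hypothetical wrapper name; the point
is the argument layout (lpid, pid, eaddr, to, from, n) and that exactly one
of the "to"/"from" buffer arguments must be zero so the L0 can determine the
copy direction.

/*
 * Hypothetical example: read n bytes from effective address eaddr in the
 * L2 guest identified by (l2_lpid, pid) into the L1 kernel buffer "to".
 * Passing 0 as the "from" argument selects the load (copy-from-guest)
 * direction; the buffer is passed as an L1 guest real address.
 */
static long l1_copy_from_l2(int l2_lpid, int pid, unsigned long eaddr,
                            void *to, unsigned long n)
{
        return plpar_hcall_norets(H_COPY_TOFROM_GUEST, l2_lpid, pid,
                                  eaddr, __pa(to), 0UL, n);
}

The store direction is the mirror image: pass 0 as the "to" argument and the
L1 real address of the source buffer as the "from" argument.
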
 arch/powerpc/include/asm/hvcall.h      |  1 +
 arch/powerpc/include/asm/kvm_book3s.h  |  4 ++
 arch/powerpc/kvm/book3s_64_mmu_radix.c |  7 ++--
 arch/powerpc/kvm/book3s_hv.c           |  6 ++-
 arch/powerpc/kvm/book3s_hv_nested.c    | 75 ++++++++++++++++++++++++++++++++++
 5 files changed, 89 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 33a4fc891947..463c63a9fcf1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -335,6 +335,7 @@
 #define H_SET_PARTITION_TABLE  0xF800
 #define H_ENTER_NESTED         0xF804
 #define H_TLB_INVALIDATE       0xF808
+#define H_COPY_TOFROM_GUEST    0xF80C
 
 /* Values for 2nd argument to H_SET_MODE */
 #define H_SET_MODE_RESOURCE_SET_CIABR          1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index ea94110bfde4..720483733bb2 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -188,6 +188,9 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
                        struct kvm_vcpu *vcpu,
                        unsigned long ea, unsigned long dsisr);
+extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+                                       gva_t eaddr, void *to, void *from,
+                                       unsigned long n);
 extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                        void *to, unsigned long n);
 extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -302,6 +305,7 @@ long kvmhv_nested_init(void);
 void kvmhv_nested_exit(void);
 void kvmhv_vm_nested_init(struct kvm *kvm);
 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
 void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index e1e3ef710bd0..da89d10e5886 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -29,9 +29,9 @@
  */
 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
 
-static unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
-                                       gva_t eaddr, void *to, void *from,
-                                       unsigned long n)
+unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+                                             gva_t eaddr, void *to, void *from,
+                                             unsigned long n)
 {
        unsigned long quadrant, ret = n;
        int old_pid, old_lpid;
@@ -82,6 +82,7 @@ static unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
 
 static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                          void *to, void *from, unsigned long n)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2280bc4778f5..bd07f9b7c5e8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -996,7 +996,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (nesting_enabled(vcpu->kvm))
                        ret = kvmhv_do_nested_tlbie(vcpu);
                break;
-
+       case H_COPY_TOFROM_GUEST:
+               ret = H_FUNCTION;
+               if (nesting_enabled(vcpu->kvm))
+                       ret = kvmhv_copy_tofrom_guest_nested(vcpu);
+               break;
        default:
                return RESUME_HOST;
        }
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 991f40ce4eea..5903175751b4 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -462,6 +462,81 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Handle the H_COPY_TOFROM_GUEST hcall.
+ * r4 = L1 lpid of nested guest
+ * r5 = pid
+ * r6 = eaddr to access
+ * r7 = to buffer (L1 gpa)
+ * r8 = from buffer (L1 gpa)
+ * r9 = n bytes to copy
+ */
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
+{
+       struct kvm_nested_guest *gp;
+       int l1_lpid = kvmppc_get_gpr(vcpu, 4);
+       int pid = kvmppc_get_gpr(vcpu, 5);
+       gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
+       gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
+       gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
+       void *buf;
+       unsigned long n = kvmppc_get_gpr(vcpu, 9);
+       bool is_load = !!gp_to;
+       long rc;
+
+       if (gp_to && gp_from) /* One must be NULL to determine the direction */
+               return H_PARAMETER;
+
+       if (eaddr & (0xFFFUL << 52))
+               return H_PARAMETER;
+
+       buf = kzalloc(n, GFP_KERNEL);
+       if (!buf)
+               return H_NO_MEM;
+
+       gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
+       if (!gp) {
+               rc = H_PARAMETER;
+               goto out_free;
+       }
+
+       mutex_lock(&gp->tlb_lock);
+
+       if (is_load) {
+               /* Load from the nested guest into our buffer */
+               rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
+                                                    eaddr, buf, NULL, n);
+               if (rc)
+                       goto not_found;
+
+               /* Write what was loaded into our buffer back to the L1 guest */
+               rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
+               if (rc)
+                       goto not_found;
+       } else {
+               /* Load the data to be stored from the L1 guest into our buf */
+               rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
+               if (rc)
+                       goto not_found;
+
+               /* Store from our buffer into the nested guest */
+               rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
+                                                    eaddr, NULL, buf, n);
+               if (rc)
+                       goto not_found;
+       }
+
+out_unlock:
+       mutex_unlock(&gp->tlb_lock);
+       kvmhv_put_nested(gp);
+out_free:
+       kfree(buf);
+       return rc;
+not_found:
+       rc = H_NOT_FOUND;
+       goto out_unlock;
+}
+
+/*
  * Reload the partition table entry for a guest.
  * Caller must hold gp->tlb_lock.
  */
-- 
2.13.6
