Add two small infrastructure functions here to enable pinning, via
sev_get_page(), the SEV guest pages used by sev_launch_update_data().

Pin the memory for the data passed to launch_update_data() because it gets
encrypted before the guest is first run and must not be moved afterwards;
moving encrypted pages would corrupt them.
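
For illustration only (and not part of the patch), the sketch below shows the
hva -> gfn arithmetic that the new hva_to_gfn() helper performs, written with
plain integer types instead of the real KVM memslot structures. The struct and
field names are stand-ins, a 4K page size (PAGE_SHIFT == 12) is assumed, and
the result is equivalent to the patch's
((base_gfn << PAGE_SHIFT) + gpa_offset) >> PAGE_SHIFT form.

  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_SHIFT 12

  /* Stand-in for the struct kvm_memory_slot fields used by the helpers. */
  struct slot {
          uint64_t base_gfn;        /* first guest frame number of the slot */
          uint64_t userspace_addr;  /* host virtual address the slot maps */
          uint64_t npages;          /* slot size in pages */
  };

  /* Translate a host virtual address to a guest frame number. */
  static bool slot_hva_to_gfn(const struct slot *s, uint64_t hva,
                              uint64_t *gfn)
  {
          if (hva < s->userspace_addr ||
              hva >= s->userspace_addr + (s->npages << PAGE_SHIFT))
                  return false;   /* hva is not covered by this slot */

          *gfn = s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
          return true;
  }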

Co-developed-by: Brijesh Singh <brijesh.si...@amd.com>
Signed-off-by: eric van tassell <eric.vantass...@amd.com>
---
 arch/x86/kvm/svm/sev.c | 57 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 8d56d1afb33e..4a0157254fef 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -454,6 +454,37 @@ static int sev_get_page(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
        return 0;
 }
 
+static struct kvm_memory_slot *hva_to_memslot(struct kvm *kvm,
+                                             unsigned long hva)
+{
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *memslot;
+
+       kvm_for_each_memslot(memslot, slots) {
+               if (hva >= memslot->userspace_addr &&
+                   hva < memslot->userspace_addr +
+                             (memslot->npages << PAGE_SHIFT))
+                       return memslot;
+       }
+
+       return NULL;
+}
+
+static bool hva_to_gfn(struct kvm *kvm, unsigned long hva, gfn_t *gfn)
+{
+       struct kvm_memory_slot *memslot;
+       gpa_t gpa_offset;
+
+       memslot = hva_to_memslot(kvm, hva);
+       if (!memslot)
+               return false;
+
+       gpa_offset = hva - memslot->userspace_addr;
+       *gfn = ((memslot->base_gfn << PAGE_SHIFT) + gpa_offset) >> PAGE_SHIFT;
+
+       return true;
+}
+
 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
        unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
@@ -462,6 +493,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        struct sev_data_launch_update_data *data;
        struct page **inpages;
        int ret;
+       int srcu_idx;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -484,6 +516,31 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
                goto e_free;
        }
 
+       /*
+        * Increment the page ref count so that the pages do not get migrated
+        * or moved after LAUNCH_UPDATE_DATA has encrypted them in place.
+        */
+
+       /* ensure hva_to_gfn translations remain valid */
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+
+       for (i = 0; i < npages; i++) {
+               gfn_t gfn;
+
+               if (!hva_to_gfn(kvm, (vaddr + (i * PAGE_SIZE)) & PAGE_MASK, &gfn)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = sev_get_page(kvm, gfn, page_to_pfn(inpages[i]));
+               if (ret)
+                       break;
+       }
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       if (ret)
+               goto e_unpin;
+
        /*
         * The LAUNCH_UPDATE command will perform in-place encryption of the
         * memory content (i.e it will write the same memory region with C=1).
-- 
2.17.1
