On Wed, Oct 04, 2017 at 08:14:01AM -0500, Brijesh Singh wrote:
> The command is used for encrypting the guest memory region using the VM
> encryption key (VEK) created during KVM_SEV_LAUNCH_START.
> 
> Cc: Thomas Gleixner <t...@linutronix.de>
> Cc: Ingo Molnar <mi...@redhat.com>
> Cc: "H. Peter Anvin" <h...@zytor.com>
> Cc: Paolo Bonzini <pbonz...@redhat.com>
> Cc: "Radim Krčmář" <rkrc...@redhat.com>
> Cc: Joerg Roedel <j...@8bytes.org>
> Cc: Borislav Petkov <b...@suse.de>
> Cc: Tom Lendacky <thomas.lenda...@amd.com>
> Cc: x...@kernel.org
> Cc: k...@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
> ---
>  arch/x86/include/asm/kvm_host.h |   1 +
>  arch/x86/kvm/svm.c              | 193 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 192 insertions(+), 2 deletions(-)
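
(Side note, not about the cleanups below: for anyone following along, here is a
rough userspace-side sketch of how a VMM would drive this command, assuming the
KVM_MEMORY_ENCRYPT_OP vm ioctl and the kvm_sev_cmd/kvm_sev_launch_update_data
uapi structures introduced earlier in the series. The helper name and the error
handling are made up purely for illustration.)

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: encrypt @len bytes of guest memory at HVA @hva. */
static int launch_update_data(int vm_fd, int sev_fd, void *hva, uint32_t len)
{
	struct kvm_sev_launch_update_data update = {
		.uaddr = (uintptr_t)hva,	/* guest memory in the VMM's address space */
		.len   = len,			/* region to encrypt in place */
	};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_LAUNCH_UPDATE_DATA,
		.data   = (uintptr_t)&update,
		.sev_fd = sev_fd,		/* open fd to /dev/sev */
	};

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
		/* cmd.error carries the SEV firmware status code, if any. */
		return cmd.error ? (int)cmd.error : -1;

	return 0;
}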

Just cleanups:

---
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0d4e33eec78c..2bbfcd4ab6bc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -752,7 +752,7 @@ struct kvm_sev_info {
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
-       unsigned long locked;   /* Number of pages locked */
+       unsigned long pages_locked; /* Number of pages locked */
 };
 
 struct kvm_arch {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 41eeeb30b56d..989bc8a9936f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1590,24 +1590,24 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    int write)
 {
        struct kvm_sev_info *sev = &kvm->arch.sev_info;
-       unsigned long npages, pinned, size;
+       unsigned long npages, npinned, size;
        unsigned long locked, lock_limit;
        struct page **pages;
        int first, last;
 
-       /* calculate number of pages */
+       /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
        npages = (last - first + 1);
 
-       locked = sev->locked + npages;
+       locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
-               pr_err("locked(%lu) > lock_limit(%lu)\n", locked, lock_limit);
+               pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
                return NULL;
        }
 
-       /* Avoid using vmalloc for smaller buffer */
+       /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
                pages = vmalloc(size);
@@ -1617,20 +1617,21 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
        if (!pages)
                return NULL;
 
-       /* pin the user virtual address */
-       pinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
-       if (pinned != npages) {
-               pr_err("failed to lock %lu pages\n", npages);
+       /* Pin the user virtual address. */
+       npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+       if (npinned != npages) {
+               pr_err("SEV: Failure locking %lu pages.\n", npages);
                goto err;
        }
 
        *n = npages;
-       sev->locked = locked;
+       sev->pages_locked = locked;
 
        return pages;
+
 err:
-       if (pinned > 0)
-               release_pages(pages, pinned, 0);
+       if (npinned > 0)
+               release_pages(pages, npinned, 0);
 
        kvfree(pages);
        return NULL;
@@ -1643,7 +1644,7 @@ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
 
        release_pages(pages, npages, 0);
        kvfree(pages);
-       sev->locked -= npages;
+       sev->pages_locked -= npages;
 }
 
 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
@@ -5909,8 +5910,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!sev_guest(kvm))
                return -ENOTTY;
 
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
-                          sizeof(struct kvm_sev_launch_update_data)))
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -5921,7 +5921,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        size = params.len;
        vaddr_end = vaddr + size;
 
-       /* lock the user memory */
+       /* Lock the user memory. */
        inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
        if (!inpages) {
                ret = -ENOMEM;
@@ -5931,9 +5931,8 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        /*
         * The LAUNCH_UPDATE command will perform in-place encryption of the
         * memory content (i.e it will write the same memory region with C=1).
-        * Its possible that our cache may contain the data with C=0. Lets
-        * invalidate it so that we can get the recent contents after LAUNCH_UPDATE
-        * command completes.
+        * It's possible that the cache may contain the data with C=0, i.e.,
+        * unencrypted, so invalidate it first.
         */
        sev_clflush_pages(inpages, npages);
 
@@ -5941,12 +5940,12 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
                int offset, len;
 
                /*
-                * since user buffer may not be page aligned, calculate the
-                * offset within the page.
+                * If the user buffer is not page-aligned, calculate the offset
+                * within the page.
                 */
                offset = vaddr & (PAGE_SIZE - 1);
 
-               /* calculate the number of pages that can be encrypted in one go */
+               /* Calculate the number of pages that can be encrypted in one go. */
                pages = get_num_contig_pages(i, inpages, npages);
 
                len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
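
Not part of the cleanup, but since the page accounting above is easy to
misread, here is a tiny standalone illustration of the first/last/npages
computation in sev_pin_memory() and of the offset into the first page that the
LAUNCH_UPDATE loop uses. The example values, the 4K PAGE_* macros and the
main() wrapper exist only for the demonstration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long uaddr = 0x7f0000001800UL;	/* unaligned user buffer */
	unsigned long ulen  = 2 * PAGE_SIZE;	/* 8K starting mid-page */
	unsigned long first, last, npages, offset;

	/* Same arithmetic as sev_pin_memory(): inclusive page span. */
	first  = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last   = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = last - first + 1;

	/* Same arithmetic as the LAUNCH_UPDATE loop: offset within the first page. */
	offset = uaddr & (PAGE_SIZE - 1);

	/* An 8K buffer starting 0x800 into a page straddles 3 pages. */
	printf("npages=%lu offset=%#lx\n", npages, offset);

	return 0;
}

I.e., an unaligned buffer can span one page more than its length suggests, and
it is that npages count which gets charged against RLIMIT_MEMLOCK via
sev->pages_locked.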

-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.
