On Mon, Jul 24, 2017 at 03:02:58PM -0500, Brijesh Singh wrote:
> The command copies a plain text into guest memory and encrypts it using
> the VM encryption key. The command will be used for debug purposes
> (e.g setting breakpoint through gdbserver)

"setting a breakpoint" or "setting breakpoints"

> 
> Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
> ---
>  arch/x86/kvm/svm.c | 174 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 174 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 933384a..75dcaa9 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -6214,6 +6214,176 @@ static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
>       return ret;
>  }
>  
> +static int __sev_dbg_enc(struct kvm *kvm, unsigned long __user vaddr,

__sev_dbg_encrypt

> +                      unsigned long paddr, unsigned long __user dst_vaddr,
> +                      unsigned long dst_paddr, int size, int *error)
> +{
> +     struct page *src_tpage = NULL;
> +     struct page *dst_tpage = NULL;
> +     int ret, len = size;
> +
> +     /*
> +      * Debug encrypt command works with 16-byte aligned inputs. Function
> +      * handles the alignment issue as below:
> +      *
> +      * case 1
> +      *  If source buffer is not 16-byte aligned then we copy the data from
> +      *  source buffer into a PAGE aligned intermediate (src_tpage) buffer

"page-aligned"

> +      *  and use this intermediate buffer as source buffer
> +      *
> +      * case 2
> +      *  If destination buffer or length is not 16-byte aligned then:
> +      *   - decrypt portion of destination buffer into intermediate buffer
> +      *     (dst_tpage)
> +      *   - copy the source data into intermediate buffer
> +      *   - use the intermediate buffer as source buffer
> +      */
> +
> +     /* If source is not aligned  (case 1) */

So put the whole case 1 comment here directly - no need to reference it
again. Ditto for case 2 below.
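
I.e., something like this, reusing your own comment text:

	/*
	 * If the source buffer is not 16-byte aligned, copy the data from
	 * the source buffer into a page-aligned intermediate buffer
	 * (src_tpage) and use that as the source buffer.
	 */
	if (!IS_ALIGNED(vaddr, 16)) {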

> +     if (!IS_ALIGNED(vaddr, 16)) {
> +             src_tpage = alloc_page(GFP_KERNEL);
> +             if (!src_tpage)
> +                     return -ENOMEM;
> +
> +             if (copy_from_user(page_address(src_tpage),
> +                             (uint8_t *)vaddr, size)) {

Let it stick out.
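
I.e.:

		if (copy_from_user(page_address(src_tpage), (uint8_t *)vaddr, size)) {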

> +                     __free_page(src_tpage);
> +                     return -EFAULT;
> +             }
> +             paddr = __sme_page_pa(src_tpage);
> +
> +             /* flush the caches to ensure that DRAM has recent contents */

That comment needs improving: say *why* the flush is needed - presumably
so that the plaintext you just copied in through the host mapping is
actually in DRAM and not sitting only in the cache when the firmware
reads the page.

> +             clflush_cache_range(page_address(src_tpage), PAGE_SIZE);
> +     }
> +
> +     /* If destination buffer or length is not aligned (case 2) */
> +     if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
> +             int dst_offset;
> +
> +             dst_tpage = alloc_page(GFP_KERNEL);
> +             if (!dst_tpage) {
> +                     ret = -ENOMEM;
> +                     goto e_free;
> +             }
> +
> +             /* decrypt destination buffer into intermediate buffer */
> +             ret = __sev_dbg_dec(kvm,
> +                                 round_down(dst_paddr, 16),
> +                                 0,
> +                                 (unsigned long)page_address(dst_tpage),
> +                                 __sme_page_pa(dst_tpage),
> +                                 round_up(size, 16),
> +                                 error);
> +             if (ret)
> +                     goto e_free;
> +
> +             dst_offset = dst_paddr & 15;
> +
> +             /*
> +              * modify the intermediate buffer with data from source
> +              * buffer.
> +              */
> +             if (src_tpage)
> +                     memcpy(page_address(dst_tpage) + dst_offset,
> +                             page_address(src_tpage), size);
> +             else {
> +                     if (copy_from_user(page_address(dst_tpage) + dst_offset,
> +                                     (void *) vaddr, size)) {

Let it stick out.
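
I.e.:

			if (copy_from_user(page_address(dst_tpage) + dst_offset, (void *)vaddr, size)) {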

> +                             ret = -EFAULT;
> +                             goto e_free;
> +                     }
> +             }
> +
> +
> +             /* use intermediate buffer as source */
> +             paddr = __sme_page_pa(dst_tpage);
> +
> +             /* flush the caches to ensure that DRAM gets recent updates */

Same here - the comment needs to say why the flush is necessary.

> +             clflush_cache_range(page_address(dst_tpage), PAGE_SIZE);
> +
> +             /* now we have length and destination buffer aligned */
> +             dst_paddr = round_down(dst_paddr, 16);
> +             len = round_up(size, 16);
> +     }
> +
> +     ret = __sev_dbg_enc_dec(kvm, paddr, dst_paddr, len, error, true);

<---- newline here.

> +e_free:
> +     if (src_tpage)
> +             __free_page(src_tpage);
> +     if (dst_tpage)
> +             __free_page(dst_tpage);
> +     return ret;
> +}
> +
> +static int sev_dbg_encrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
> +{

This function is almost an exact duplication of sev_dbg_decrypt(). Add
a __sev_dbg_crypt() workhorse function which does it all and has callers
sev_dbg_decrypt() and sev_dbg_encrypt().
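
Something like this - completely untested and with the argument lists
elided, just to show the shape; the bool flag to select the direction is
only one way to do it:

static int __sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	int ret;

	/*
	 * All the argument unpacking, checking and per-chunk looping which
	 * sev_dbg_decrypt() and sev_dbg_encrypt() currently duplicate goes
	 * here; the only difference is which helper gets called at the end:
	 */
	if (dec)
		ret = __sev_dbg_dec(kvm, ...);
	else
		ret = __sev_dbg_enc(kvm, ...);

	return ret;
}

static int sev_dbg_decrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	return __sev_dbg_crypt(kvm, argp, true);
}

static int sev_dbg_encrypt(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	return __sev_dbg_crypt(kvm, argp, false);
}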

-- 
Regards/Gruss,
    Boris.

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)