[RFC PATCH v2 12/20] x86: Add support for changing memory encryption attribute

2016-08-22 Thread Tom Lendacky
This patch adds support to change the memory encryption attribute for
one or more memory pages.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/cacheflush.h  |3 +
 arch/x86/include/asm/mem_encrypt.h |   13 ++
 arch/x86/mm/mem_encrypt.c  |   43 +
 arch/x86/mm/pageattr.c |   75 
 4 files changed, 134 insertions(+)
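
For context (not part of the patch): a minimal usage sketch of the interface
being added. The surrounding function, the buffer and its size are hypothetical;
sme_set_mem_dec()/sme_set_mem_enc() are the helpers introduced below.

	#include <linux/gfp.h>
	#include <asm/mem_encrypt.h>

	/*
	 * Hypothetical caller, for illustration only: map a two-page buffer
	 * decrypted so it can be shared with another agent, then map it
	 * encrypted again.  The helpers round the size up to whole pages and
	 * are no-ops when SME is inactive (sme_me_mask == 0).
	 */
	static int example_share_buffer(void)
	{
		void *buf = (void *)__get_free_pages(GFP_KERNEL, 1); /* 2 pages */
		int ret;

		if (!buf)
			return -ENOMEM;

		ret = sme_set_mem_dec(buf, 2 * PAGE_SIZE);
		if (ret)
			goto out_free;

		/* ... use the now-decrypted buffer ... */

		ret = sme_set_mem_enc(buf, 2 * PAGE_SIZE);

	out_free:
		free_pages((unsigned long)buf, 1);
		return ret;
	}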

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 61518cf..bfb08e5 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -13,6 +13,7 @@
  * Executability : eXeutable, NoteXecutable
  * Read/Write: ReadOnly, ReadWrite
  * Presence  : NotPresent
+ * Encryption: ENCrypted, DECrypted
  *
  * Within a category, the attributes are mutually exclusive.
  *
@@ -48,6 +49,8 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_enc(unsigned long addr, int numpages);
+int set_memory_dec(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 2785493..5616ed1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -23,6 +23,9 @@ extern unsigned long sme_me_mask;
 
 u8 sme_get_me_loss(void);
 
+int sme_set_mem_enc(void *vaddr, unsigned long size);
+int sme_set_mem_dec(void *vaddr, unsigned long size);
+
 void __init sme_early_mem_enc(resource_size_t paddr,
  unsigned long size);
 void __init sme_early_mem_dec(resource_size_t paddr,
@@ -44,6 +47,16 @@ static inline u8 sme_get_me_loss(void)
return 0;
 }
 
+static inline int sme_set_mem_enc(void *vaddr, unsigned long size)
+{
+   return 0;
+}
+
+static inline int sme_set_mem_dec(void *vaddr, unsigned long size)
+{
+   return 0;
+}
+
 static inline void __init sme_early_mem_enc(resource_size_t paddr,
unsigned long size)
 {
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index f35a646..b0f39c5 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -14,12 +14,55 @@
 #include 
 
 #include 
+#include 
 #include 
 #include 
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char me_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
 
+int sme_set_mem_enc(void *vaddr, unsigned long size)
+{
+   unsigned long addr, numpages;
+
+   if (!sme_me_mask)
+   return 0;
+
+   addr = (unsigned long)vaddr & PAGE_MASK;
+   numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+   /*
+* The set_memory_xxx functions take an integer for numpages, make
+* sure it doesn't exceed that.
+*/
+   if (numpages > INT_MAX)
+   return -EINVAL;
+
+   return set_memory_enc(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(sme_set_mem_enc);
+
+int sme_set_mem_dec(void *vaddr, unsigned long size)
+{
+   unsigned long addr, numpages;
+
+   if (!sme_me_mask)
+   return 0;
+
+   addr = (unsigned long)vaddr & PAGE_MASK;
+   numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+   /*
+* The set_memory_xxx functions take an integer for numpages, make
+* sure it doesn't exceed that.
+*/
+   if (numpages > INT_MAX)
+   return -EINVAL;
+
+   return set_memory_dec(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(sme_set_mem_dec);
+
 /*
  * This routine does not change the underlying encryption setting of the
  * page(s) that map this memory. It assumes that eventually the memory is
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 72c292d..0ba9382 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1728,6 +1728,81 @@ int set_memory_4k(unsigned long addr, int numpages)
__pgprot(0), 1, 0, NULL);
 }
 
+static int __set_memory_enc_dec(struct cpa_data *cpa)
+{
+   unsigned long addr;
+   int numpages;
+   int ret;
+
+   if (*cpa->vaddr & ~PAGE_MASK) {
+   *cpa->vaddr &= PAGE_MASK;
+
+   /* People should not be passing in unaligned addresses */
+   WARN_ON_ONCE(1);
+   }
+
+   addr = *cpa->vaddr;
+   numpages = cpa->numpages;
+
+   /* Must avoid aliasing mappings in the highmem code */
+   kmap_flush_unused();
+   vm_unmap_aliases();
+
+   ret = __change_page_attr_set_clr(cpa, 1);
+
+   /* Check whether we really changed something */
+   if (!(cpa->flags & CPA_FLUSHTLB))
+   goto out;
+
+   /*
+* On success we use CLFLUSH, when the CPU supports it to
+* avoid the WBINVD.
+*/
+   if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH))
+   cpa_flush_range(addr, numpages, 1);
+   else
+   cpa_flush_all(1);
+
+out:
+   return ret;
+}

Re: [RFC PATCH v2 12/20] x86: Add support for changing memory encryption attribute

2016-09-09 Thread Borislav Petkov
On Mon, Aug 22, 2016 at 05:37:49PM -0500, Tom Lendacky wrote:
> This patch adds support to change the memory encryption attribute for
> one or more memory pages.
> 
> Signed-off-by: Tom Lendacky 
> ---
>  arch/x86/include/asm/cacheflush.h  |3 +
>  arch/x86/include/asm/mem_encrypt.h |   13 ++
>  arch/x86/mm/mem_encrypt.c  |   43 +
>  arch/x86/mm/pageattr.c |   75
>  4 files changed, 134 insertions(+)

...

> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
> index 72c292d..0ba9382 100644
> --- a/arch/x86/mm/pageattr.c
> +++ b/arch/x86/mm/pageattr.c
> @@ -1728,6 +1728,81 @@ int set_memory_4k(unsigned long addr, int numpages)
>   __pgprot(0), 1, 0, NULL);
>  }
>  
> +static int __set_memory_enc_dec(struct cpa_data *cpa)
> +{
> + unsigned long addr;
> + int numpages;
> + int ret;
> +
> + if (*cpa->vaddr & ~PAGE_MASK) {
> + *cpa->vaddr &= PAGE_MASK;
> +
> + /* People should not be passing in unaligned addresses */
> + WARN_ON_ONCE(1);

Let's make this more user-friendly:

	if (WARN_ONCE(*cpa->vaddr & ~PAGE_MASK, "Misaligned address: 0x%lx\n",
		      *cpa->vaddr))
		*cpa->vaddr &= PAGE_MASK;

> + }
> +
> + addr = *cpa->vaddr;
> + numpages = cpa->numpages;
> +
> + /* Must avoid aliasing mappings in the highmem code */
> + kmap_flush_unused();
> + vm_unmap_aliases();
> +
> + ret = __change_page_attr_set_clr(cpa, 1);
> +
> + /* Check whether we really changed something */
> + if (!(cpa->flags & CPA_FLUSHTLB))
> + goto out;
> +
> + /*
> +  * On success we use CLFLUSH, when the CPU supports it to
> +  * avoid the WBINVD.
> +  */
> + if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH))
> + cpa_flush_range(addr, numpages, 1);
> + else
> + cpa_flush_all(1);

So if we fail (ret != 0) we do WBINVD unconditionally even if we don't
have to?

Don't you want this instead:

	ret = __change_page_attr_set_clr(cpa, 1);
	if (ret)
		goto out;

	/* Check whether we really changed something */
	if (!(cpa->flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * On success we use CLFLUSH, when the CPU supports it to
	 * avoid the WBINVD.
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH))
		cpa_flush_range(addr, numpages, 1);
	else
		cpa_flush_all(1);

out:
	return ret;
}

?

-- 
Regards/Gruss,
Boris.

ECO tip #101: Trim your mails when you reply.


Re: [RFC PATCH v2 12/20] x86: Add support for changing memory encryption attribute

2016-09-12 Thread Tom Lendacky
On 09/09/2016 12:23 PM, Borislav Petkov wrote:
> On Mon, Aug 22, 2016 at 05:37:49PM -0500, Tom Lendacky wrote:
>> This patch adds support to change the memory encryption attribute for
>> one or more memory pages.
>>
>> Signed-off-by: Tom Lendacky 
>> ---
>>  arch/x86/include/asm/cacheflush.h  |3 +
>>  arch/x86/include/asm/mem_encrypt.h |   13 ++
>>  arch/x86/mm/mem_encrypt.c  |   43 +
>>  arch/x86/mm/pageattr.c |   75
>>  4 files changed, 134 insertions(+)
> 
> ...
> 
>> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
>> index 72c292d..0ba9382 100644
>> --- a/arch/x86/mm/pageattr.c
>> +++ b/arch/x86/mm/pageattr.c
>> @@ -1728,6 +1728,81 @@ int set_memory_4k(unsigned long addr, int numpages)
>>  __pgprot(0), 1, 0, NULL);
>>  }
>>  
>> +static int __set_memory_enc_dec(struct cpa_data *cpa)
>> +{
>> +unsigned long addr;
>> +int numpages;
>> +int ret;
>> +
>> +if (*cpa->vaddr & ~PAGE_MASK) {
>> +*cpa->vaddr &= PAGE_MASK;
>> +
>> +/* People should not be passing in unaligned addresses */
>> +WARN_ON_ONCE(1);
> 
> Let's make this more user-friendly:
> 
>   if (WARN_ONCE(*cpa->vaddr & ~PAGE_MASK, "Misaligned address: 0x%lx\n", 
> *cpa->vaddr))
>   *cpa->vaddr &= PAGE_MASK;

Will do.

> 
>> +}
>> +
>> +addr = *cpa->vaddr;
>> +numpages = cpa->numpages;
>> +
>> +/* Must avoid aliasing mappings in the highmem code */
>> +kmap_flush_unused();
>> +vm_unmap_aliases();
>> +
>> +ret = __change_page_attr_set_clr(cpa, 1);
>> +
>> +/* Check whether we really changed something */
>> +if (!(cpa->flags & CPA_FLUSHTLB))
>> +goto out;
>> +
>> +/*
>> + * On success we use CLFLUSH, when the CPU supports it to
>> + * avoid the WBINVD.
>> + */
>> +if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH))
>> +cpa_flush_range(addr, numpages, 1);
>> +else
>> +cpa_flush_all(1);
> 
> So if we fail (ret != 0) we do WBINVD unconditionally even if we don't
> have to?

Looking at __change_page_attr_set_clr() isn't it possible for some of
the pages to be changed before an error is encountered since it is
looping?  If so, we may still need to flush. The CPA_FLUSHTLB flag
should take care of a failing case where no attributes have actually
been changed.
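
(Illustrative only -- a much-simplified, hypothetical shape of that loop,
not the actual __change_page_attr_set_clr() code:)

	static int simplified_set_clr(struct cpa_data *cpa)
	{
		int ret = 0;

		while (cpa->numpages) {
			/* may set CPA_FLUSHTLB when a mapping really changes */
			ret = __change_page_attr(cpa, 1);
			if (ret)
				break;	/* pages changed so far still need flushing */

			/* simplified: advance one page per iteration */
			*cpa->vaddr += PAGE_SIZE;
			cpa->numpages--;
		}

		return ret;
	}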

Thanks,
Tom

> 
> Don't you want this instead:
> 
> ret = __change_page_attr_set_clr(cpa, 1);
> if (ret)
> goto out;
> 
> /* Check whether we really changed something */
> if (!(cpa->flags & CPA_FLUSHTLB))
> goto out;
> 
> /*
>  * On success we use CLFLUSH, when the CPU supports it to
>  * avoid the WBINVD.
>  */
> if (static_cpu_has(X86_FEATURE_CLFLUSH))
> cpa_flush_range(addr, numpages, 1);
> else
> cpa_flush_all(1);
> 
> out:
> return ret;
> }
> 
> ?
> 


Re: [RFC PATCH v2 12/20] x86: Add support for changing memory encryption attribute

2016-09-12 Thread Borislav Petkov
On Mon, Sep 12, 2016 at 10:41:29AM -0500, Tom Lendacky wrote:
> Looking at __change_page_attr_set_clr() isn't it possible for some of
> the pages to be changed before an error is encountered since it is
> looping?  If so, we may still need to flush. The CPA_FLUSHTLB flag
> should take care of a failing case where no attributes have actually
> been changed.

Ah, yes, ok, then yours is correct.

-- 
Regards/Gruss,
Boris.

ECO tip #101: Trim your mails when you reply.