[PATCH v7 27/36] iommu/amd: Allow the AMD IOMMU to work with memory encryption

2017-06-16 Thread Tom Lendacky
The IOMMU is programmed with physical addresses for the various tables
and buffers that are used to communicate between the device and the
driver. When the driver allocates this memory it is encrypted. In order
for the IOMMU to access the memory as encrypted the encryption mask needs
to be included in these physical addresses during configuration.

The PTE entries created by the IOMMU should also include the encryption
mask so that when the device behind the IOMMU performs a DMA, the DMA
will be performed to encrypted memory.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 drivers/iommu/amd_iommu.c   |   30 --
 drivers/iommu/amd_iommu_init.c  |   34 --
 drivers/iommu/amd_iommu_proto.h |   10 ++
 drivers/iommu/amd_iommu_types.h |2 +-
 4 files changed, 55 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5..912008c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -544,7 +544,7 @@ static void dump_dte_entry(u16 devid)
 
 static void dump_command(unsigned long phys_addr)
 {
-   struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+   struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
int i;
 
for (i = 0; i < 4; ++i)
@@ -865,11 +865,13 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 
 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+   u64 paddr = iommu_virt_to_phys((void *)address);
+
WARN_ON(address & 0x7ULL);
 
memset(cmd, 0, sizeof(*cmd));
-   cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
-   cmd->data[1] = upper_32_bits(__pa(address));
+   cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+   cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = 1;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
@@ -1328,7 +1330,7 @@ static bool increase_address_space(struct protection_domain *domain,
return false;
 
*pte = PM_LEVEL_PDE(domain->mode,
-   virt_to_phys(domain->pt_root));
+   iommu_virt_to_phys(domain->pt_root));
domain->pt_root  = pte;
domain->mode+= 1;
domain->updated  = true;
@@ -1365,7 +1367,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!page)
return NULL;
 
-   __npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+   __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
 
/* pte could have been changed somewhere. */
if (cmpxchg64(pte, __pte, __npte) != __pte) {
@@ -1481,10 +1483,10 @@ static int iommu_map_page(struct protection_domain *dom,
return -EBUSY;
 
if (count > 1) {
-   __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+   __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
-   __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
+   __pte = __sme_set(phys_addr) | IOMMU_PTE_P | IOMMU_PTE_FC;
 
if (prot & IOMMU_PROT_IR)
__pte |= IOMMU_PTE_IR;
@@ -1700,7 +1702,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
 
-   ptr = __va(tbl[i] & PAGE_MASK);
+   ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
free_page((unsigned long)ptr);
}
@@ -1715,7 +1717,7 @@ static void free_gcr3_tbl_level2(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
 
-   ptr = __va(tbl[i] & PAGE_MASK);
+   ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
free_gcr3_tbl_level1(ptr);
}
@@ -1807,7 +1809,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
u64 flags = 0;
 
if (domain->mode != PAGE_MODE_NONE)
-   pte_root = virt_to_phys(domain->pt_root);
+   pte_root = iommu_virt_to_phys(domain->pt_root);
 
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
@@ -1819,7 +1821,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
flags |= DTE_FLAG_IOTLB;
 
if (domain->flags & PD_IOMMUV2_MASK) {
-   u64 gcr3 = __pa(domain->gcr3_tbl);
+   u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
u64 glx  = domain->glx;
u64 tmp;
 
@@ -3470,10 +3472,10 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
if (root == NULL)
return NULL;
 
-   *pte = __pa(root) | GCR3_VALID;
+   *pte = iommu_virt_to_phys(root) | GCR3_VALID;

Re: [PATCH v7 27/36] iommu/amd: Allow the AMD IOMMU to work with memory encryption

2017-06-22 Thread Borislav Petkov
On Fri, Jun 16, 2017 at 01:54:59PM -0500, Tom Lendacky wrote:
> The IOMMU is programmed with physical addresses for the various tables
> and buffers that are used to communicate between the device and the
> driver. When the driver allocates this memory it is encrypted. In order
> for the IOMMU to access the memory as encrypted the encryption mask needs
> to be included in these physical addresses during configuration.
> 
> The PTE entries created by the IOMMU should also include the encryption
> mask so that when the device behind the IOMMU performs a DMA, the DMA
> will be performed to encrypted memory.
> 
> Signed-off-by: Tom Lendacky 
> ---
>  drivers/iommu/amd_iommu.c   |   30 --
>  drivers/iommu/amd_iommu_init.c  |   34 --
>  drivers/iommu/amd_iommu_proto.h |   10 ++
>  drivers/iommu/amd_iommu_types.h |2 +-
>  4 files changed, 55 insertions(+), 21 deletions(-)

Reviewed-by: Borislav Petkov 

Btw, I'm assuming the virt_to_phys() difference on SME systems is only
needed in a handful of places. Otherwise, I'd suggest changing the
virt_to_phys() function/macro directly. But I guess most of the places
need the real physical address without the enc bit.

-- 
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

___
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec


Re: [PATCH v7 27/36] iommu/amd: Allow the AMD IOMMU to work with memory encryption

2017-06-22 Thread Tom Lendacky

On 6/22/2017 5:56 AM, Borislav Petkov wrote:

On Fri, Jun 16, 2017 at 01:54:59PM -0500, Tom Lendacky wrote:

The IOMMU is programmed with physical addresses for the various tables
and buffers that are used to communicate between the device and the
driver. When the driver allocates this memory it is encrypted. In order
for the IOMMU to access the memory as encrypted the encryption mask needs
to be included in these physical addresses during configuration.

The PTE entries created by the IOMMU should also include the encryption
mask so that when the device behind the IOMMU performs a DMA, the DMA
will be performed to encrypted memory.

Signed-off-by: Tom Lendacky 
---
  drivers/iommu/amd_iommu.c   |   30 --
  drivers/iommu/amd_iommu_init.c  |   34 --
  drivers/iommu/amd_iommu_proto.h |   10 ++
  drivers/iommu/amd_iommu_types.h |2 +-
  4 files changed, 55 insertions(+), 21 deletions(-)


Reviewed-by: Borislav Petkov 

Btw, I'm assuming the virt_to_phys() difference on SME systems is only
needed in a handful of places. Otherwise, I'd suggest changing the
virt_to_phys() function/macro directly. But I guess most of the places
need the real physical address without the enc bit.


Correct.

Thanks,
Tom





___
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec