When AMD memory encryption is enabled, all non-blocking DMA allocations must
come from the atomic pools; which pool is used depends on the device and the
gfp mask of the allocation.

Keep all memory in these pools unencrypted.
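For illustration only (not part of this patch), a sketch of the kind of caller
this affects: a driver allocating coherent DMA memory from a context that
cannot sleep.  With SME/SEV active, the allocation below must be satisfied
from the unencrypted atomic pool rather than by remapping and decrypting
pages, which could block.  The helper name is made up for the example.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: called from atomic context, e.g. an IRQ handler. */
static void *example_alloc_atomic(struct device *dev, dma_addr_t *dma_handle)
{
	/*
	 * GFP_ATOMIC means gfpflags_allow_blocking() is false, so with
	 * CONFIG_DMA_DIRECT_REMAP the buffer is carved out of the atomic
	 * pool, which this change keeps unencrypted.
	 */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma_handle, GFP_ATOMIC);
}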

Signed-off-by: David Rientjes <rient...@google.com>
---
 arch/x86/Kconfig    | 1 +
 kernel/dma/direct.c | 9 ++++-----
 kernel/dma/remap.c  | 2 ++
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1523,6 +1523,7 @@ config X86_CPA_STATISTICS
 config AMD_MEM_ENCRYPT
        bool "AMD Secure Memory Encryption (SME) support"
        depends on X86_64 && CPU_SUP_AMD
+       select DMA_DIRECT_REMAP
        select DYNAMIC_PHYSICAL_MASK
        select ARCH_USE_MEMREMAP_PROT
        select ARCH_HAS_FORCE_DMA_UNENCRYPTED
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -125,7 +125,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        void *ret;
 
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-           dma_alloc_need_uncached(dev, attrs) &&
            !gfpflags_allow_blocking(gfp)) {
                ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
                if (!ret)
@@ -202,6 +201,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
        unsigned int page_order = get_order(size);
 
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+               return;
+
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
@@ -209,10 +212,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                return;
        }
 
-       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-           dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
-               return;
-
        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -8,6 +8,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
+#include <linux/set_memory.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
@@ -141,6 +142,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
        if (!addr)
                goto free_page;
 
+       set_memory_decrypted((unsigned long)page_to_virt(page), nr_pages);
        ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                                pool_size, NUMA_NO_NODE);
        if (ret)