We must never let unencrypted memory go back into the general page pool. So if we fail to set it back to encrypted when freeing DMA memory, leak the memory instead and warn the user.
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 kernel/dma/direct.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2fef8dd401fe9..60cb75aa6778e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -263,9 +263,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	/* If memory cannot be re-encrypted, it must be leaked */
-	if (dma_set_encrypted(dev, page_address(page), size))
+	if (dma_set_encrypted(dev, page_address(page), size)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
 		return NULL;
+	}
 out_unmap_pages:
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(ret))
 		vunmap(ret);
@@ -307,7 +309,11 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+	if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+		return;
+	}
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -365,7 +371,11 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	dma_set_encrypted(dev, vaddr, 1 << page_order);
+	if (dma_set_encrypted(dev, vaddr, 1 << page_order)) {
+		pr_warn_ratelimited(
+			"leaking DMA memory that can't be re-encrypted\n");
+		return;
+	}
 	__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2
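For readers less familiar with the memory-encryption paths: below is a minimal
userspace sketch of the fail-closed pattern the patch applies. If the attribute
change cannot be undone at free time, the buffer is deliberately leaked rather
than returned to the allocator, and a warning is printed. The set_encrypted()
helper is a hypothetical stand-in for dma_set_encrypted(), and fprintf() stands
in for pr_warn_ratelimited(); this is illustration only, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-in for dma_set_encrypted(): returns 0 on success,
 * nonzero if the pages could not be flipped back to encrypted.
 */
static int set_encrypted(void *addr, size_t size)
{
	(void)addr;
	(void)size;
	return 0;	/* pretend success for this sketch */
}

/* Free a buffer that was previously marked unencrypted (shared). */
static void free_shared_buffer(void *addr, size_t size)
{
	if (set_encrypted(addr, size)) {
		/*
		 * Fail closed: the pages are still unencrypted, so handing
		 * them back to the allocator would let unrelated users read
		 * or write plaintext through them. Leak them instead and
		 * warn the user.
		 */
		fprintf(stderr,
			"leaking memory that can't be re-encrypted\n");
		return;
	}
	free(addr);
}

int main(void)
{
	void *buf = malloc(4096);

	if (!buf)
		return 1;
	free_shared_buffer(buf, 4096);
	return 0;
}

The design point is that a leak is the lesser evil: leaked pages waste memory,
but freed-while-unencrypted pages would be handed out again by the page
allocator and silently expose or corrupt data.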