From: David Rientjes <[email protected]>

[ Upstream commit 56fccf21d1961a06e2a0c96ce446ebf036651062 ]

__change_page_attr() can fail, which will cause set_memory_encrypted() and
set_memory_decrypted() to return non-zero.

If the device requires unencrypted DMA memory and decryption fails, simply
free the memory and fail.

If we attempt to re-encrypt the memory in the failure path and that
encryption fails, there is no alternative other than to leak the memory.
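
For illustration only (not part of this patch), a minimal sketch of how the
new failure mode surfaces to a hypothetical caller; the function and names
below are examples, assuming <linux/dma-mapping.h>:

	#include <linux/dma-mapping.h>

	/* Hypothetical caller: a failed decryption now shows up as a NULL
	 * return from dma_alloc_coherent(), just like an ordinary
	 * out-of-memory failure. */
	static int example_dma_alloc(struct device *dev, size_t size)
	{
		dma_addr_t dma_handle;
		void *buf;

		buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... use the buffer ... */

		dma_free_coherent(dev, size, buf, dma_handle);
		return 0;
	}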

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: David Rientjes <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
---
 kernel/dma/direct.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4e789c46ff0bf..98c445dcb308b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -124,6 +124,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
        struct page *page;
        void *ret;
+       int err;
 
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs) &&
@@ -176,8 +177,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        }
 
        ret = page_address(page);
-       if (force_dma_unencrypted(dev))
-               set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_decrypted((unsigned long)ret,
+                                          1 << get_order(size));
+               if (err)
+                       goto out_free_pages;
+       }
 
        memset(ret, 0, size);
 
@@ -196,9 +201,13 @@ done:
        return ret;
 
 out_encrypt_pages:
-       if (force_dma_unencrypted(dev))
-               set_memory_encrypted((unsigned long)page_address(page),
-                                    1 << get_order(size));
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_encrypted((unsigned long)page_address(page),
+                                          1 << get_order(size));
+               /* If memory cannot be re-encrypted, it must be leaked */
+               if (err)
+                       return NULL;
+       }
 out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
-- 
2.25.1
