From: Jiri Pirko <[email protected]>

Add a new DMA_HEAP_FLAG_DECRYPTED heap flag to allow userspace to
allocate decrypted (shared) memory from the dma-buf system heap for
confidential computing (CoCo) VMs.
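For illustration, allocation from userspace could look roughly like
this (untested sketch; DMA_HEAP_IOCTL_ALLOC and struct
dma_heap_allocation_data are the existing dma-heap uapi, only
DMA_HEAP_FLAG_DECRYPTED is new here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/dma-heap.h>

	int main(void)
	{
		struct dma_heap_allocation_data data = {
			.len = 4096,
			.fd_flags = O_RDWR | O_CLOEXEC,
			.heap_flags = DMA_HEAP_FLAG_DECRYPTED,
		};
		int heap_fd = open("/dev/dma_heap/system", O_RDWR);

		/* On success, data.fd is a dma-buf fd backed by decrypted pages. */
		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0)
			perror("DMA_HEAP_IOCTL_ALLOC"); /* errno EOPNOTSUPP on non-CoCo */
		return 0;
	}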
On CoCo VMs, guest memory is encrypted by default. The hardware uses an
encryption bit in page table entries (C-bit on AMD SEV, "shared" bit on
Intel TDX) to control whether a given memory access is encrypted or
decrypted. The kernel's direct map is set up with encryption enabled,
so pages returned by alloc_pages() are encrypted in the direct map by
default. To make this memory usable for devices that do not support DMA
to encrypted memory (no TDISP support), it has to be explicitly
decrypted.

A few things are needed to properly handle decrypted memory for the
dma-buf use case:

- set_memory_decrypted() on the direct map after allocation: Besides
  clearing the encryption bit in the direct map PTEs, this also
  notifies the hypervisor about the page state change. On free, the
  inverse set_memory_encrypted() must be called before returning pages
  to the allocator. If re-encryption fails, pages are intentionally
  leaked to prevent decrypted memory from being reused as private.

- pgprot_decrypted() for userspace and kernel virtual mappings: Any new
  mapping of the decrypted pages, be it to userspace via mmap or to
  kernel vmalloc space via vmap, creates PTEs independent of the direct
  map. These must also have the encryption bit cleared, otherwise
  accesses through them would see encrypted (garbage) data.

- DMA_ATTR_CC_DECRYPTED for DMA mapping: Since the pages are already
  decrypted, the DMA API needs to be informed via DMA_ATTR_CC_DECRYPTED
  so it can map them correctly as unencrypted for device access.

On non-CoCo VMs the flag is rejected with -EOPNOTSUPP to prevent misuse
by userspace that does not understand the security implications of
explicitly decrypted memory.

Signed-off-by: Jiri Pirko <[email protected]>
---
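Note for reviewers, not part of the change: a CPU consumer of such a
buffer maps the returned dma-buf fd as usual; the mmap path below
applies pgprot_decrypted() internally, so nothing special is needed in
userspace beyond the standard dma-buf sync bracketing. Untested sketch,
continuing the allocation example from the commit message:

	#include <sys/mman.h>
	#include <linux/dma-buf.h>

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		       data.fd, 0);
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
	};

	ioctl(data.fd, DMA_BUF_IOCTL_SYNC, &sync);
	/* CPU accesses through p see plaintext (decrypted) data here. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(data.fd, DMA_BUF_IOCTL_SYNC, &sync);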
Davis <[email protected]> */ +#include <linux/cc_platform.h> #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/dma-heap.h> @@ -17,7 +18,9 @@ #include <linux/highmem.h> #include <linux/mm.h> #include <linux/module.h> +#include <linux/pgtable.h> #include <linux/scatterlist.h> +#include <linux/set_memory.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -29,6 +32,7 @@ struct system_heap_buffer { struct sg_table sg_table; int vmap_cnt; void *vaddr; + bool decrypted; }; struct dma_heap_attachment { @@ -36,6 +40,7 @@ struct dma_heap_attachment { struct sg_table table; struct list_head list; bool mapped; + bool decrypted; }; #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO) @@ -52,6 +57,34 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP}; static const unsigned int orders[] = {8, 4, 0}; #define NUM_ORDERS ARRAY_SIZE(orders) +static int system_heap_set_page_decrypted(struct page *page) +{ + unsigned long addr = (unsigned long)page_address(page); + unsigned int nr_pages = 1 << compound_order(page); + int ret; + + ret = set_memory_decrypted(addr, nr_pages); + if (ret) + pr_warn_ratelimited("dma-buf system heap: failed to decrypt page at %p\n", + page_address(page)); + + return ret; +} + +static int system_heap_set_page_encrypted(struct page *page) +{ + unsigned long addr = (unsigned long)page_address(page); + unsigned int nr_pages = 1 << compound_order(page); + int ret; + + ret = set_memory_encrypted(addr, nr_pages); + if (ret) + pr_warn_ratelimited("dma-buf system heap: failed to re-encrypt page at %p, leaking memory\n", + page_address(page)); + + return ret; +} + static int dup_sg_table(struct sg_table *from, struct sg_table *to) { struct scatterlist *sg, *new_sg; @@ -90,6 +123,7 @@ static int system_heap_attach(struct dma_buf *dmabuf, a->dev = attachment->dev; INIT_LIST_HEAD(&a->list); a->mapped = false; + a->decrypted = buffer->decrypted; attachment->priv = a; @@ -119,9 +153,11 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac { struct dma_heap_attachment *a = attachment->priv; struct sg_table *table = &a->table; + unsigned long attrs; int ret; - ret = dma_map_sgtable(attachment->dev, table, direction, 0); + attrs = a->decrypted ? 
DMA_ATTR_CC_DECRYPTED : 0; + ret = dma_map_sgtable(attachment->dev, table, direction, attrs); if (ret) return ERR_PTR(ret); @@ -188,8 +224,13 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) unsigned long addr = vma->vm_start; unsigned long pgoff = vma->vm_pgoff; struct scatterlist *sg; + pgprot_t prot; int i, ret; + prot = vma->vm_page_prot; + if (buffer->decrypted) + prot = pgprot_decrypted(prot); + for_each_sgtable_sg(table, sg, i) { unsigned long n = sg->length >> PAGE_SHIFT; @@ -206,8 +247,7 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) if (addr + size > vma->vm_end) size = vma->vm_end - addr; - ret = remap_pfn_range(vma, addr, page_to_pfn(page), - size, vma->vm_page_prot); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), size, prot); if (ret) return ret; @@ -225,6 +265,7 @@ static void *system_heap_do_vmap(struct system_heap_buffer *buffer) struct page **pages = vmalloc(sizeof(struct page *) * npages); struct page **tmp = pages; struct sg_page_iter piter; + pgprot_t prot; void *vaddr; if (!pages) @@ -235,7 +276,10 @@ static void *system_heap_do_vmap(struct system_heap_buffer *buffer) *tmp++ = sg_page_iter_page(&piter); } - vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL); + prot = PAGE_KERNEL; + if (buffer->decrypted) + prot = pgprot_decrypted(prot); + vaddr = vmap(pages, npages, VM_MAP, prot); vfree(pages); if (!vaddr) @@ -296,6 +340,14 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf) for_each_sgtable_sg(table, sg, i) { struct page *page = sg_page(sg); + /* + * Intentionally leak pages that cannot be re-encrypted + * to prevent decrypted memory from being reused. + */ + if (buffer->decrypted && + system_heap_set_page_encrypted(page)) + continue; + __free_pages(page, compound_order(page)); } sg_free_table(table); @@ -344,6 +396,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap, DEFINE_DMA_BUF_EXPORT_INFO(exp_info); unsigned long size_remaining = len; unsigned int max_order = orders[0]; + bool decrypted = heap_flags & DMA_HEAP_FLAG_DECRYPTED; struct dma_buf *dmabuf; struct sg_table *table; struct scatterlist *sg; @@ -351,6 +404,15 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap, struct page *page, *tmp_page; int i, ret = -ENOMEM; + if (decrypted) { + if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) + return ERR_PTR(-EOPNOTSUPP); +#ifdef CONFIG_HIGHMEM + /* Sanity check, should not happen. */ + return ERR_PTR(-EINVAL); +#endif + } + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); @@ -359,6 +421,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap, mutex_init(&buffer->lock); buffer->heap = heap; buffer->len = len; + buffer->decrypted = decrypted; INIT_LIST_HEAD(&pages); i = 0; @@ -393,6 +456,14 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap, list_del(&page->lru); } + if (decrypted) { + for_each_sgtable_sg(table, sg, i) { + ret = system_heap_set_page_decrypted(sg_page(sg)); + if (ret) + goto free_pages; + } + } + /* create the dmabuf */ exp_info.exp_name = dma_heap_get_name(heap); exp_info.ops = &system_heap_buf_ops; @@ -410,6 +481,13 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap, for_each_sgtable_sg(table, sg, i) { struct page *p = sg_page(sg); + /* + * Intentionally leak pages that cannot be re-encrypted + * to prevent decrypted memory from being reused. 
+ */ + if (buffer->decrypted && + system_heap_set_page_encrypted(p)) + continue; __free_pages(p, compound_order(p)); } sg_free_table(table); @@ -430,6 +508,7 @@ static int __init system_heap_create(void) struct dma_heap_export_info exp_info = { .name = "system", .ops = &system_heap_ops, + .valid_heap_flags = DMA_HEAP_FLAG_DECRYPTED, }; struct dma_heap *sys_heap; diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 7cfb531a9281..295a7eaa19ca 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -10,6 +10,7 @@ #define _DMA_HEAPS_H #include <linux/types.h> +#include <uapi/linux/dma-heap.h> struct dma_heap; diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h index a4cf716a49fa..6552c88e52f6 100644 --- a/include/uapi/linux/dma-heap.h +++ b/include/uapi/linux/dma-heap.h @@ -18,8 +18,16 @@ /* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */ #define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE) -/* Currently no heap flags */ -#define DMA_HEAP_VALID_HEAP_FLAGS (0ULL) +/** + * DMA_HEAP_FLAG_DECRYPTED - Allocate decrypted (shared) memory + * + * For confidential computing guests (AMD SEV, Intel TDX), this flag + * requests that the allocated memory be marked as decrypted (shared + * with the host). + */ +#define DMA_HEAP_FLAG_DECRYPTED (1ULL << 0) + +#define DMA_HEAP_VALID_HEAP_FLAGS (DMA_HEAP_FLAG_DECRYPTED) /** * struct dma_heap_allocation_data - metadata passed from userspace for -- 2.51.1
