Re: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
On Tue, 14 Apr 2020, Christoph Hellwig wrote: > > + /* > > +* Unencrypted memory must come directly from DMA atomic pools if > > +* blocking is not allowed. > > +*/ > > + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && > > + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) { > > + ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), , gfp); > > + if (!ret) > > + return NULL; > > + goto done; > > + } > > + > > if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && > > dma_alloc_need_uncached(dev, attrs) && > > !gfpflags_allow_blocking(gfp)) { > > Can we keep a single conditional for the pool allocations? Maybe > add a new dma_alloc_from_pool helper ala: > > static inline bool dma_alloc_from_pool(struct device *dev, gfp_t gfp) > { > if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) > return false; > if (gfpflags_allow_blocking(gfp)) > return false; > if (force_dma_unencrypted(dev)) > return true; > if (dma_alloc_need_uncached(dev)) > return true; > } Looks good, fixed. I renamed it to dma_should_alloc_from_pool() to avoid confusing it with the actual allocation function and added a dma_should_free_from_pool() as well. diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -75,6 +75,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); } +/* + * Decrypting memory is allowed to block, so if this device requires + * unencrypted memory it must come from atomic pools. 
+ */ +static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp, + unsigned long attrs) +{ + if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) + return false; + if (gfpflags_allow_blocking(gfp)) + return false; + if (force_dma_unencrypted(dev)) + return true; + if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) + return false; + if (dma_alloc_need_uncached(dev, attrs)) + return true; + return false; +} + +static inline bool dma_should_free_from_pool(struct device *dev, +unsigned long attrs) +{ + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) + return true; + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && + !force_dma_unencrypted(dev)) + return false; + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) + return true; + return false; +} + struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp_t gfp, unsigned long attrs) { @@ -124,9 +157,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, struct page *page; void *ret; - if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && - dma_alloc_need_uncached(dev, attrs) && - !gfpflags_allow_blocking(gfp)) { + if (dma_should_alloc_from_pool(dev, gfp, attrs)) { ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp); if (!ret) return NULL; @@ -202,6 +233,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, { unsigned int page_order = get_order(size); + /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ + if (dma_should_free_from_pool(dev, attrs) && + dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) + return; + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && !force_dma_unencrypted(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ @@ -209,10 +245,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, return; } - if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && - dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) - return; - if (force_dma_unencrypted(dev)) set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order); ___ iommu
mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
On Thu, 9 Apr 2020, Tom Lendacky wrote: > > When a device required unencrypted memory and the context does not allow > > required => requires > Fixed, thanks. > > blocking, memory must be returned from the atomic coherent pools. > > > > This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and the > > config only requires CONFIG_DMA_COHERENT_POOL. This will be used for > > CONFIG_AMD_MEM_ENCRYPT in a subsequent patch. > > > > Keep all memory in these pools unencrypted. > > > > Signed-off-by: David Rientjes > > --- > > kernel/dma/direct.c | 16 > > kernel/dma/pool.c | 15 +-- > > 2 files changed, 29 insertions(+), 2 deletions(-) > > > > diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c > > index 70800ca64f13..44165263c185 100644 > > --- a/kernel/dma/direct.c > > +++ b/kernel/dma/direct.c > > @@ -124,6 +124,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t > > size, > > struct page *page; > > void *ret; > > + /* > > +* Unencrypted memory must come directly from DMA atomic pools if > > +* blocking is not allowed. 
> > +*/ > > + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && > > + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) { > > + ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), , gfp); > > + if (!ret) > > + return NULL; > > + goto done; > > + } > > + > > if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && > > dma_alloc_need_uncached(dev, attrs) && > > !gfpflags_allow_blocking(gfp)) { > > @@ -203,6 +215,10 @@ void dma_direct_free_pages(struct device *dev, size_t > > size, void *cpu_addr, > > { > > unsigned int page_order = get_order(size); > > + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && > > + dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) > > + return; > > + > > if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && > > !force_dma_unencrypted(dev)) { > > /* cpu_addr is a struct page cookie, not a kernel address */ > > diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c > > index e14c5a2da734..6685ab89cfa7 100644 > > --- a/kernel/dma/pool.c > > +++ b/kernel/dma/pool.c > > @@ -9,6 +9,7 @@ > > #include > > #include > > #include > > +#include > > #include > > #include > > #include > > @@ -55,12 +56,20 @@ static int atomic_pool_expand(struct gen_pool *pool, > > size_t pool_size, > > arch_dma_prep_coherent(page, pool_size); > > +#ifdef CONFIG_DMA_DIRECT_REMAP > > addr = dma_common_contiguous_remap(page, pool_size, > >pgprot_dmacoherent(PAGE_KERNEL), > >__builtin_return_address(0)); > > if (!addr) > > goto free_page; > > - > > +#else > > + addr = page_to_virt(page); > > +#endif > > + /* > > +* Memory in the atomic DMA pools must be unencrypted, the pools do > > not > > +* shrink so no re-encryption occurs in dma_direct_free_pages(). 
> > +*/ > > + set_memory_decrypted((unsigned long)page_to_virt(page), 1 << order); > > ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page), > > pool_size, NUMA_NO_NODE); > > if (ret) > > @@ -69,8 +78,10 @@ static int atomic_pool_expand(struct gen_pool *pool, > > size_t pool_size, > > return 0; > > remove_mapping: > > +#ifdef CONFIG_DMA_DIRECT_REMAP > > dma_common_free_remap(addr, pool_size); > > You're about to free the memory, but you've called set_memory_decrypted() > against it, so you need to do a set_memory_encrypted() to bring it back to a > state ready for allocation again. > Ah, good catch, thanks. I notice that I should also be checking the return value of set_memory_decrypted() because pages added to the coherent pools *must* be unencrypted. If it fails, we fail the expansion. And do the same thing for set_memory_encrypted(), which would be a bizarre situation (decrypt succeeded, encrypt failed), by simply leaking the page. diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -53,22 +54,42 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, arch_dma_prep_coherent(page, pool_size); +#ifdef CONFIG_DMA_DIRECT_REMAP addr = dma_common_contiguous_remap(page, pool_size, pgprot_dmacoherent(PAGE_KERNEL), __builtin_return_address(0)); if (!addr) goto free_page; - +#else + addr = page_to_virt(page); +#endif + /* +* Memory in the atomic DMA pools must be unencrypted, the pools do not +* shrink so no re-encryption occurs in dma_direct_free_pages(). +*/ + ret = set_memory_decrypted((unsigned long)page_to_virt(page), +
Re: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
> + /* > + * Unencrypted memory must come directly from DMA atomic pools if > + * blocking is not allowed. > + */ > + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && > + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) { > + ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp); > + if (!ret) > + return NULL; > + goto done; > + } > + > if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && > dma_alloc_need_uncached(dev, attrs) && > !gfpflags_allow_blocking(gfp)) { Can we keep a single conditional for the pool allocations? Maybe add a new dma_alloc_from_pool helper ala: static inline bool dma_alloc_from_pool(struct device *dev, gfp_t gfp) { if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) return false; if (gfpflags_allow_blocking(gfp)) return false; if (force_dma_unencrypted(dev)) return true; if (dma_alloc_need_uncached(dev)) return true; } } > @@ -203,6 +215,10 @@ void dma_direct_free_pages(struct device *dev, size_t > size, void *cpu_addr, > { > unsigned int page_order = get_order(size); > > + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && > + dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) > + return; > + Similarly I think we should have a single conditional to free from the pool instead. ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
On 4/8/20 4:21 PM, David Rientjes wrote: When a device required unencrypted memory and the context does not allow required => requires blocking, memory must be returned from the atomic coherent pools. This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and the config only requires CONFIG_DMA_COHERENT_POOL. This will be used for CONFIG_AMD_MEM_ENCRYPT in a subsequent patch. Keep all memory in these pools unencrypted. Signed-off-by: David Rientjes --- kernel/dma/direct.c | 16 kernel/dma/pool.c | 15 +-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 70800ca64f13..44165263c185 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -124,6 +124,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, struct page *page; void *ret; + /* +* Unencrypted memory must come directly from DMA atomic pools if +* blocking is not allowed. +*/ + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) { + ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), , gfp); + if (!ret) + return NULL; + goto done; + } + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_alloc_need_uncached(dev, attrs) && !gfpflags_allow_blocking(gfp)) { @@ -203,6 +215,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, { unsigned int page_order = get_order(size); + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && + dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) + return; + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && !force_dma_unencrypted(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index e14c5a2da734..6685ab89cfa7 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -55,12 +56,20 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, arch_dma_prep_coherent(page, 
pool_size); +#ifdef CONFIG_DMA_DIRECT_REMAP addr = dma_common_contiguous_remap(page, pool_size, pgprot_dmacoherent(PAGE_KERNEL), __builtin_return_address(0)); if (!addr) goto free_page; - +#else + addr = page_to_virt(page); +#endif + /* +* Memory in the atomic DMA pools must be unencrypted, the pools do not +* shrink so no re-encryption occurs in dma_direct_free_pages(). +*/ + set_memory_decrypted((unsigned long)page_to_virt(page), 1 << order); ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page), pool_size, NUMA_NO_NODE); if (ret) @@ -69,8 +78,10 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, return 0; remove_mapping: +#ifdef CONFIG_DMA_DIRECT_REMAP dma_common_free_remap(addr, pool_size); You're about to free the memory, but you've called set_memory_decrypted() against it, so you need to do a set_memory_encrypted() to bring it back to a state ready for allocation again. Thanks, Tom -free_page: +#endif +free_page: __maybe_unused if (!dma_release_from_contiguous(NULL, page, 1 << order)) __free_pages(page, order); out: ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
[rfc v2 4/6] dma-direct: atomic allocations must come from atomic coherent pools
When a device required unencrypted memory and the context does not allow blocking, memory must be returned from the atomic coherent pools. This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and the config only requires CONFIG_DMA_COHERENT_POOL. This will be used for CONFIG_AMD_MEM_ENCRYPT in a subsequent patch. Keep all memory in these pools unencrypted. Signed-off-by: David Rientjes --- kernel/dma/direct.c | 16 kernel/dma/pool.c | 15 +-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 70800ca64f13..44165263c185 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -124,6 +124,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, struct page *page; void *ret; + /* +* Unencrypted memory must come directly from DMA atomic pools if +* blocking is not allowed. +*/ + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) { + ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), , gfp); + if (!ret) + return NULL; + goto done; + } + if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_alloc_need_uncached(dev, attrs) && !gfpflags_allow_blocking(gfp)) { @@ -203,6 +215,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, { unsigned int page_order = get_order(size); + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && + dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) + return; + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && !force_dma_unencrypted(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index e14c5a2da734..6685ab89cfa7 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -55,12 +56,20 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, arch_dma_prep_coherent(page, pool_size); +#ifdef CONFIG_DMA_DIRECT_REMAP addr = 
dma_common_contiguous_remap(page, pool_size, pgprot_dmacoherent(PAGE_KERNEL), __builtin_return_address(0)); if (!addr) goto free_page; - +#else + addr = page_to_virt(page); +#endif + /* +* Memory in the atomic DMA pools must be unencrypted, the pools do not +* shrink so no re-encryption occurs in dma_direct_free_pages(). +*/ + set_memory_decrypted((unsigned long)page_to_virt(page), 1 << order); ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page), pool_size, NUMA_NO_NODE); if (ret) @@ -69,8 +78,10 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, return 0; remove_mapping: +#ifdef CONFIG_DMA_DIRECT_REMAP dma_common_free_remap(addr, pool_size); -free_page: +#endif +free_page: __maybe_unused if (!dma_release_from_contiguous(NULL, page, 1 << order)) __free_pages(page, order); out: ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu