On Tue, Jun 25, 2019 at 05:05:58PM +0200, Fredrik Noring wrote:
> Provide the algorithm option to DMA allocators as well, along with
> convenience variants for zeroed and aligned memory. The following
> four functions are added:
> 
> - gen_pool_dma_alloc_algo()
> - gen_pool_dma_alloc_align()
> - gen_pool_dma_zalloc_algo()
> - gen_pool_dma_zalloc_align()
> 
> Signed-off-by: Fredrik Noring <[email protected]>

The series fixes the problem I had observed in linux-next.

Tested-by: Guenter Roeck <[email protected]>

Guenter

> ---
> Hi Christoph,
> 
> This patch is based on my v5.0.21 branch, with Laurentiu Tudor's other
> local memory changes.
> 
> Fredrik
> ---
>  include/linux/genalloc.h |  10 +++-
>  lib/genalloc.c           | 100 +++++++++++++++++++++++++++++++++++++--
>  2 files changed, 105 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
> --- a/include/linux/genalloc.h
> +++ b/include/linux/genalloc.h
> @@ -121,7 +121,15 @@ extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
>               genpool_algo_t algo, void *data);
>  extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
>               dma_addr_t *dma);
> -void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
> +extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, genpool_algo_t algo, void *data);
> +extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, int align);
> +extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
> +extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, genpool_algo_t algo, void *data);
> +extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, int align);
>  extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
>  extern void gen_pool_for_each_chunk(struct gen_pool *,
>       void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
> diff --git a/lib/genalloc.c b/lib/genalloc.c
> --- a/lib/genalloc.c
> +++ b/lib/genalloc.c
> @@ -347,13 +347,35 @@ EXPORT_SYMBOL(gen_pool_alloc_algo);
>   * Return: virtual address of the allocated memory, or %NULL on failure
>   */
>  void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
> +{
> +     return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
> +}
> +EXPORT_SYMBOL(gen_pool_dma_alloc);
> +
> +/**
> + * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
> + * usage with the given pool algorithm
> + * @pool: pool to allocate from
> + * @size: number of bytes to allocate from the pool
> + * @dma: DMA-view physical address return value. Use %NULL if unneeded.
> + * @algo: algorithm passed from caller
> + * @data: data passed to algorithm
> + *
> + * Allocate the requested number of bytes from the specified pool. Uses the
> + * given pool allocation function. Can not be used in NMI handler on
> + * architectures without NMI-safe cmpxchg implementation.
> + *
> + * Return: virtual address of the allocated memory, or %NULL on failure
> + */
> +void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, genpool_algo_t algo, void *data)
>  {
>       unsigned long vaddr;
>  
>       if (!pool)
>               return NULL;
>  
> -     vaddr = gen_pool_alloc(pool, size);
> +     vaddr = gen_pool_alloc_algo(pool, size, algo, data);
>       if (!vaddr)
>               return NULL;
>  
> @@ -362,7 +384,31 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
>  
>       return (void *)vaddr;
>  }
> -EXPORT_SYMBOL(gen_pool_dma_alloc);
> +EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
> +
> +/**
> + * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
> + * usage with the given alignment
> + * @pool: pool to allocate from
> + * @size: number of bytes to allocate from the pool
> + * @dma: DMA-view physical address return value. Use %NULL if unneeded.
> + * @align: alignment in bytes for starting address
> + *
> + * Allocate the requested number of bytes from the specified pool, with the given
> + * alignment restriction. Can not be used in NMI handler on architectures
> + * without NMI-safe cmpxchg implementation.
> + *
> + * Return: virtual address of the allocated memory, or %NULL on failure
> + */
> +void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, int align)
> +{
> +     struct genpool_data_align data = { .align = align };
> +
> +     return gen_pool_dma_alloc_algo(pool, size, dma,
> +                     gen_pool_first_fit_align, &data);
> +}
> +EXPORT_SYMBOL(gen_pool_dma_alloc_align);
>  
>  /**
>   * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
> @@ -380,14 +426,60 @@ EXPORT_SYMBOL(gen_pool_dma_alloc);
>   */
>  void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
>  {
> -     void *vaddr = gen_pool_dma_alloc(pool, size, dma);
> +     return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
> +}
> +EXPORT_SYMBOL(gen_pool_dma_zalloc);
> +
> +/**
> + * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
> + * DMA usage with the given pool algorithm
> + * @pool: pool to allocate from
> + * @size: number of bytes to allocate from the pool
> + * @dma: DMA-view physical address return value. Use %NULL if unneeded.
> + * @algo: algorithm passed from caller
> + * @data: data passed to algorithm
> + *
> + * Allocate the requested number of zeroed bytes from the specified pool. Uses
> + * the given pool allocation function. Can not be used in NMI handler on
> + * architectures without NMI-safe cmpxchg implementation.
> + *
> + * Return: virtual address of the allocated zeroed memory, or %NULL on failure
> + */
> +void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, genpool_algo_t algo, void *data)
> +{
> +     void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
>  
>       if (vaddr)
>               memset(vaddr, 0, size);
>  
>       return vaddr;
>  }
> -EXPORT_SYMBOL(gen_pool_dma_zalloc);
> +EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
> +
> +/**
> + * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
> + * DMA usage with the given alignment
> + * @pool: pool to allocate from
> + * @size: number of bytes to allocate from the pool
> + * @dma: DMA-view physical address return value. Use %NULL if unneeded.
> + * @align: alignment in bytes for starting address
> + *
> + * Allocate the requested number of zeroed bytes from the specified pool,
> + * with the given alignment restriction. Can not be used in NMI handler on
> + * architectures without NMI-safe cmpxchg implementation.
> + *
> + * Return: virtual address of the allocated zeroed memory, or %NULL on failure
> + */
> +void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
> +             dma_addr_t *dma, int align)
> +{
> +     struct genpool_data_align data = { .align = align };
> +
> +     return gen_pool_dma_zalloc_algo(pool, size, dma,
> +                     gen_pool_first_fit_align, &data);
> +}
> +EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
>  
>  /**
>   * gen_pool_free - free allocated special memory back to the pool
> -- 
> 2.21.0
> 
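
For reference, a minimal usage sketch of the new interfaces; the pool
setup, buffer size and alignment below are hypothetical and not part of
the patch itself:

#include <linux/genalloc.h>

/*
 * Hypothetical example: carve a zeroed, 64-byte aligned DMA buffer out
 * of a previously created gen_pool (e.g. one backed by device-local
 * memory added with gen_pool_add_virt()).
 */
static void *example_get_buffer(struct gen_pool *pool, dma_addr_t *dma)
{
	/* Zeroed allocation whose start address is aligned to 64 bytes. */
	void *vaddr = gen_pool_dma_zalloc_align(pool, 256, dma, 64);

	if (!vaddr)
		return NULL;	/* NULL pool or pool exhausted */

	/* *dma now holds the DMA-view address of the same buffer. */
	return vaddr;
}

The _algo variants take an explicit allocation callback instead; as the
patch shows, the aligned helpers are shorthand for gen_pool_first_fit_align
with a struct genpool_data_align argument.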
