----- On Oct 26, 2017, at 6:04 PM, Stephen Bates sba...@raithlin.com wrote:

> From: Stephen Bates <sba...@raithlin.com>
> 
> If the amount of resources allocated to a gen_pool exceeds 2^32 bytes
> then the avail atomic overflows, which causes problems when clients
> try to borrow resources from the pool. This is only expected to be an
> issue on 64-bit systems.
> 
> Add the <linux/atomic.h> header to pull in the atomic_long_*
> operations and switch avail to atomic_long_t, so that 32-bit systems
> continue to use atomic_t while 64-bit systems use atomic64_t.
> 
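The failure mode is plain truncation. A minimal userspace sketch
(hypothetical, not part of this patch; assumes an LP64 system where
long is 64-bit) of what happens when a 32-bit counter tracks a pool
larger than 4 GiB:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t pool_size = 8ULL << 30;	/* 8 GiB of pool resources */
	int32_t avail32 = pool_size;	/* truncates: 8 GiB mod 2^32 == 0 */
	long avail64 = pool_size;	/* 64-bit long keeps the full value */

	printf("32-bit avail: %d\n", (int)avail32);	/* prints 0 */
	printf("long avail:   %ld\n", avail64);		/* prints 8589934592 */
	return 0;
}
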
> Changes since v1:
>  Changed atomic64 to atomic_long as per Mathieu's feedback
>  Added a Reviewed-by tag from Logan
> 
> Signed-off-by: Stephen Bates <sba...@raithlin.com>
> Reviewed-by: Logan Gunthorpe <log...@deltatee.com>

Reviewed-by: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>

> ---
> include/linux/genalloc.h |  3 ++-
> lib/genalloc.c           | 10 +++++-----
> 2 files changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
> index 6dfec4d..872f930 100644
> --- a/include/linux/genalloc.h
> +++ b/include/linux/genalloc.h
> @@ -32,6 +32,7 @@
> 
> #include <linux/types.h>
> #include <linux/spinlock_types.h>
> +#include <linux/atomic.h>
> 
> struct device;
> struct device_node;
> @@ -71,7 +72,7 @@ struct gen_pool {
>  */
> struct gen_pool_chunk {
>       struct list_head next_chunk;    /* next chunk in pool */
> -     atomic_t avail;
> +     atomic_long_t avail;
>       phys_addr_t phys_addr;          /* physical starting address of memory chunk */
>       unsigned long start_addr;       /* start address of memory chunk */
>       unsigned long end_addr;         /* end address of memory chunk (inclusive) */
> diff --git a/lib/genalloc.c b/lib/genalloc.c
> index 144fe6b..ca06adc 100644
> --- a/lib/genalloc.c
> +++ b/lib/genalloc.c
> @@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
>       chunk->phys_addr = phys;
>       chunk->start_addr = virt;
>       chunk->end_addr = virt + size - 1;
> -     atomic_set(&chunk->avail, size);
> +     atomic_long_set(&chunk->avail, size);
> 
>       spin_lock(&pool->lock);
>       list_add_rcu(&chunk->next_chunk, &pool->chunks);
> @@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
>       nbits = (size + (1UL << order) - 1) >> order;
>       rcu_read_lock();
>       list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
> -             if (size > atomic_read(&chunk->avail))
> +             if (size > atomic_long_read(&chunk->avail))
>                       continue;
> 
>               start_bit = 0;
> @@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
> 
>               addr = chunk->start_addr + ((unsigned long)start_bit << order);
>               size = nbits << order;
> -             atomic_sub(size, &chunk->avail);
> +             atomic_long_sub(size, &chunk->avail);
>               break;
>       }
>       rcu_read_unlock();
> @@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
>                       remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
>                       BUG_ON(remain);
>                       size = nbits << order;
> -                     atomic_add(size, &chunk->avail);
> +                     atomic_long_add(size, &chunk->avail);
>                       rcu_read_unlock();
>                       return;
>               }
> @@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
> 
>       rcu_read_lock();
>       list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
> -             avail += atomic_read(&chunk->avail);
> +             avail += atomic_long_read(&chunk->avail);
>       rcu_read_unlock();
>       return avail;
> }
> --
> 2.7.4
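
For anyone who wants to reproduce this on a 64-bit system, a minimal
test module along these lines (a hypothetical, untested sketch; the
module name, base address, and 8 GiB figure are arbitrary) exercises
the overflow path. The tracked range is never dereferenced, so no
backing memory is needed:

#include <linux/module.h>
#include <linux/genalloc.h>

static int __init big_pool_init(void)
{
	struct gen_pool *pool;
	/* arbitrary address range; tracked only, never dereferenced */
	unsigned long base = 0x100000000UL;
	size_t size = 8ULL << 30;		/* 8 GiB > 2^32 */
	int ret;

	pool = gen_pool_create(20, -1);		/* 1 MiB minimum allocation */
	if (!pool)
		return -ENOMEM;

	ret = gen_pool_add(pool, base, size, -1);
	if (ret) {
		gen_pool_destroy(pool);
		return ret;
	}

	/* With atomic_t avail, this wrapped modulo 2^32 and reported 0 */
	pr_info("gen_pool avail = %zu\n", gen_pool_avail(pool));

	gen_pool_destroy(pool);
	return 0;
}

static void __exit big_pool_exit(void)
{
}

module_init(big_pool_init);
module_exit(big_pool_exit);
MODULE_LICENSE("GPL");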

-- 
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com
