From: Stephen Bates <sba...@raithlin.com>

If the amount of resources allocated to a gen_pool exceeds 2^32, the
32-bit avail atomic overflows, and this causes problems when clients
try to borrow resources from the pool. Convert avail to an atomic64_t
so the accounting stays correct for pools larger than 4 GiB.
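
For illustration only (not part of the patch), a minimal userspace
sketch of the wraparound, assuming the usual modulo-2^32 behaviour
when a value is narrowed to a 32-bit int:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* An 8 GiB chunk; gen_pool accounts avail in bytes. */
		uint64_t size = 1ULL << 33;

		/* What a 32-bit atomic_t would end up holding: 0. */
		int32_t avail = (int32_t)size;

		printf("requested %llu bytes, avail records %d\n",
		       (unsigned long long)size, avail);
		return 0;
	}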

Add the <linux/atomic.h> header to pull in atomic64 operations on
platforms that do not support them natively.
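
As a hedged sketch (illustrative names, not from this patch) of the
64-bit accounting pattern the conversion moves to:

	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic64_t avail = ATOMIC64_INIT(0);

	/* Safe even when the pool tracks more than 4 GiB. */
	static void give_back(u64 size)
	{
		atomic64_add(size, &avail);
	}

	static bool can_borrow(u64 size)
	{
		return size <= (u64)atomic64_read(&avail);
	}

On 32-bit platforms without native 64-bit atomics these calls resolve
to the generic spinlock-based implementation, which is why the header
include above is needed.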

Signed-off-by: Stephen Bates <sba...@raithlin.com>
---
 include/linux/genalloc.h |  3 ++-
 lib/genalloc.c           | 10 +++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 6dfec4d..b327c31 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
+#include <linux/atomic.h>
 
 struct device;
 struct device_node;
@@ -71,7 +72,7 @@ struct gen_pool {
  */
 struct gen_pool_chunk {
        struct list_head next_chunk;    /* next chunk in pool */
-       atomic_t avail;
+       atomic64_t avail;
        phys_addr_t phys_addr;          /* physical starting address of memory chunk */
        unsigned long start_addr;       /* start address of memory chunk */
        unsigned long end_addr;         /* end address of memory chunk (inclusive) */
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b..a97df2b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
-       atomic_set(&chunk->avail, size);
+       atomic64_set(&chunk->avail, size);
 
        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-               if (size > atomic_read(&chunk->avail))
+               if (size > atomic64_read(&chunk->avail))
                        continue;
 
                start_bit = 0;
@@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 
                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
-               atomic_sub(size, &chunk->avail);
+               atomic64_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
-                       atomic_add(size, &chunk->avail);
+                       atomic64_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
 
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-               avail += atomic_read(&chunk->avail);
+               avail += atomic64_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
 }
-- 
2.7.4
