Ideally, cma_release could be called from any context.  However, that is
not possible because a mutex is used to protect the per-area bitmap.
Change the bitmap mutex to an irq safe spinlock.
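
For example, a (purely illustrative) call chain such as

  <atomic context: interrupts disabled or a spinlock held>
    cma_release()
      cma_clear_bitmap()
        mutex_lock()            <-- may sleep, not allowed here

would trigger "BUG: sleeping function called from invalid context".
An irq safe spinlock makes these short bitmap critical sections safe
to enter from any context, including interrupt context.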

Signed-off-by: Mike Kravetz <[email protected]>
---
 mm/cma.c       | 20 +++++++++++---------
 mm/cma.h       |  2 +-
 mm/cma_debug.c | 10 ++++++----
 3 files changed, 18 insertions(+), 14 deletions(-)
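
Note: the irqsave/irqrestore variants are used (rather than plain
spin_lock/spin_unlock) because, once the lock can be taken with
interrupts disabled, any other holder of cma->lock must also disable
local interrupts or it could deadlock against itself.  A minimal
sketch of the intended usage follows; the caller is purely
hypothetical (mydrv, mydrv_buf_done and its fields are made up for
illustration):

	/* Hypothetical: free a CMA buffer from a completion handler
	 * that may run with interrupts disabled. */
	static void mydrv_buf_done(struct mydrv *drv)
	{
		/*
		 * Safe after this patch: cma_release() ->
		 * cma_clear_bitmap() now uses spin_lock_irqsave(),
		 * which neither sleeps nor requires interrupts to
		 * be enabled on entry.
		 */
		cma_release(drv->cma, drv->pages, drv->nr_pages);
	}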

diff --git a/mm/cma.c b/mm/cma.c
index b2393b892d3b..80875fd4487b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
@@ -83,13 +82,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned int count)
 {
        unsigned long bitmap_no, bitmap_count;
+       unsigned long flags;
 
        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
-       mutex_lock(&cma->lock);
+       spin_lock_irqsave(&cma->lock, flags);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-       mutex_unlock(&cma->lock);
+       spin_unlock_irqrestore(&cma->lock, flags);
 }
 
 static void __init cma_activate_area(struct cma *cma)
@@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma)
             pfn += pageblock_nr_pages)
                init_cma_reserved_pageblock(pfn_to_page(pfn));
 
-       mutex_init(&cma->lock);
+       spin_lock_init(&cma->lock);
 
 #ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
@@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
        unsigned long start = 0;
        unsigned long nr_part, nr_total = 0;
        unsigned long nbits = cma_bitmap_maxno(cma);
+       unsigned long flags;
 
-       mutex_lock(&cma->lock);
+       spin_lock_irqsave(&cma->lock, flags);
        pr_info("number of available pages: ");
        for (;;) {
                next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
@@ -407,7 +408,7 @@ static void cma_debug_show_areas(struct cma *cma)
                start = next_zero_bit + nr_zero;
        }
        pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
-       mutex_unlock(&cma->lock);
+       spin_unlock_irqrestore(&cma->lock, flags);
 }
 #else
 static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -430,6 +431,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+       unsigned long flags;
        size_t i;
        struct page *page = NULL;
        int ret = -ENOMEM;
@@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                goto out;
 
        for (;;) {
-               mutex_lock(&cma->lock);
+               spin_lock_irqsave(&cma->lock, flags);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
-                       mutex_unlock(&cma->lock);
+                       spin_unlock_irqrestore(&cma->lock, flags);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -468,7 +470,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
-               mutex_unlock(&cma->lock);
+               spin_unlock_irqrestore(&cma->lock, flags);
 
                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
diff --git a/mm/cma.h b/mm/cma.h
index 68ffad4e430d..2c775877eae2 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -15,7 +15,7 @@ struct cma {
        unsigned long   count;
        unsigned long   *bitmap;
        unsigned int order_per_bit; /* Order of pages represented by one bit */
-       struct mutex    lock;
+       spinlock_t      lock;
 #ifdef CONFIG_CMA_DEBUGFS
        struct hlist_head mem_head;
        spinlock_t mem_head_lock;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index d5bf8aa34fdc..6379cfbfd568 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
 {
        struct cma *cma = data;
        unsigned long used;
+       unsigned long flags;
 
-       mutex_lock(&cma->lock);
+       spin_lock_irqsave(&cma->lock, flags);
        /* pages counter is smaller than sizeof(int) */
        used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
-       mutex_unlock(&cma->lock);
+       spin_unlock_irqrestore(&cma->lock, flags);
        *val = (u64)used << cma->order_per_bit;
 
        return 0;
@@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
        unsigned long maxchunk = 0;
        unsigned long start, end = 0;
        unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
+       unsigned long flags;
 
-       mutex_lock(&cma->lock);
+       spin_lock_irqsave(&cma->lock, flags);
        for (;;) {
                start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
                if (start >= bitmap_maxno)
@@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
                end = find_next_bit(cma->bitmap, bitmap_maxno, start);
                maxchunk = max(end - start, maxchunk);
        }
-       mutex_unlock(&cma->lock);
+       spin_unlock_irqrestore(&cma->lock, flags);
        *val = (u64)maxchunk << cma->order_per_bit;
 
        return 0;
-- 
2.30.2
