Currently, we take a mutex to manipulate the bitmap. This operation
is simple and short, so there is no need to sleep if the lock is
contended. Therefore, change the mutex to a spinlock.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>

diff --git a/mm/cma.c b/mm/cma.c
index 22a5b23..3085e8c 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -27,6 +27,7 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -36,7 +37,7 @@ struct cma {
        unsigned long   count;
        unsigned long   *bitmap;
        int order_per_bit; /* Order of pages represented by one bit */
-       struct mutex    lock;
+       spinlock_t      lock;
 };
 
 /*
@@ -72,9 +73,9 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
        bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
        nr_bits = cma_bitmap_pages_to_bits(cma, count);
 
-       mutex_lock(&cma->lock);
+       spin_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmapno, nr_bits);
-       mutex_unlock(&cma->lock);
+       spin_unlock(&cma->lock);
 }
 
 static int __init cma_activate_area(struct cma *cma)
@@ -112,7 +113,7 @@ static int __init cma_activate_area(struct cma *cma)
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
 
-       mutex_init(&cma->lock);
+       spin_lock_init(&cma->lock);
        return 0;
 
 err:
@@ -261,11 +262,11 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
        nr_bits = cma_bitmap_pages_to_bits(cma, count);
 
        for (;;) {
-               mutex_lock(&cma->lock);
+               spin_lock(&cma->lock);
                bitmapno = bitmap_find_next_zero_area(cma->bitmap,
                                        bitmap_maxno, start, nr_bits, mask);
                if (bitmapno >= bitmap_maxno) {
-                       mutex_unlock(&cma->lock);
+                       spin_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmapno, nr_bits);
@@ -274,7 +275,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
-               mutex_unlock(&cma->lock);
+               spin_unlock(&cma->lock);
 
                pfn = cma->base_pfn + (bitmapno << cma->order_per_bit);
                mutex_lock(&cma_mutex);
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to