From: Liu Haitao <haitao....@windriver.com>

commit 217847f57119b5fdd377bfa3d344613ddb98d9fc upstream.

The commit ("kmemleak: Turn kmemleak_lock to raw spinlock on RT")
changed the kmemleak_lock to a raw spinlock. However, the
kmemleak_object->lock is still acquired in scan_block() while the
kmemleak_lock is held. On PREEMPT_RT a spinlock_t is a sleeping lock
and must not be taken under a raw spinlock, so the inner lock has to
be raw as well.

Make the object->lock a raw_spinlock_t.
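
For context, the nesting in scan_block() that forces this looks like
the following (a simplified sketch, not the literal mm/kmemleak.c
code; the identifiers match the file):

	raw_spin_lock_irqsave(&kmemleak_lock, flags);	/* raw: never sleeps */
	/*
	 * On PREEMPT_RT a spinlock_t is a sleeping rtmutex and must not
	 * be acquired while a raw_spinlock_t is held. Making object->lock
	 * a raw_spinlock_t keeps this inner acquisition non-sleeping:
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	update_refs(object);
	raw_spin_unlock(&object->lock);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);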

Cc: stable...@vger.kernel.org
Link: https://lkml.kernel.org/r/20190927082230.34152-1-yongxin....@windriver.com
Signed-off-by: Liu Haitao <haitao....@windriver.com>
Signed-off-by: Yongxin Liu <yongxin....@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
---
 mm/kmemleak.c | 72 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index aaee59c0306a..355dd95d0611 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -135,7 +135,7 @@ struct kmemleak_scan_area {
  * (use_count) and freed using the RCU mechanism.
  */
 struct kmemleak_object {
-       spinlock_t lock;
+       raw_spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
@@ -560,7 +560,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
-       spin_lock_init(&object->lock);
+       raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object)
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
 }
 
@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 static void paint_ptr(unsigned long ptr, int color)
@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                goto out;
        }
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 
        hlist_add_head(&area->node, &object->area_list);
 out_unlock:
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
        put_object(object);
 }
@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
                return;
        }
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
 }
 
@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr)
                return;
        }
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
 }
 
@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log)
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 out:
        rcu_read_unlock();
 }
@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr)
                return;
        }
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 
        put_object(object);
 }
@@ -1346,7 +1346,7 @@ static void scan_block(void *_start, void *_end,
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
-               spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+               raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
@@ -1355,7 +1355,7 @@ static void scan_block(void *_start, void *_end,
                        excess_ref = 0;
                        update_refs(object);
                }
-               spin_unlock(&object->lock);
+               raw_spin_unlock(&object->lock);
 
                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
@@ -1364,9 +1364,9 @@ static void scan_block(void *_start, void *_end,
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
-                       spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
-                       spin_unlock(&object->lock);
+                       raw_spin_unlock(&object->lock);
                }
        }
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -1402,7 +1402,7 @@ static void scan_object(struct kmemleak_object *object)
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
@@ -1421,9 +1421,9 @@ static void scan_object(struct kmemleak_object *object)
                        if (start >= end)
                                break;
 
-                       spin_unlock_irqrestore(&object->lock, flags);
+                       raw_spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
-                       spin_lock_irqsave(&object->lock, flags);
+                       raw_spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
@@ -1431,7 +1431,7 @@ static void scan_object(struct kmemleak_object *object)
                                   (void *)(area->start + area->size),
                                   object);
 out:
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
@@ -1484,7 +1484,7 @@ static void kmemleak_scan(void)
        /* prepare the kmemleak_object's */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               spin_lock_irqsave(&object->lock, flags);
+               raw_spin_lock_irqsave(&object->lock, flags);
 #ifdef DEBUG
                /*
                 * With a few exceptions there should be a maximum of
@@ -1501,7 +1501,7 @@ static void kmemleak_scan(void)
                if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);
 
-               spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
@@ -1569,14 +1569,14 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               spin_lock_irqsave(&object->lock, flags);
+               raw_spin_lock_irqsave(&object->lock, flags);
                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
                    && update_checksum(object) && get_object(object)) {
                        /* color it gray temporarily */
                        object->count = object->min_count;
                        list_add_tail(&object->gray_list, &gray_list);
                }
-               spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
@@ -1596,7 +1596,7 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               spin_lock_irqsave(&object->lock, flags);
+               raw_spin_lock_irqsave(&object->lock, flags);
                if (unreferenced_object(object) &&
                    !(object->flags & OBJECT_REPORTED)) {
                        object->flags |= OBJECT_REPORTED;
@@ -1606,7 +1606,7 @@ static void kmemleak_scan(void)
 
                        new_leaks++;
                }
-               spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
@@ -1758,10 +1758,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
        struct kmemleak_object *object = v;
        unsigned long flags;
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
                print_unreferenced(seq, object);
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
        return 0;
 }
 
@@ -1791,9 +1791,9 @@ static int dump_str_object_info(const char *str)
                return -EINVAL;
        }
 
-       spin_lock_irqsave(&object->lock, flags);
+       raw_spin_lock_irqsave(&object->lock, flags);
        dump_object_info(object);
-       spin_unlock_irqrestore(&object->lock, flags);
+       raw_spin_unlock_irqrestore(&object->lock, flags);
 
        put_object(object);
        return 0;
@@ -1812,11 +1812,11 @@ static void kmemleak_clear(void)
 
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               spin_lock_irqsave(&object->lock, flags);
+               raw_spin_lock_irqsave(&object->lock, flags);
                if ((object->flags & OBJECT_REPORTED) &&
                    unreferenced_object(object))
                        __paint_it(object, KMEMLEAK_GREY);
-               spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
-- 
2.14.4
