With __cq_dequeue() returning the dequeued element directly, the BFS search
loop can simply dequeue until the queue is empty, which slightly simplifies
the code. No functional change.
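
Concretely, the loop in __bfs() (shown in full in the diff below) goes from
dequeueing into an output parameter:

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, &lock);
		...
	}

to testing the returned pointer directly:

	while ((lock = __cq_dequeue(cq))) {
		...
	}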

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
---
 kernel/locking/lockdep.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8167d69..ad16793 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1317,14 +1317,21 @@ static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
        return 0;
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
+/*
+ * Dequeue an element from the circular_queue; return the lock if the queue
+ * is not empty, or NULL otherwise.
+ */
+static inline struct lock_list *__cq_dequeue(struct circular_queue *cq)
 {
+       struct lock_list *lock;
+
        if (__cq_empty(cq))
-               return -1;
+               return NULL;
 
-       *elem = cq->element[cq->front];
+       lock = cq->element[cq->front];
        cq->front = (cq->front + 1) & CQ_MASK;
-       return 0;
+
+       return lock;
 }
 
 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
@@ -1376,6 +1383,7 @@ static int __bfs(struct lock_list *source_entry,
                 int forward)
 {
        struct lock_list *entry;
+       struct lock_list *lock;
        struct list_head *head;
        struct circular_queue *cq = &lock_cq;
        int ret = 1;
@@ -1397,10 +1405,7 @@ static int __bfs(struct lock_list *source_entry,
        __cq_init(cq);
        __cq_enqueue(cq, source_entry);
 
-       while (!__cq_empty(cq)) {
-               struct lock_list *lock;
-
-               __cq_dequeue(cq, &lock);
+       while ((lock = __cq_dequeue(cq))) {
 
                if (!lock->class) {
                        ret = -2;
-- 
1.8.3.1
