If someone else has found an active waitqueue and updated
bt->wake_index, do not modify it again. This saves an
atomic read.

Signed-off-by: Wenbo Wang <mail_weber_w...@163.com>
CC: linux-bl...@vger.kernel.org
CC: linux-kernel@vger.kernel.org
---
 block/blk-mq-tag.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47..5ed9111 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -356,16 +356,15 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 
 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
 {
-       int i, wake_index;
+       int i, wake_index, orig_wake_index;
 
-       wake_index = atomic_read(&bt->wake_index);
+       orig_wake_index = wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];
 
                if (waitqueue_active(&bs->wait)) {
-                       int o = atomic_read(&bt->wake_index);
-                       if (wake_index != o)
-                               atomic_cmpxchg(&bt->wake_index, o, wake_index);
+                       if (wake_index != orig_wake_index)
+                               atomic_cmpxchg(&bt->wake_index, orig_wake_index, wake_index);
 
                        return bs;
                }
-- 
1.8.3.1
---
Not seeing too much benefit from this patch, but it makes the logic here
clearer.

Reply via email to