From: Trond Myklebust <trond.mykleb...@hammerspace.com>

[ Upstream commit e4c72201b6ec3173dfe13fa2e2335a3ad78d4921 ]

Currently, we wake up the tasks by priority queue ordering, which means
that we ignore the batching that is supposed to help with QoS issues.

Fixes: c049f8ea9a0d ("SUNRPC: Remove the bh-safe lock requirement on the rpc_wait_queue->lock")
Signed-off-by: Trond Myklebust <trond.mykleb...@hammerspace.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
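[Illustrative note, not part of the patch: a minimal userspace sketch of the loop shape the patch introduces. The types and the find_next_queued() selector below are hypothetical stand-ins, not the real SUNRPC helpers; the point is only that both wake-all paths funnel every wake-up through one selection helper, so whatever batching/QoS policy that helper implements (as __rpc_find_next_queued() does for priority queues) is applied consistently, instead of being bypassed by an open-coded drain of each priority list.]

/* Illustrative sketch only -- hypothetical stand-ins, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task {
	int id;
	int prio;
	bool queued;
};

/*
 * Stand-in for __rpc_find_next_queued(): the one place that decides which
 * queued task should be woken next.  In the kernel this is where the
 * priority queue's batching policy lives; here it just picks the highest
 * priority queued task.
 */
static struct task *find_next_queued(struct task *tasks, size_t n)
{
	struct task *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!tasks[i].queued)
			continue;
		if (best == NULL || tasks[i].prio > best->prio)
			best = &tasks[i];
	}
	return best;
}

/* Mirrors the shape of the new rpc_wake_up_locked() helper. */
static void wake_up_all(struct task *tasks, size_t n)
{
	struct task *t;

	for (;;) {
		t = find_next_queued(tasks, n);
		if (t == NULL)
			break;
		t->queued = false;	/* "wake" the selected task */
		printf("woke task %d (prio %d)\n", t->id, t->prio);
	}
}

int main(void)
{
	struct task tasks[] = {
		{ 1, 0, true }, { 2, 1, true }, { 3, 0, true },
	};

	wake_up_all(tasks, sizeof(tasks) / sizeof(tasks[0]));
	return 0;
}
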
 net/sunrpc/sched.c | 65 +++++++++++++++++++++++++---------------------
 1 file changed, 35 insertions(+), 30 deletions(-)

diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 53d8b82eda006..7afbf15bcbd9a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -699,6 +699,23 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 
+/**
+ * rpc_wake_up_locked - wake up all rpc_tasks
+ * @queue: rpc_wait_queue on which the tasks are sleeping
+ *
+ */
+static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
+{
+       struct rpc_task *task;
+
+       for (;;) {
+               task = __rpc_find_next_queued(queue);
+               if (task == NULL)
+                       break;
+               rpc_wake_up_task_queue_locked(queue, task);
+       }
+}
+
 /**
  * rpc_wake_up - wake up all rpc_tasks
  * @queue: rpc_wait_queue on which the tasks are sleeping
@@ -707,25 +724,28 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-       struct list_head *head;
-
        spin_lock(&queue->lock);
-       head = &queue->tasks[queue->maxpriority];
+       rpc_wake_up_locked(queue);
+       spin_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_wake_up);
+
+/**
+ * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
+ * @queue: rpc_wait_queue on which the tasks are sleeping
+ * @status: status value to set
+ */
+static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
+{
+       struct rpc_task *task;
+
        for (;;) {
-               while (!list_empty(head)) {
-                       struct rpc_task *task;
-                       task = list_first_entry(head,
-                                       struct rpc_task,
-                                       u.tk_wait.list);
-                       rpc_wake_up_task_queue_locked(queue, task);
-               }
-               if (head == &queue->tasks[0])
+               task = __rpc_find_next_queued(queue);
+               if (task == NULL)
                        break;
-               head--;
+               rpc_wake_up_task_queue_set_status_locked(queue, task, status);
        }
-       spin_unlock(&queue->lock);
 }
-EXPORT_SYMBOL_GPL(rpc_wake_up);
 
 /**
  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
@@ -736,23 +756,8 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-       struct list_head *head;
-
        spin_lock(&queue->lock);
-       head = &queue->tasks[queue->maxpriority];
-       for (;;) {
-               while (!list_empty(head)) {
-                       struct rpc_task *task;
-                       task = list_first_entry(head,
-                                       struct rpc_task,
-                                       u.tk_wait.list);
-                       task->tk_status = status;
-                       rpc_wake_up_task_queue_locked(queue, task);
-               }
-               if (head == &queue->tasks[0])
-                       break;
-               head--;
-       }
+       rpc_wake_up_status_locked(queue, status);
        spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
-- 
2.27.0
