This patch solves the thundering herd problem. Since previous patches ensured
that only allocations for background requests may block, it is safe to wake up
only one waiter. Whichever waiter it is, it will wake up the next one in
request_end() afterwards.

Signed-off-by: Maxim Patlasov <[email protected]>
---
 fs/fuse/dev.c |   20 ++++++++++++++++----
 1 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 1f7ce89..ea99e2a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -152,7 +152,8 @@ struct fuse_req *fuse_get_req_internal(struct fuse_conn *fc, unsigned npages,
                int intr;
 
                block_sigs(&oldset);
-               intr = wait_event_interruptible(fc->blocked_waitq, !*flag_p);
+               intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
+                                                         !*flag_p);
                restore_sigs(&oldset);
                err = -EINTR;
                if (intr)
@@ -265,6 +266,13 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count)) {
+               if (unlikely(req->background)) {
+                       spin_lock(&fc->lock);
+                       if (!fc->blocked)
+                               wake_up(&fc->blocked_waitq);
+                       spin_unlock(&fc->lock);
+               }
+
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);
 
@@ -362,10 +370,14 @@ __releases(fc->lock)
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
-               if (fc->num_background == fc->max_background) {
+               req->background = 0;
+
+               if (fc->num_background == fc->max_background)
                        fc->blocked = 0;
-                       wake_up_all(&fc->blocked_waitq);
-               }
+
+               if (!fc->blocked)
+                       wake_up(&fc->blocked_waitq);
+
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);

_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to