On Mon, Nov 05, 2018 at 12:30:48PM +1100, NeilBrown wrote:
> When we find an existing lock which conflicts with a request,
> and the request wants to wait, we currently add the request
> to a list.  When the lock is removed, the whole list is woken.
> This can cause the thundering-herd problem.
> To reduce the problem, we make use of the (new) fact that
> a pending request can itself have a list of blocked requests.
> When we find a conflict, we look through the existing blocked requests.
> If any one of them blocks the new request, the new request is attached
> below that request; otherwise it is added to the list of blocked
> requests, which are now known to be mutually non-conflicting.
> 
> This way, when the lock is released, only a set of non-conflicting
> locks will be woken, the rest can stay asleep.
> If the lock request cannot be granted and the request needs to be
> requeued, all the other requests it blocks will then be woken.

So, to make sure I understand: the tree of blocking locks only ever has
three levels (the active lock, the locks blocking on it, and their
children?)
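
To check my own reading, here's a toy userspace model of that insertion
walk.  The names ("req", "conflict", "insert_block") and the singly-linked
prepend are invented for illustration and aren't from the patch (which uses
list_add_tail); only the descend-below-a-conflicting-waiter walk is meant
to match:

/*
 * Toy model of the blocked-request tree: each request keeps a list of
 * requests blocked directly on it, and a new waiter is pushed down
 * below the first existing waiter that conflicts with it.
 */
#include <stdbool.h>
#include <stdio.h>

struct req {
	const char *name;
	int start, end;		/* byte range this request covers */
	struct req *next;	/* next sibling on the parent's blocked list */
	struct req *blocked;	/* requests blocked directly on this one */
};

static bool conflict(struct req *a, struct req *b)
{
	return a->start <= b->end && b->start <= a->end;
}

static void insert_block(struct req *blocker, struct req *waiter)
{
	struct req *fl;

new_blocker:
	for (fl = blocker->blocked; fl; fl = fl->next)
		if (conflict(fl, waiter)) {
			/* descend below the waiter that blocks us */
			blocker = fl;
			goto new_blocker;
		}
	waiter->next = blocker->blocked;
	blocker->blocked = waiter;
}

static void dump(struct req *r, int depth)
{
	struct req *c;

	printf("%*s%s [%d-%d]\n", 2 * depth, "", r->name, r->start, r->end);
	for (c = r->blocked; c; c = c->next)
		dump(c, depth + 1);
}

int main(void)
{
	struct req held = { .name = "held", .start = 0, .end = 100 };
	struct req w1 = { .name = "w1", .start = 0, .end = 10 };
	struct req w2 = { .name = "w2", .start = 50, .end = 60 };
	struct req w3 = { .name = "w3", .start = 5, .end = 8 };

	insert_block(&held, &w1);	/* no waiters yet: first level below "held" */
	insert_block(&held, &w2);	/* no conflict with w1: same level */
	insert_block(&held, &w3);	/* conflicts with w1 only: goes below w1 */
	dump(&held, 0);
	return 0;
}

With that, w3 ends up on a third level under w1, so on release of the
active lock only w1 and w2 would be woken and w3 would stay asleep, which
is the behaviour I understand the patch to be after.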

--b.

> 
> Reported-and-tested-by: Martin Wilck <mwi...@suse.de>
> Signed-off-by: NeilBrown <ne...@suse.com>
> ---
>  fs/locks.c |   29 +++++++++++++++++++++++------
>  1 file changed, 23 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/locks.c b/fs/locks.c
> index 802d5853acd5..1b0eac6b2918 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -715,11 +715,25 @@ static void locks_delete_block(struct file_lock *waiter)
>   * fl_blocked list itself is protected by the blocked_lock_lock, but by ensuring
>   * that the flc_lock is also held on insertions we can avoid taking the
>   * blocked_lock_lock in some cases when we see that the fl_blocked list is empty.
> + *
> + * Rather than just adding to the list, we check for conflicts with any existing
> + * waiters, and add beneath any waiter that blocks the new waiter.
> + * Thus wakeups don't happen until needed.
>   */
>  static void __locks_insert_block(struct file_lock *blocker,
> -                                     struct file_lock *waiter)
> +                              struct file_lock *waiter,
> +                              bool conflict(struct file_lock *,
> +                                            struct file_lock *))
>  {
> +     struct file_lock *fl;
>       BUG_ON(!list_empty(&waiter->fl_block));
> +
> +new_blocker:
> +     list_for_each_entry(fl, &blocker->fl_blocked, fl_block)
> +             if (conflict(fl, waiter)) {
> +                     blocker =  fl;
> +                     goto new_blocker;
> +             }
>       waiter->fl_blocker = blocker;
>       list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
>       if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
> @@ -734,10 +748,12 @@ static void __locks_insert_block(struct file_lock *blocker,
>  
>  /* Must be called with flc_lock held. */
>  static void locks_insert_block(struct file_lock *blocker,
> -                                     struct file_lock *waiter)
> +                            struct file_lock *waiter,
> +                            bool conflict(struct file_lock *,
> +                                          struct file_lock *))
>  {
>       spin_lock(&blocked_lock_lock);
> -     __locks_insert_block(blocker, waiter);
> +     __locks_insert_block(blocker, waiter, conflict);
>       spin_unlock(&blocked_lock_lock);
>  }
>  
> @@ -996,7 +1012,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
>               if (!(request->fl_flags & FL_SLEEP))
>                       goto out;
>               error = FILE_LOCK_DEFERRED;
> -             locks_insert_block(fl, request);
> +             locks_insert_block(fl, request, flock_locks_conflict);
>               goto out;
>       }
>       if (request->fl_flags & FL_ACCESS)
> @@ -1071,7 +1087,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
>                       spin_lock(&blocked_lock_lock);
>                       if (likely(!posix_locks_deadlock(request, fl))) {
>                               error = FILE_LOCK_DEFERRED;
> -                             __locks_insert_block(fl, request);
> +                             __locks_insert_block(fl, request,
> +                                                  posix_locks_conflict);
>                       }
>                       spin_unlock(&blocked_lock_lock);
>                       goto out;
> @@ -1542,7 +1559,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
>               break_time -= jiffies;
>       if (break_time == 0)
>               break_time++;
> -     locks_insert_block(fl, new_fl);
> +     locks_insert_block(fl, new_fl, leases_conflict);
>       trace_break_lease_block(inode, new_fl);
>       spin_unlock(&ctx->flc_lock);
>       percpu_up_read_preempt_enable(&file_rwsem);
> 
