Re: [PATCH v3 25/47] filelock: convert __locks_insert_block, conflict and deadlock checks to use file_lock_core

2024-02-18 Thread Jeff Layton
On Wed, 2024-01-31 at 18:02 -0500, Jeff Layton wrote:
> Have both __locks_insert_block and the deadlock and conflict checking
> functions take a struct file_lock_core pointer instead of a struct
> file_lock one. Also, change posix_locks_deadlock to return bool.
> 
> Signed-off-by: Jeff Layton <jlayton@kernel.org>
> ---
>  fs/locks.c | 132 
> +
>  1 file changed, 72 insertions(+), 60 deletions(-)
> 
> diff --git a/fs/locks.c b/fs/locks.c
> index 1e8b943bd7f9..0dc1c9da858c 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -757,39 +757,41 @@ EXPORT_SYMBOL(locks_delete_block);
>   * waiters, and add beneath any waiter that blocks the new waiter.
>   * Thus wakeups don't happen until needed.
>   */
> -static void __locks_insert_block(struct file_lock *blocker,
> -  struct file_lock *waiter,
> -  bool conflict(struct file_lock *,
> -struct file_lock *))
> +static void __locks_insert_block(struct file_lock *blocker_fl,
> +  struct file_lock *waiter_fl,
> +  bool conflict(struct file_lock_core *,
> +struct file_lock_core *))
>  {
> - struct file_lock *fl;
> - BUG_ON(!list_empty(&waiter->c.flc_blocked_member));
> + struct file_lock_core *blocker = &blocker_fl->c;
> + struct file_lock_core *waiter = &waiter_fl->c;
> + struct file_lock_core *flc;
>  
> + BUG_ON(!list_empty(&waiter->flc_blocked_member));
>  new_blocker:
> - list_for_each_entry(fl, &blocker->c.flc_blocked_requests,
> - c.flc_blocked_member)
> - if (conflict(fl, waiter)) {
> - blocker =  fl;
> + list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
> + if (conflict(flc, waiter)) {
> + blocker =  flc;
>   goto new_blocker;
>   }
> - waiter->c.flc_blocker = blocker;
> - list_add_tail(&waiter->c.flc_blocked_member,
> -   &blocker->c.flc_blocked_requests);
> - if ((blocker->c.flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)
> - locks_insert_global_blocked(&waiter->c);
> + waiter->flc_blocker = file_lock(blocker);
> + list_add_tail(&waiter->flc_blocked_member,
> +   &blocker->flc_blocked_requests);
>  
> - /* The requests in waiter->fl_blocked are known to conflict with
> + if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK))

Christian,

There is a bug in the above delta. That should read:

if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)

I suspect that is the cause of the performance regression noted by the
KTR.

I believe the bug is fairly harmless -- it's just putting OFD locks into
the global hash when it doesn't need to, which probably slows down
deadlock checking. I'm going to spin up a patch and test it today, but I
wanted to give you a heads up.

I'll send the patch later today or tomorrow.
 
> + locks_insert_global_blocked(waiter);
> +
> + /* The requests in waiter->flc_blocked are known to conflict with
>* waiter, but might not conflict with blocker, or the requests
>* and lock which block it.  So they all need to be woken.
>*/
> - __locks_wake_up_blocks(&waiter->c);
> + __locks_wake_up_blocks(waiter);
>  }
>  
>  /* Must be called with flc_lock held. */
>  static void locks_insert_block(struct file_lock *blocker,
>  struct file_lock *waiter,
> -bool conflict(struct file_lock *,
> -  struct file_lock *))
> +bool conflict(struct file_lock_core *,
> +  struct file_lock_core *))
>  {
>   spin_lock(&blocked_lock_lock);
>   __locks_insert_block(blocker, waiter, conflict);
> @@ -846,12 +848,12 @@ locks_delete_lock_ctx(struct file_lock *fl, struct 
> list_head *dispose)
>  /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
>   * checks for shared/exclusive status of overlapping locks.
>   */
> -static bool locks_conflict(struct file_lock *caller_fl,
> -struct file_lock *sys_fl)
> +static bool locks_conflict(struct file_lock_core *caller_flc,
> +struct file_lock_core *sys_flc)
>  {
> - if (lock_is_write(sys_fl))
> + if (sys_flc->flc_type == F_WRLCK)
>   return true;
> - if (lock_is_write(caller_fl))
> + if (caller_flc->flc_type == F_WRLCK)
>   return true;
>   return false;
>  }
> @@ -859,20 +861,23 @@ static bool locks_conflict(struct file_lock *caller_fl,
>  /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
>   * checking before calling the locks_conflict().
>   */
> -static bool posix_locks_conflict(struct file_lock *caller_fl,
> -  struct file_lock *sys_fl)
> +static bool posix_locks_conflict(struct file_lock_core *caller_flc,

[PATCH v3 25/47] filelock: convert __locks_insert_block, conflict and deadlock checks to use file_lock_core

2024-01-31 Thread Jeff Layton
Have both __locks_insert_block and the deadlock and conflict checking
functions take a struct file_lock_core pointer instead of a struct
file_lock one. Also, change posix_locks_deadlock to return bool.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/locks.c | 132 +
 1 file changed, 72 insertions(+), 60 deletions(-)

diff --git a/fs/locks.c b/fs/locks.c
index 1e8b943bd7f9..0dc1c9da858c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -757,39 +757,41 @@ EXPORT_SYMBOL(locks_delete_block);
  * waiters, and add beneath any waiter that blocks the new waiter.
  * Thus wakeups don't happen until needed.
  */
-static void __locks_insert_block(struct file_lock *blocker,
-struct file_lock *waiter,
-bool conflict(struct file_lock *,
-  struct file_lock *))
+static void __locks_insert_block(struct file_lock *blocker_fl,
+struct file_lock *waiter_fl,
+bool conflict(struct file_lock_core *,
+  struct file_lock_core *))
 {
-   struct file_lock *fl;
-   BUG_ON(!list_empty(&waiter->c.flc_blocked_member));
+   struct file_lock_core *blocker = &blocker_fl->c;
+   struct file_lock_core *waiter = &waiter_fl->c;
+   struct file_lock_core *flc;
 
+   BUG_ON(!list_empty(&waiter->flc_blocked_member));
 new_blocker:
-   list_for_each_entry(fl, &blocker->c.flc_blocked_requests,
-   c.flc_blocked_member)
-   if (conflict(fl, waiter)) {
-   blocker =  fl;
+   list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
+   if (conflict(flc, waiter)) {
+   blocker =  flc;
goto new_blocker;
}
-   waiter->c.flc_blocker = blocker;
-   list_add_tail(&waiter->c.flc_blocked_member,
- &blocker->c.flc_blocked_requests);
-   if ((blocker->c.flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)
-   locks_insert_global_blocked(&waiter->c);
+   waiter->flc_blocker = file_lock(blocker);
+   list_add_tail(&waiter->flc_blocked_member,
+ &blocker->flc_blocked_requests);
 
-   /* The requests in waiter->fl_blocked are known to conflict with
+   if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK))
+   locks_insert_global_blocked(waiter);
+
+   /* The requests in waiter->flc_blocked are known to conflict with
 * waiter, but might not conflict with blocker, or the requests
 * and lock which block it.  So they all need to be woken.
 */
-   __locks_wake_up_blocks(&waiter->c);
+   __locks_wake_up_blocks(waiter);
 }
 
 /* Must be called with flc_lock held. */
 static void locks_insert_block(struct file_lock *blocker,
   struct file_lock *waiter,
-  bool conflict(struct file_lock *,
-struct file_lock *))
+  bool conflict(struct file_lock_core *,
+struct file_lock_core *))
 {
spin_lock(&blocked_lock_lock);
__locks_insert_block(blocker, waiter, conflict);
@@ -846,12 +848,12 @@ locks_delete_lock_ctx(struct file_lock *fl, struct 
list_head *dispose)
 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
  * checks for shared/exclusive status of overlapping locks.
  */
-static bool locks_conflict(struct file_lock *caller_fl,
-  struct file_lock *sys_fl)
+static bool locks_conflict(struct file_lock_core *caller_flc,
+  struct file_lock_core *sys_flc)
 {
-   if (lock_is_write(sys_fl))
+   if (sys_flc->flc_type == F_WRLCK)
return true;
-   if (lock_is_write(caller_fl))
+   if (caller_flc->flc_type == F_WRLCK)
return true;
return false;
 }
@@ -859,20 +861,23 @@ static bool locks_conflict(struct file_lock *caller_fl,
 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
  * checking before calling the locks_conflict().
  */
-static bool posix_locks_conflict(struct file_lock *caller_fl,
-struct file_lock *sys_fl)
+static bool posix_locks_conflict(struct file_lock_core *caller_flc,
+struct file_lock_core *sys_flc)
 {
+   struct file_lock *caller_fl = file_lock(caller_flc);
+   struct file_lock *sys_fl = file_lock(sys_flc);
+
/* POSIX locks owned by the same process do not conflict with
 * each other.
 */
-   if (posix_same_owner(&caller_fl->c, &sys_fl->c))
+   if (posix_same_owner(caller_flc, sys_flc))
return false;
 
/* Check whether they overlap */
if (!locks_overlap(caller_fl, sys_fl))
return false;
 
-   return locks_conflict(caller_fl, sys_fl);
+