Re: [PATCH v15 10/23] locking/refcount, kref: Add kref_put_ww_mutex()

2023-08-28 Thread Dmitry Osipenko
On 8/28/23 12:26, Boris Brezillon wrote:
> On Sun, 27 Aug 2023 20:54:36 +0300
> Dmitry Osipenko  wrote:
> 
>> Introduce kref_put_ww_mutex() helper that will handle the wait-wound
>> mutex auto-locking on kref_put(). This helper is wanted by DRM drivers
>> that extensively use dma-reservation locking which in turn uses ww-mutex.
>>
>> Signed-off-by: Dmitry Osipenko 
>> ---
>>  include/linux/kref.h | 12 
>>  include/linux/refcount.h |  5 +
>>  lib/refcount.c   | 34 ++
>>  3 files changed, 51 insertions(+)
>>
>> diff --git a/include/linux/kref.h b/include/linux/kref.h
>> index d32e21a2538c..b2d8dc6e9ae0 100644
>> --- a/include/linux/kref.h
>> +++ b/include/linux/kref.h
>> @@ -90,6 +90,18 @@ static inline int kref_put_lock(struct kref *kref,
>>  return 0;
>>  }
>>  
>> +static inline int kref_put_ww_mutex(struct kref *kref,
>> +				    void (*release)(struct kref *kref),
>> +				    struct ww_mutex *lock,
>> +				    struct ww_acquire_ctx *ctx)
>> +{
>> +	if (refcount_dec_and_ww_mutex_lock(&kref->refcount, lock, ctx)) {
>> +		release(kref);
>> +		return 1;
>> +	}
>> +	return 0;
>> +}
>> +
>>  /**
>>   * kref_get_unless_zero - Increment refcount for object unless it is zero.
>>   * @kref: object.
>> diff --git a/include/linux/refcount.h b/include/linux/refcount.h
>> index a62fcca97486..be9ad272bc77 100644
>> --- a/include/linux/refcount.h
>> +++ b/include/linux/refcount.h
>> @@ -99,6 +99,8 @@
>>  #include <linux/spinlock_types.h>
>>  
>>  struct mutex;
>> +struct ww_mutex;
>> +struct ww_acquire_ctx;
>>  
>>  /**
>>   * typedef refcount_t - variant of atomic_t specialized for reference counts
>> @@ -366,4 +368,7 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
>>  extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
>>  						       spinlock_t *lock,
>>  						       unsigned long *flags) __cond_acquires(lock);
>> +extern __must_check bool refcount_dec_and_ww_mutex_lock(refcount_t *r,
>> +							struct ww_mutex *lock,
>> +							struct ww_acquire_ctx *ctx) __cond_acquires(&lock->base);
>>  #endif /* _LINUX_REFCOUNT_H */
>> diff --git a/lib/refcount.c b/lib/refcount.c
>> index a207a8f22b3c..3f6fd0ceed02 100644
>> --- a/lib/refcount.c
>> +++ b/lib/refcount.c
>> @@ -6,6 +6,7 @@
>>  #include <linux/mutex.h>
>>  #include <linux/refcount.h>
>>  #include <linux/spinlock.h>
>> +#include <linux/ww_mutex.h>
>>  #include <linux/bug.h>
>>  
>>  #define REFCOUNT_WARN(str)  WARN_ONCE(1, "refcount_t: " str ".\n")
>> @@ -184,3 +185,36 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
>>  return true;
>>  }
>>  EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
>> +
>> +/**
>> + * refcount_dec_and_ww_mutex_lock - return holding ww-mutex if able to
>> + *  decrement refcount to 0
>> + * @r: the refcount
>> + * @lock: the ww-mutex to be locked
>> + * @ctx: wait-wound context
>> + *
>> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
>> + * decrement when saturated at REFCOUNT_SATURATED.
>> + *
>> + * Provides release memory ordering, such that prior loads and stores are done
>> + * before, and provides a control dependency such that free() must come after.
>> + * See the comment on top.
>> + *
>> + * Return: true and hold ww-mutex lock if able to decrement refcount to 0,
>> + * false otherwise
>> + */
>> +bool refcount_dec_and_ww_mutex_lock(refcount_t *r, struct ww_mutex *lock,
>> +				    struct ww_acquire_ctx *ctx)
>> +{
>> +	if (refcount_dec_not_one(r))
>> +		return false;
>> +
>> +	ww_mutex_lock(lock, ctx);
> 
> Unless I'm wrong, ww_mutex_lock() can return -EDEADLK when ctx != NULL,
> in which case the lock is not held when it returns. The question is: do
> we really have a use case for ctx != NULL in that kref_put_ww_mutex()
> path? If we need to acquire other ww_locks, this lock and the other
> locks should have been acquired beforehand, and we can simply call
> kref_put() when we want to release the ref on the resource.

Right, I completely forgot about the deadlocking.

-- 
Best regards,
Dmitry



Re: [PATCH v15 10/23] locking/refcount, kref: Add kref_put_ww_mutex()

2023-08-28 Thread Boris Brezillon
On Sun, 27 Aug 2023 20:54:36 +0300
Dmitry Osipenko  wrote:

> Introduce kref_put_ww_mutex() helper that will handle the wait-wound
> mutex auto-locking on kref_put(). This helper is wanted by DRM drivers
> that extensively use dma-reservation locking which in turn uses ww-mutex.
> 
> Signed-off-by: Dmitry Osipenko 
> ---
>  include/linux/kref.h | 12 
>  include/linux/refcount.h |  5 +
>  lib/refcount.c   | 34 ++
>  3 files changed, 51 insertions(+)
> 
> diff --git a/include/linux/kref.h b/include/linux/kref.h
> index d32e21a2538c..b2d8dc6e9ae0 100644
> --- a/include/linux/kref.h
> +++ b/include/linux/kref.h
> @@ -90,6 +90,18 @@ static inline int kref_put_lock(struct kref *kref,
>   return 0;
>  }
>  
> +static inline int kref_put_ww_mutex(struct kref *kref,
> +				    void (*release)(struct kref *kref),
> +				    struct ww_mutex *lock,
> +				    struct ww_acquire_ctx *ctx)
> +{
> +	if (refcount_dec_and_ww_mutex_lock(&kref->refcount, lock, ctx)) {
> +		release(kref);
> +		return 1;
> +	}
> +	return 0;
> +}
> +
>  /**
>   * kref_get_unless_zero - Increment refcount for object unless it is zero.
>   * @kref: object.
> diff --git a/include/linux/refcount.h b/include/linux/refcount.h
> index a62fcca97486..be9ad272bc77 100644
> --- a/include/linux/refcount.h
> +++ b/include/linux/refcount.h
> @@ -99,6 +99,8 @@
>  #include <linux/spinlock_types.h>
>  
>  struct mutex;
> +struct ww_mutex;
> +struct ww_acquire_ctx;
>  
>  /**
>   * typedef refcount_t - variant of atomic_t specialized for reference counts
> @@ -366,4 +368,7 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
>  extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
>  						       spinlock_t *lock,
>  						       unsigned long *flags) __cond_acquires(lock);
> +extern __must_check bool refcount_dec_and_ww_mutex_lock(refcount_t *r,
> +							struct ww_mutex *lock,
> +							struct ww_acquire_ctx *ctx) __cond_acquires(&lock->base);
>  #endif /* _LINUX_REFCOUNT_H */
> diff --git a/lib/refcount.c b/lib/refcount.c
> index a207a8f22b3c..3f6fd0ceed02 100644
> --- a/lib/refcount.c
> +++ b/lib/refcount.c
> @@ -6,6 +6,7 @@
>  #include <linux/mutex.h>
>  #include <linux/refcount.h>
>  #include <linux/spinlock.h>
> +#include <linux/ww_mutex.h>
>  #include <linux/bug.h>
>  
>  #define REFCOUNT_WARN(str)   WARN_ONCE(1, "refcount_t: " str ".\n")
> @@ -184,3 +185,36 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
>   return true;
>  }
>  EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
> +
> +/**
> + * refcount_dec_and_ww_mutex_lock - return holding ww-mutex if able to
> + *  decrement refcount to 0
> + * @r: the refcount
> + * @lock: the ww-mutex to be locked
> + * @ctx: wait-wound context
> + *
> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
> + * decrement when saturated at REFCOUNT_SATURATED.
> + *
> + * Provides release memory ordering, such that prior loads and stores are done
> + * before, and provides a control dependency such that free() must come after.
> + * See the comment on top.
> + *
> + * Return: true and hold ww-mutex lock if able to decrement refcount to 0,
> + * false otherwise
> + */
> +bool refcount_dec_and_ww_mutex_lock(refcount_t *r, struct ww_mutex *lock,
> +				    struct ww_acquire_ctx *ctx)
> +{
> +	if (refcount_dec_not_one(r))
> +		return false;
> +
> +	ww_mutex_lock(lock, ctx);

Unless I'm wrong, ww_mutex_lock() can return -EDEADLK when ctx != NULL,
in which case the lock is not held when it returns. The question is: do
we really have a use case for ctx != NULL in that kref_put_ww_mutex()
path? If we need to acquire other ww_locks, this lock and the other
locks should have been acquired beforehand, and we can simply call
kref_put() when we want to release the ref on the resource.
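
For illustration only, a rough sketch of that "take the locks first, then use a
plain kref_put()" pattern could look like the code below. Every name in it
(my_ww_class, my_manager, my_obj, my_obj_release, my_obj_put) is made up, the
ww_mutex is assumed to live in a longer-lived manager object rather than in the
refcounted object itself (so unlocking after the final put stays safe), and the
-EDEADLK backoff loop needed when several ww_mutexes are taken is left out:

#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct my_manager {
	struct ww_mutex lock;		/* outlives the objects it protects */
};

struct my_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void my_obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_obj, refcount));
}

static void my_obj_put(struct my_manager *mgr, struct my_obj *obj)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &my_ww_class);
	/*
	 * Take every ww_mutex we need up front. With a single lock and a
	 * fresh context -EDEADLK is not expected; a multi-lock caller would
	 * add the usual backoff-and-retry loop here instead.
	 */
	WARN_ON(ww_mutex_lock(&mgr->lock, &ctx));
	ww_acquire_done(&ctx);

	/* The final put runs release() with the lock already held. */
	kref_put(&obj->refcount, my_obj_release);

	ww_mutex_unlock(&mgr->lock);	/* mgr->lock is still valid even if obj is gone */
	ww_acquire_fini(&ctx);
}

With the locks taken up front like this, a plain kref_put() is enough and no
conditional-locking helper is involved.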

> +	if (!refcount_dec_and_test(r)) {
> +		ww_mutex_unlock(lock);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +EXPORT_SYMBOL(refcount_dec_and_ww_mutex_lock);



[PATCH v15 10/23] locking/refcount, kref: Add kref_put_ww_mutex()

2023-08-27 Thread Dmitry Osipenko
Introduce kref_put_ww_mutex() helper that will handle the wait-wound
mutex auto-locking on kref_put(). This helper is wanted by DRM drivers
that extensively use dma-reservation locking which in turn uses ww-mutex.

Signed-off-by: Dmitry Osipenko 
---
 include/linux/kref.h     | 12 ++++++++++++
 include/linux/refcount.h |  5 +++++
 lib/refcount.c           | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 51 insertions(+)

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..b2d8dc6e9ae0 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,18 @@ static inline int kref_put_lock(struct kref *kref,
return 0;
 }
 
+static inline int kref_put_ww_mutex(struct kref *kref,
+				    void (*release)(struct kref *kref),
+				    struct ww_mutex *lock,
+				    struct ww_acquire_ctx *ctx)
+{
+	if (refcount_dec_and_ww_mutex_lock(&kref->refcount, lock, ctx)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..be9ad272bc77 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -99,6 +99,8 @@
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct ww_mutex;
+struct ww_acquire_ctx;
 
 /**
  * typedef refcount_t - variant of atomic_t specialized for reference counts
@@ -366,4 +368,7 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
 						       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_ww_mutex_lock(refcount_t *r,
+							struct ww_mutex *lock,
+							struct ww_acquire_ctx *ctx) __cond_acquires(&lock->base);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..3f6fd0ceed02 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -6,6 +6,7 @@
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
+#include <linux/ww_mutex.h>
 #include <linux/bug.h>
 
 #define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
@@ -184,3 +185,36 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
return true;
 }
 EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+
+/**
+ * refcount_dec_and_ww_mutex_lock - return holding ww-mutex if able to
+ *  decrement refcount to 0
+ * @r: the refcount
+ * @lock: the ww-mutex to be locked
+ * @ctx: wait-wound context
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold ww-mutex lock if able to decrement refcount to 0,
+ * false otherwise
+ */
+bool refcount_dec_and_ww_mutex_lock(refcount_t *r, struct ww_mutex *lock,
+				    struct ww_acquire_ctx *ctx)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	ww_mutex_lock(lock, ctx);
+	if (!refcount_dec_and_test(r)) {
+		ww_mutex_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_ww_mutex_lock);
-- 
2.41.0