Make hlist_bl_lock() and hlist_bl_unlock() variadic to help with the
transition: call sites can start passing a struct split_lock argument
before these helpers actually consume it, so callers can be converted
one at a time.  Also add hlist_bl_lock_nested().

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
---
 include/linux/list_bl.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..1bfdb441c8bc 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -143,12 +143,19 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
        }
 }
 
-static inline void hlist_bl_lock(struct hlist_bl_head *b)
+static inline void hlist_bl_lock(struct hlist_bl_head *b, ...)
 {
        bit_spin_lock(0, (unsigned long *)b);
 }
 
-static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+static inline void hlist_bl_lock_nested(struct hlist_bl_head *b,
+               struct split_lock *sl, unsigned int subclass)
+{
+       bit_spin_lock_nested(0, (unsigned long *)b, sl, subclass);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b,
+                                       ...)
 {
        __bit_spin_unlock(0, (unsigned long *)b);
 }
-- 
2.30.2
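
For readers following the series: the variadic prototypes are what let
call sites be converted one at a time.  Below is a minimal userspace
sketch of the same calling-convention trick; demo_head, demo_map,
demo_lock() and demo_unlock() are made-up names, and a pthread mutex
stands in for the bit spinlock, so this is only an illustration, not
kernel code.

#include <pthread.h>
#include <stdio.h>

struct demo_map {			/* stands in for struct split_lock */
	const char *name;
};

struct demo_head {			/* stands in for struct hlist_bl_head */
	pthread_mutex_t mutex;
};

/*
 * Old prototype: demo_lock(struct demo_head *h).  Adding "..." means an
 * already-converted caller may pass &map as a second argument; it is
 * simply ignored until the implementation is switched over.
 */
static void demo_lock(struct demo_head *h, ...)
{
	pthread_mutex_lock(&h->mutex);
}

static void demo_unlock(struct demo_head *h, ...)
{
	pthread_mutex_unlock(&h->mutex);
}

int main(void)
{
	struct demo_map map = { .name = "dentry_hash" };
	struct demo_head head = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	demo_lock(&head);		/* unconverted call site still builds */
	demo_unlock(&head);

	demo_lock(&head, &map);		/* converted call site builds too */
	demo_unlock(&head, &map);

	printf("both calling conventions accepted for %s\n", map.name);
	return 0;
}

Presumably, once every caller passes the extra argument, the "..." can
be replaced by the real split_lock parameter, which is what
hlist_bl_lock_nested() in the patch above already takes.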
