Use the kernel's standard bit spin-lock instead of the custom open-coded
version. The custom version even has a bug: it does not disable
preemption. The only reason this has not caused problems is that the tag
is always pinned inside a preemption-disabled section, under the
class->lock spinlock, so there is no need to backport this to stable.

Reviewed-by: Sergey Senozhatsky <sergey.senozhat...@gmail.com>
Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/zsmalloc.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
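Note for reviewers: the key difference is that the stock bit_spin_lock()
helper in <linux/bit_spinlock.h> disables preemption around the
test_and_set_bit_lock() spin, which the removed open-coded pin_tag() loop
did not. Roughly, simplified to the CONFIG_SMP case and omitting the
lockdep/sparse annotations, the upstream helper looks like this:

	static inline void bit_spin_lock(int bitnum, unsigned long *addr)
	{
		/* The open-coded pin_tag() loop lacked this. */
		preempt_disable();
		while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
			/* Allow preemption while we spin waiting... */
			preempt_enable();
			do {
				cpu_relax();
			} while (test_bit(bitnum, addr));
			/* ...and disable it again before retrying. */
			preempt_disable();
		}
	}

bit_spin_trylock() and bit_spin_unlock() pair the same
test_and_set_bit_lock()/clear_bit_unlock() operations with the matching
preempt_disable()/preempt_enable(), so the conversion below also fixes
the missing preemption handling for free.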

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 79295c73dc9f..39f29aedd5d6 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -868,21 +868,17 @@ static unsigned long obj_idx_to_offset(struct page *page,
 
 static inline int trypin_tag(unsigned long handle)
 {
-       unsigned long *ptr = (unsigned long *)handle;
-
-       return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+       return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void pin_tag(unsigned long handle)
 {
-       while (!trypin_tag(handle));
+       bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void unpin_tag(unsigned long handle)
 {
-       unsigned long *ptr = (unsigned long *)handle;
-
-       clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+       bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void reset_page(struct page *page)
-- 
1.9.1