Rewrite the generic REFCOUNT_FULL implementation so that the saturation
point is moved to INT_MIN / 2. This allows us to defer the sanity checks
until after the atomic operation, which removes many uses of cmpxchg()
in favour of atomic_fetch_{add,sub}().
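
For reference, a minimal userspace sketch of the deferred-check scheme
(illustrative names and C11 atomics rather than the kernel's atomic API,
and minus the WARN_ONCE reporting; the kernel builds with
-fno-strict-overflow, so the sketch performs the wrapping add via
unsigned arithmetic instead). Once saturated at INT_MIN / 2, the counter
sits roughly 2^30 steps from both zero and INT_MIN, so updates racing
with the post-hoc fixup cannot drag it back into the valid range:

  #include <limits.h>
  #include <stdatomic.h>
  #include <stdio.h>

  #define MODEL_SATURATED (INT_MIN / 2)

  /* Model of the new refcount_add(): do the atomic update first and
   * run the sanity checks afterwards, using only the returned old
   * value. Not the kernel implementation, just the shape of it. */
  static void model_add(atomic_int *refs, int i)
  {
          int old = atomic_fetch_add_explicit(refs, i, memory_order_relaxed);
          int new = (int)((unsigned int)old + (unsigned int)i);

          /* old <= 0: the counter was free or already saturated;
           * new <= 0: the addition wrapped past INT_MAX. Either way,
           * pin the counter at the saturation point, far from both
           * zero and INT_MIN. */
          if (old <= 0 || new <= 0)
                  atomic_store_explicit(refs, MODEL_SATURATED,
                                        memory_order_relaxed);
  }

  int main(void)
  {
          atomic_int refs = INT_MAX;

          model_add(&refs, 1);    /* overflows, then gets pinned */
          printf("pinned at:           %d\n", atomic_load(&refs));
          printf("headroom to zero:    %d\n", -MODEL_SATURATED);
          printf("headroom to INT_MIN: %d\n", MODEL_SATURATED - INT_MIN);
          return 0;
  }

A racing model_add() between the fetch_add and the pinning store can
only move the counter within that ~2^30 window, which is why the checks
no longer need to be part of the atomic operation itself.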

Cc: Kees Cook <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Elena Reshetova <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Tested-by: Hanjun Guo <[email protected]>
Tested-by: Jan Glauber <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
---
 include/linux/refcount.h | 87 +++++++++++++++++++-----------------------------
 1 file changed, 34 insertions(+), 53 deletions(-)

diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e719b5b1220e..eea17f39c4df 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -47,8 +47,8 @@ static inline unsigned int refcount_read(const refcount_t *r)
 #ifdef CONFIG_REFCOUNT_FULL
 #include <linux/bug.h>
 
-#define REFCOUNT_MAX           (UINT_MAX - 1)
-#define REFCOUNT_SATURATED     UINT_MAX
+#define REFCOUNT_MAX           INT_MAX
+#define REFCOUNT_SATURATED     (INT_MIN / 2)
 
 /*
  * Variant of atomic_t specialized for reference counts.
@@ -109,25 +109,19 @@ static inline unsigned int refcount_read(const refcount_t *r)
  */
 static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 {
-       unsigned int new, val = atomic_read(&r->refs);
+       int old = refcount_read(r);
 
        do {
-               if (!val)
-                       return false;
-
-               if (unlikely(val == REFCOUNT_SATURATED))
-                       return true;
-
-               new = val + i;
-               if (new < val)
-                       new = REFCOUNT_SATURATED;
+               if (!old)
+                       break;
+       } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
 
-       } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-       WARN_ONCE(new == REFCOUNT_SATURATED,
-                 "refcount_t: saturated; leaking memory.\n");
+       if (unlikely(old < 0 || old + i < 0)) {
+               refcount_set(r, REFCOUNT_SATURATED);
+               WARN_ONCE(1, "refcount_t: saturated; leaking memory.\n");
+       }
 
-       return true;
+       return old;
 }
 
 /**
@@ -148,7 +142,13 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  */
 static inline void refcount_add(int i, refcount_t *r)
 {
-       WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+       int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+       WARN_ONCE(!old, "refcount_t: addition on 0; use-after-free.\n");
+       if (unlikely(old <= 0 || old + i <= 0)) {
+               refcount_set(r, REFCOUNT_SATURATED);
+               WARN_ONCE(1, "refcount_t: saturated; leaking memory.\n");
+       }
 }
 
 /**
@@ -166,23 +166,7 @@ static inline void refcount_add(int i, refcount_t *r)
  */
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-       unsigned int new, val = atomic_read(&r->refs);
-
-       do {
-               new = val + 1;
-
-               if (!val)
-                       return false;
-
-               if (unlikely(!new))
-                       return true;
-
-       } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-       WARN_ONCE(new == REFCOUNT_SATURATED,
-                 "refcount_t: saturated; leaking memory.\n");
-
-       return true;
+       return refcount_add_not_zero(1, r);
 }
 
 /**
@@ -199,7 +183,7 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  */
 static inline void refcount_inc(refcount_t *r)
 {
-       WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+       refcount_add(1, r);
 }
 
 /**
@@ -224,26 +208,19 @@ static inline void refcount_inc(refcount_t *r)
  */
 static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 {
-       unsigned int new, val = atomic_read(&r->refs);
-
-       do {
-               if (unlikely(val == REFCOUNT_SATURATED))
-                       return false;
+       int old = atomic_fetch_sub_release(i, &r->refs);
 
-               new = val - i;
-               if (new > val) {
-                       WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
-                       return false;
-               }
-
-       } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
-       if (!new) {
+       if (old == i) {
                smp_acquire__after_ctrl_dep();
                return true;
        }
-       return false;
 
+       if (unlikely(old - i < 0)) {
+               refcount_set(r, REFCOUNT_SATURATED);
+               WARN_ONCE(1, "refcount_t: underflow; use-after-free.\n");
+       }
+
+       return false;
 }
 
 /**
@@ -276,9 +253,13 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  */
 static inline void refcount_dec(refcount_t *r)
 {
-       WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
+       int old = atomic_fetch_sub_release(1, &r->refs);
 
+       if (unlikely(old <= 1)) {
+               refcount_set(r, REFCOUNT_SATURATED);
+               WARN_ONCE(1, "refcount_t: decrement hit 0; leaking memory.\n");
+       }
+}
 #else /* CONFIG_REFCOUNT_FULL */
 
 #define REFCOUNT_MAX           INT_MAX
-- 
2.11.0
