atomic_inc_short() is completely unused and implemented only on x86.
Remove it.
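
For reference, the helper being removed is just a LOCK-prefixed 16-bit
add (see the x86 hunk below). A rough C11 userspace equivalent, as an
illustrative sketch only (not kernel code, and it returns the
post-increment value rather than re-reading *v the way the x86 version
does), would be:

	#include <stdatomic.h>

	/* Sketch of what arch_atomic_inc_short() did: atomically add 1
	 * to a 16-bit value and return the resulting value. */
	static inline short inc_short(_Atomic short *v)
	{
		return (short)(atomic_fetch_add(v, 1) + 1);
	}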

Signed-off-by: Dmitry Vyukov <dvyu...@google.com>
Suggested-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Cc: x...@kernel.org

---

Andrew, I think this needs to go into mm, as the
atomic-instrumented.h patch is in mm.
---
 arch/tile/lib/atomic_asm_32.S             |  3 +--
 arch/x86/include/asm/atomic.h             | 13 -------------
 include/asm-generic/atomic-instrumented.h |  6 ------
 3 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c0f259..94709ab41ed8 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
  * has an opportunity to return -EFAULT to the user if needed.
  * The 64-bit routines just return a "long long" with the value,
  * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
  *
  * Note that the caller is advised to issue a suitable L1 or L2
  * prefetch on the address being manipulated to avoid extra stalls.
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 0cde164f058a..9ff8738103eb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -237,19 +237,6 @@ ATOMIC_OPS(xor, ^)
 
 int __arch_atomic_add_unless(atomic_t *v, int a, int u);
 
-/**
- * arch_atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int arch_atomic_inc_short(short int *v)
-{
-       asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-       return *v;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 951bcd083925..70742da13087 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -78,12 +78,6 @@ static __always_inline bool atomic64_add_unless(atomic64_t *v, long long a,
        return arch_atomic64_add_unless(v, a, u);
 }
 
-static __always_inline short int atomic_inc_short(short int *v)
-{
-       kasan_check_write(v, sizeof(*v));
-       return arch_atomic_inc_short(v);
-}
-
 #define __INSTR_VOID1(op, sz)                                          \
 static __always_inline void atomic##sz##_##op(atomic##sz##_t *v)       \
 {                                                                      \
-- 
2.12.1.500.gab5fba24ee-goog
