Currently bitops-instrumented.h assumes that the architecture provides
both the atomic and non-atomic versions of the bitops (e.g. both set_bit
and __set_bit). This is true on x86, but not always: there is a generic
bitops/non-atomic.h header that provides generic non-atomic versions.
powerpc uses this generic version, so it does not have its own __set_bit
(for example) that could be renamed to arch___set_bit.
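
For reference, the generic version is roughly the following plain C
read-modify-write (paraphrased from include/asm-generic/bitops/non-atomic.h;
the exact code differs between kernel versions):

  static inline void __set_bit(int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

  	/* a plain load and store; the compiler instruments these for KASAN */
  	*p |= mask;
  }

Because the load and store are ordinary compiler-visible accesses, KASAN
already checks them, so no kasan_check_write() wrapper is required.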

Rearrange bitops-instrumented.h. As the operations in bitops/non-atomic.h
will already be instrumented (they use regular memory accesses), put the
instrumentation wrappers for them behind an ifdef: only include these
wrappers if non-atomic.h has not been included.

Signed-off-by: Daniel Axtens <d...@axtens.net>
---
 include/asm-generic/bitops-instrumented.h | 144 ++++++++++++----------
 1 file changed, 76 insertions(+), 68 deletions(-)

diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
index ddd1c6d9d8db..2fe8f7e12a11 100644
--- a/include/asm-generic/bitops-instrumented.h
+++ b/include/asm-generic/bitops-instrumented.h
@@ -29,21 +29,6 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
 	arch_set_bit(nr, addr);
 }
 
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
-{
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	arch___set_bit(nr, addr);
-}
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -57,21 +42,6 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
 	arch_clear_bit(nr, addr);
 }
 
-/**
- * __clear_bit - Clears a bit in memory
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
-{
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	arch___clear_bit(nr, addr);
-}
-
 /**
  * clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
@@ -116,21 +86,6 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
 	arch_change_bit(nr, addr);
 }
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
-{
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	arch___change_bit(nr, addr);
-}
-
 /**
  * test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -144,20 +99,6 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 	return arch_test_and_set_bit(nr, addr);
 }
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic. If two instances of this operation race, one
- * can appear to succeed but actually fail.
- */
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
-{
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	return arch___test_and_set_bit(nr, addr);
-}
-
 /**
  * test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
@@ -187,30 +128,96 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_change_bit(nr, addr);
+}
+
+/*
+ * If the arch is using the generic non-atomic bit ops, they are already
+ * instrumented, and we don't need to create wrappers. Only wrap if we
+ * haven't included that header.
+ */
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___set_bit(nr, addr);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___clear_bit(nr, addr);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___change_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
  * @addr: Address to count from
  *
  * This operation is non-atomic. If two instances of this operation race, one
  * can appear to succeed but actually fail.
  */
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	return arch___test_and_clear_bit(nr, addr);
+	return arch___test_and_set_bit(nr, addr);
 }
 
 /**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
- * This is an atomic fully-ordered operation (implied full memory barrier).
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
  */
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
-	return arch_test_and_change_bit(nr, addr);
+	return arch___test_and_clear_bit(nr, addr);
 }
 
 /**
@@ -237,6 +244,7 @@ static inline bool test_bit(long nr, const volatile unsigned long *addr)
 	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);
 }
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
 
 #if defined(arch_clear_bit_unlock_is_negative_byte)
 /**
-- 
2.20.1