This API is no longer used.

Reviewed-by: Pierrick Bouvier <[email protected]>
Signed-off-by: Richard Henderson <[email protected]>
---
 include/qemu/stats64.h | 199 -----------------------------------------
 util/stats64.c         | 148 ------------------------------
 util/meson.build       |   1 -
 3 files changed, 348 deletions(-)
 delete mode 100644 include/qemu/stats64.h
 delete mode 100644 util/stats64.c
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
deleted file mode 100644
index 99b5cb724a..0000000000
--- a/include/qemu/stats64.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Atomic operations on 64-bit quantities.
- *
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Author: Paolo Bonzini <[email protected]>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef QEMU_STATS64_H
-#define QEMU_STATS64_H
-
-#include "qemu/atomic.h"
-
-/* This provides atomic operations on 64-bit type, using a reader-writer
- * spinlock on architectures that do not have 64-bit accesses.  Even on
- * those architectures, it tries hard not to take the lock.
- */
-
-typedef struct Stat64 {
-#ifdef CONFIG_ATOMIC64
-    aligned_uint64_t value;
-#else
-    uint32_t low, high;
-    uint32_t lock;
-#endif
-} Stat64;
-
-#ifdef CONFIG_ATOMIC64
-static inline void stat64_init(Stat64 *s, uint64_t value)
-{
-    /* This is not guaranteed to be atomic! */
-    *s = (Stat64) { value };
-}
-
-static inline uint64_t stat64_get(const Stat64 *s)
-{
-    return qatomic_read__nocheck(&s->value);
-}
-
-static inline void stat64_set(Stat64 *s, uint64_t value)
-{
-    qatomic_set__nocheck(&s->value, value);
-}
-
-static inline void stat64_add(Stat64 *s, uint64_t value)
-{
-    qatomic_add(&s->value, value);
-}
-
-static inline void stat64_min(Stat64 *s, uint64_t value)
-{
-    uint64_t orig = qatomic_read__nocheck(&s->value);
-    while (orig > value) {
-        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
-    }
-}
-
-static inline void stat64_max(Stat64 *s, uint64_t value)
-{
-    uint64_t orig = qatomic_read__nocheck(&s->value);
-    while (orig < value) {
-        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
-    }
-}
-#else
-uint64_t stat64_get(const Stat64 *s);
-void stat64_set(Stat64 *s, uint64_t value);
-bool stat64_min_slow(Stat64 *s, uint64_t value);
-bool stat64_max_slow(Stat64 *s, uint64_t value);
-bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);
-
-static inline void stat64_init(Stat64 *s, uint64_t value)
-{
-    /* This is not guaranteed to be atomic! */
-    *s = (Stat64) { .low = value, .high = value >> 32, .lock = 0 };
-}
-
-static inline void stat64_add(Stat64 *s, uint64_t value)
-{
-    uint32_t low, high;
-    high = value >> 32;
-    low = (uint32_t) value;
-    if (!low) {
-        if (high) {
-            qatomic_add(&s->high, high);
-        }
-        return;
-    }
-
-    for (;;) {
-        uint32_t orig = s->low;
-        uint32_t result = orig + low;
-        uint32_t old;
-
-        if (result < low || high) {
-            /* If the high part is affected, take the lock. */
-            if (stat64_add32_carry(s, low, high)) {
-                return;
-            }
-            continue;
-        }
-
-        /* No carry, try with a 32-bit cmpxchg.  The result is independent of
-         * the high 32 bits, so it can race just fine with stat64_add32_carry
-         * and even stat64_get!
-         */
-        old = qatomic_cmpxchg(&s->low, orig, result);
-        if (orig == old) {
-            return;
-        }
-    }
-}
-
-static inline void stat64_min(Stat64 *s, uint64_t value)
-{
-    uint32_t low, high;
-    uint32_t orig_low, orig_high;
-
-    high = value >> 32;
-    low = (uint32_t) value;
-    do {
-        orig_high = qatomic_read(&s->high);
-        if (orig_high < high) {
-            return;
-        }
-
-        if (orig_high == high) {
-            /* High 32 bits are equal.  Read low after high, otherwise we
-             * can get a false positive (e.g. 0x1235,0x0000 changes to
-             * 0x1234,0x8000 and we read it as 0x1234,0x0000).  Pairs with
-             * the write barrier in stat64_min_slow.
-             */
-            smp_rmb();
-            orig_low = qatomic_read(&s->low);
-            if (orig_low <= low) {
-                return;
-            }
-
-            /* See if we were lucky and a writer raced against us.  The
-             * barrier is theoretically unnecessary, but if we remove it
-             * we may miss being lucky.
-             */
-            smp_rmb();
-            orig_high = qatomic_read(&s->high);
-            if (orig_high < high) {
-                return;
-            }
-        }
-
-        /* If the value changes in any way, we have to take the lock. */
-    } while (!stat64_min_slow(s, value));
-}
-
-static inline void stat64_max(Stat64 *s, uint64_t value)
-{
-    uint32_t low, high;
-    uint32_t orig_low, orig_high;
-
-    high = value >> 32;
-    low = (uint32_t) value;
-    do {
-        orig_high = qatomic_read(&s->high);
-        if (orig_high > high) {
-            return;
-        }
-
-        if (orig_high == high) {
-            /* High 32 bits are equal.  Read low after high, otherwise we
-             * can get a false positive (e.g. 0x1234,0x8000 changes to
-             * 0x1235,0x0000 and we read it as 0x1235,0x8000).  Pairs with
-             * the write barrier in stat64_max_slow.
-             */
-            smp_rmb();
-            orig_low = qatomic_read(&s->low);
-            if (orig_low >= low) {
-                return;
-            }
-
-            /* See if we were lucky and a writer raced against us.  The
-             * barrier is theoretically unnecessary, but if we remove it
-             * we may miss being lucky.
-             */
-            smp_rmb();
-            orig_high = qatomic_read(&s->high);
-            if (orig_high > high) {
-                return;
-            }
-        }
-
-        /* If the value changes in any way, we have to take the lock. */
-    } while (!stat64_max_slow(s, value));
-}
-
-#endif
-
-#endif
diff --git a/util/stats64.c b/util/stats64.c
deleted file mode 100644
index 09736014ec..0000000000
--- a/util/stats64.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Atomic operations on 64-bit quantities.
- *
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Author: Paolo Bonzini <[email protected]>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/atomic.h"
-#include "qemu/stats64.h"
-#include "qemu/processor.h"
-
-#ifndef CONFIG_ATOMIC64
-static inline void stat64_rdlock(Stat64 *s)
-{
-    /* Keep out incoming writers to avoid them starving us. */
-    qatomic_add(&s->lock, 2);
-
-    /* If there is a concurrent writer, wait for it. */
-    while (qatomic_read(&s->lock) & 1) {
-        cpu_relax();
-    }
-}
-
-static inline void stat64_rdunlock(Stat64 *s)
-{
-    qatomic_sub(&s->lock, 2);
-}
-
-static inline bool stat64_wrtrylock(Stat64 *s)
-{
-    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
-}
-
-static inline void stat64_wrunlock(Stat64 *s)
-{
-    qatomic_dec(&s->lock);
-}
-
-uint64_t stat64_get(const Stat64 *s)
-{
-    uint32_t high, low;
-
-    stat64_rdlock((Stat64 *)s);
-
-    /* 64-bit writes always take the lock, so we can read in
-     * any order.
-     */
-    high = qatomic_read(&s->high);
-    low = qatomic_read(&s->low);
-    stat64_rdunlock((Stat64 *)s);
-
-    return ((uint64_t)high << 32) | low;
-}
-
-void stat64_set(Stat64 *s, uint64_t val)
-{
-    while (!stat64_wrtrylock(s)) {
-        cpu_relax();
-    }
-
-    qatomic_set(&s->high, val >> 32);
-    qatomic_set(&s->low, val);
-    stat64_wrunlock(s);
-}
-
-bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
-{
-    uint32_t old;
-
-    if (!stat64_wrtrylock(s)) {
-        cpu_relax();
-        return false;
-    }
-
-    /* 64-bit reads always take the lock, so they don't care about the
-     * order of our update.  By updating s->low first, we can check
-     * whether we have to carry into s->high.
-     */
-    old = qatomic_fetch_add(&s->low, low);
-    high += (old + low) < old;
-    qatomic_add(&s->high, high);
-    stat64_wrunlock(s);
-    return true;
-}
-
-bool stat64_min_slow(Stat64 *s, uint64_t value)
-{
-    uint32_t high, low;
-    uint64_t orig;
-
-    if (!stat64_wrtrylock(s)) {
-        cpu_relax();
-        return false;
-    }
-
-    high = qatomic_read(&s->high);
-    low = qatomic_read(&s->low);
-
-    orig = ((uint64_t)high << 32) | low;
-    if (value < orig) {
-        /* We have to set low before high, just like stat64_min reads
-         * high before low.  The value may become higher temporarily, but
-         * stat64_get does not notice (it takes the lock) and the only ill
-         * effect on stat64_min is that the slow path may be triggered
-         * unnecessarily.
-         */
-        qatomic_set(&s->low, (uint32_t)value);
-        smp_wmb();
-        qatomic_set(&s->high, value >> 32);
-    }
-    stat64_wrunlock(s);
-    return true;
-}
-
-bool stat64_max_slow(Stat64 *s, uint64_t value)
-{
-    uint32_t high, low;
-    uint64_t orig;
-
-    if (!stat64_wrtrylock(s)) {
-        cpu_relax();
-        return false;
-    }
-
-    high = qatomic_read(&s->high);
-    low = qatomic_read(&s->low);
-
-    orig = ((uint64_t)high << 32) | low;
-    if (value > orig) {
-        /* We have to set low before high, just like stat64_max reads
-         * high before low.  The value may become lower temporarily, but
-         * stat64_get does not notice (it takes the lock) and the only ill
-         * effect on stat64_max is that the slow path may be triggered
-         * unnecessarily.
-         */
-        qatomic_set(&s->low, (uint32_t)value);
-        smp_wmb();
-        qatomic_set(&s->high, value >> 32);
-    }
-    stat64_wrunlock(s);
-    return true;
-}
-#endif
diff --git a/util/meson.build b/util/meson.build
index 35029380a3..d7d6b213f6 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -59,7 +59,6 @@ util_ss.add(files('qht.c'))
 util_ss.add(files('qsp.c'))
 util_ss.add(files('range.c'))
 util_ss.add(files('reserved-region.c'))
-util_ss.add(files('stats64.c'))
 util_ss.add(files('systemd.c'))
 util_ss.add(files('transactions.c'))
 util_ss.add(files('guest-random.c'))
-- 
2.43.0
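
For the list archives: the subtlest piece of the deleted code is the reader-writer spinlock packed into the single uint32_t lock word of Stat64 in util/stats64.c. Below is a minimal standalone sketch of that lock protocol, rewritten with C11 stdatomic instead of QEMU's qatomic wrappers; the rw_spin type and helper names are illustrative, not from the tree.

/* Sketch, not QEMU code: bit 0 of 'lock' marks a writer; each reader
 * holds a count of 2, so the word is zero only when the lock is idle.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_uint lock;
} rw_spin;

static void rd_lock(rw_spin *s)
{
    /* Register as a reader first: while lock != 0 the writer cmpxchg
     * below must fail, so incoming writers cannot starve readers.
     */
    atomic_fetch_add(&s->lock, 2);

    /* If a writer got in before us, wait for it to clear bit 0. */
    while (atomic_load(&s->lock) & 1) {
        /* spin; the original called cpu_relax() here */
    }
}

static void rd_unlock(rw_spin *s)
{
    atomic_fetch_sub(&s->lock, 2);
}

static bool wr_trylock(rw_spin *s)
{
    /* Writers only ever acquire an idle lock: no readers, no writer.
     * On failure the removed code (e.g. stat64_add32_carry) backed off
     * and retried its lock-free fast path instead of spinning here.
     */
    unsigned expected = 0;
    return atomic_compare_exchange_strong(&s->lock, &expected, 1);
}

static void wr_unlock(rw_spin *s)
{
    atomic_fetch_sub(&s->lock, 1);
}

Since writers only ever see the lock word go from 0 to 1, a reader count in the high bits and a writer bit in bit 0 never interfere, which is why the removed code could get away with plain add/sub/cmpxchg on one word.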
