> On May 16, 2024, at 7:12 PM, Stephen Hemminger <step...@networkplumber.org> 
> wrote:
> 
> This header implements 64 bit counters that are NOT atomic
> but are safe against load/store splits on 32 bit platforms.
> 
> Signed-off-by: Stephen Hemminger <step...@networkplumber.org>
> Acked-by: Morten Brørup <m...@smartsharesystems.com>
> ---
> lib/eal/include/meson.build   |  1 +
> lib/eal/include/rte_counter.h | 98 +++++++++++++++++++++++++++++++++++
> 2 files changed, 99 insertions(+)
> create mode 100644 lib/eal/include/rte_counter.h
> 
> diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build
> index e94b056d46..c070dd0079 100644
> --- a/lib/eal/include/meson.build
> +++ b/lib/eal/include/meson.build
> @@ -12,6 +12,7 @@ headers += files(
>         'rte_class.h',
>         'rte_common.h',
>         'rte_compat.h',
> +        'rte_counter.h',
>         'rte_debug.h',
>         'rte_dev.h',
>         'rte_devargs.h',
> diff --git a/lib/eal/include/rte_counter.h b/lib/eal/include/rte_counter.h
> new file mode 100644
> index 0000000000..d623195d63
> --- /dev/null
> +++ b/lib/eal/include/rte_counter.h
> @@ -0,0 +1,98 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) Stephen Hemminger <step...@networkplumber.org>
> + */
> +
> +#ifndef _RTE_COUNTER_H_
> +#define _RTE_COUNTER_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @file
> + * RTE Counter
> + *
> + * A counter is 64 bit value that is safe from split read/write
> + * on 32 bit platforms. It assumes that only one cpu at a time
If we define the counter in this manner, then the implementation cannot be 
generic. I think architectures will have constraints if they have to ensure the 
64-bit variables are not split.

I think we at least need the counter to be aligned on an 8-byte boundary to 
have generic code.

> + * will update the counter, and another CPU may want to read it.
> + *
> + * This is a much weaker guarantee than full atomic variables
> + * but is faster since no locked operations are required for update.
> + */
> +
> +#ifdef RTE_ARCH_64
> +/*
> + * On a platform that can support native 64 bit type, no special handling.
> + * These are just wrapper around 64 bit value.
> + */
> +typedef uint64_t rte_counter64_t;
> +
> +/**
> + * Add value to counter.
> + */
> +__rte_experimental
> +static inline void
> +rte_counter64_add(rte_counter64_t *counter, uint32_t val)
> +{
> + *counter += val;
> +}
> +
> +__rte_experimental
> +static inline uint64_t
> +rte_counter64_fetch(const rte_counter64_t *counter)
> +{
> + return *counter;
> +}
> +
> +__rte_experimental
> +static inline void
> +rte_counter64_set(rte_counter64_t *counter, uint64_t val)
> +{
> + *counter = val;
> +}
> +
> +#else
> +
> +#include <rte_stdatomic.h>
> +
> +/*
> + * On a 32 bit platform need to use atomic to force the compiler to not
> + * split 64 bit read/write.
> + */
> +typedef RTE_ATOMIC(uint64_t) rte_counter64_t;
> +
> +__rte_experimental
> +static inline void
> +rte_counter64_add(rte_counter64_t *counter, uint32_t val)
> +{
> + rte_atomic_fetch_add_explicit(counter, val, rte_memory_order_relaxed);
> +}
> +
> +__rte_experimental
> +static inline uint64_t
> +rte_counter64_fetch(const rte_counter64_t *counter)
> +{
> + return rte_atomic_load_explicit(counter, rte_memory_order_consume);
> +}
> +
> +__rte_experimental
> +static inline void
> +rte_counter64_set(rte_counter64_t *counter, uint64_t val)
> +{
> + rte_atomic_store_explicit(counter, val, rte_memory_order_release);
> +}
> +#endif
> +
> +__rte_experimental
> +static inline void
> +rte_counter64_reset(rte_counter64_t *counter)
> +{
> + rte_counter64_set(counter, 0);
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_COUNTER_H_ */
> -- 
> 2.43.0
> 

Reply via email to