Extend bitops tests to cover the
rte_bit_atomic_[set|clear|assign|test|test_and_[set|clear|assign]]()
family of functions.
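
For reference, a minimal sketch of how the covered API is exercised
(the helper name and bit indices below are illustrative only; the
functions are generic over uint32_t * and uint64_t * words, and the
memory order shown is rte_memory_order_relaxed, as used throughout
the new tests):

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_bitops.h>

    static void
    atomic_bit_sketch(uint64_t *word)
    {
            /* Atomically set bit 4, check it, and clear it again. */
            rte_bit_atomic_set(word, 4, rte_memory_order_relaxed);
            if (rte_bit_atomic_test(word, 4, rte_memory_order_relaxed))
                    rte_bit_atomic_clear(word, 4, rte_memory_order_relaxed);

            /* Write a bit from a boolean value. */
            rte_bit_atomic_assign(word, 7, true, rte_memory_order_relaxed);

            /* The test-and-modify variants return the previous bit value. */
            bool was_set = rte_bit_atomic_test_and_set(word, 7,
                                                       rte_memory_order_relaxed);
            was_set = rte_bit_atomic_test_and_clear(word, 7,
                                                    rte_memory_order_relaxed);
            was_set = rte_bit_atomic_test_and_assign(word, 7, false,
                                                     rte_memory_order_relaxed);
            (void)was_set;
    }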

Signed-off-by: Mattias Rönnblom <mattias.ronnb...@ericsson.com>
---
 app/test/test_bitops.c       | 233 ++++++++++++++++++++++++++++++++++-
 lib/eal/include/rte_bitops.h |   1 -
 2 files changed, 232 insertions(+), 2 deletions(-)

diff --git a/app/test/test_bitops.c b/app/test/test_bitops.c
index 12c1027e36..a0967260aa 100644
--- a/app/test/test_bitops.c
+++ b/app/test/test_bitops.c
@@ -3,10 +3,13 @@
  * Copyright(c) 2024 Ericsson AB
  */
 
+#include <inttypes.h>
 #include <stdbool.h>
 
-#include <rte_launch.h>
 #include <rte_bitops.h>
+#include <rte_cycles.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
 #include <rte_random.h>
 #include "test.h"
 
@@ -60,6 +63,228 @@ GEN_TEST_BIT_ACCESS(test_bit_once_access_64, rte_bit_once_set,              \
                    rte_bit_once_clear, rte_bit_once_assign,            \
                    rte_bit_once_test, 64)
 
+#define bit_atomic_set(addr, nr)                               \
+       rte_bit_atomic_set(addr, nr, rte_memory_order_relaxed)
+
+#define bit_atomic_clear(addr, nr)                                     \
+       rte_bit_atomic_clear(addr, nr, rte_memory_order_relaxed)
+
+#define bit_atomic_assign(addr, nr, value)                             \
+       rte_bit_atomic_assign(addr, nr, value, rte_memory_order_relaxed)
+
+#define bit_atomic_test(addr, nr)                              \
+       rte_bit_atomic_test(addr, nr, rte_memory_order_relaxed)
+
+GEN_TEST_BIT_ACCESS(test_bit_atomic_access_32, bit_atomic_set, \
+                   bit_atomic_clear, bit_atomic_assign,        \
+                   bit_atomic_test, 32)
+
+GEN_TEST_BIT_ACCESS(test_bit_atomic_access_64, bit_atomic_set, \
+                   bit_atomic_clear, bit_atomic_assign,        \
+                   bit_atomic_test, 64)
+
+#define PARALLEL_TEST_RUNTIME 0.25
+
+#define GEN_TEST_BIT_PARALLEL_ASSIGN(size)                             \
+                                                                       \
+       struct parallel_access_lcore_ ## size                           \
+       {                                                               \
+               unsigned int bit;                                       \
+               uint ## size ##_t *word;                                \
+               bool failed;                                            \
+       };                                                              \
+                                                                       \
+       static int                                                      \
+       run_parallel_assign_ ## size(void *arg)                         \
+       {                                                               \
+               struct parallel_access_lcore_ ## size *lcore = arg;     \
+               uint64_t deadline = rte_get_timer_cycles() +            \
+                       PARALLEL_TEST_RUNTIME * rte_get_timer_hz();     \
+               bool value = false;                                     \
+                                                                       \
+               do {                                                    \
+                       bool new_value = rte_rand() & 1;                \
+                       bool use_test_and_modify = rte_rand() & 1;      \
+                       bool use_assign = rte_rand() & 1;               \
+                                                                       \
+                       if (rte_bit_atomic_test(lcore->word, lcore->bit, \
+                                               rte_memory_order_relaxed) != value) { \
+                               lcore->failed = true;                   \
+                               break;                                  \
+                       }                                               \
+                                                                       \
+                       if (use_test_and_modify) {                      \
+                               bool old_value;                         \
+                               if (use_assign)                         \
+                                       old_value = rte_bit_atomic_test_and_assign( \
+                                               lcore->word, lcore->bit, new_value, \
+                                               rte_memory_order_relaxed); \
+                               else {                                  \
+                                       old_value = new_value ?         \
+                                               rte_bit_atomic_test_and_set( \
+                                                       lcore->word, lcore->bit, \
+                                                       rte_memory_order_relaxed) : \
+                                               rte_bit_atomic_test_and_clear( \
+                                                       lcore->word, lcore->bit, \
+                                                       rte_memory_order_relaxed); \
+                               }                                       \
+                               if (old_value != value) {               \
+                                       lcore->failed = true;           \
+                                       break;                          \
+                               }                                       \
+                       } else {                                        \
+                               if (use_assign)                         \
+                                       rte_bit_atomic_assign(lcore->word, lcore->bit, \
+                                                             new_value, \
+                                                             rte_memory_order_relaxed); \
+                               else {                                  \
+                                       if (new_value)                  \
+                                               rte_bit_atomic_set(     \
+                                                       lcore->word, lcore->bit, \
+                                                       rte_memory_order_relaxed); \
+                                       else                            \
+                                               rte_bit_atomic_clear(   \
+                                                       lcore->word, lcore->bit, \
+                                                       rte_memory_order_relaxed); \
+                               }                                       \
+                       }                                               \
+                                                                       \
+                       value = new_value;                              \
+               } while (rte_get_timer_cycles() < deadline);            \
+                                                                       \
+               return 0;                                               \
+       }                                                               \
+                                                                       \
+       static int                                                      \
+       test_bit_atomic_parallel_assign_ ## size(void)                  \
+       {                                                               \
+               unsigned int worker_lcore_id;                           \
+               uint ## size ## _t word = 0;                            \
+               struct parallel_access_lcore_ ## size main = {          \
+                       .word = &word                                   \
+               };                                                      \
+               struct parallel_access_lcore_ ## size worker = {        \
+                       .word = &word                                   \
+               };                                                      \
+                                                                       \
+               if (rte_lcore_count() < 2) {                            \
+                       printf("Need multiple cores to run parallel test.\n"); \
+                       return TEST_SKIPPED;                            \
+               }                                                       \
+                                                                       \
+               worker_lcore_id = rte_get_next_lcore(-1, 1, 0);         \
+                                                                       \
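+               /* Use two distinct bits in the same word, so each lcore \
+                * can verify that its bit always holds the value it     \
+                * last wrote, regardless of the other lcore's accesses. \
+                */                                                      \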
+               main.bit = rte_rand_max(size);                          \
+               do {                                                    \
+                       worker.bit = rte_rand_max(size);                \
+               } while (worker.bit == main.bit);                       \
+                                                                       \
+               int rc = rte_eal_remote_launch(run_parallel_assign_ ## size, \
+                                              &worker, worker_lcore_id); \
+               TEST_ASSERT(rc == 0, "Worker thread launch failed");    \
+                                                                       \
+               run_parallel_assign_ ## size(&main);                    \
+                                                                       \
+               rte_eal_mp_wait_lcore();                                \
+                                                                       \
+               TEST_ASSERT(!main.failed, "Main lcore atomic access failed"); \
+               TEST_ASSERT(!worker.failed, "Worker lcore atomic access " \
+                           "failed");                                  \
+                                                                       \
+               return TEST_SUCCESS;                                    \
+       }
+
+GEN_TEST_BIT_PARALLEL_ASSIGN(32)
+GEN_TEST_BIT_PARALLEL_ASSIGN(64)
+
+#define GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(size)                    \
+                                                                       \
+       struct parallel_test_and_set_lcore_ ## size                     \
+       {                                                               \
+               uint ## size ##_t *word;                                \
+               unsigned int bit;                                       \
+               uint64_t flips;                                         \
+       };                                                              \
+                                                                       \
+       static int                                                      \
+       run_parallel_test_and_modify_ ## size(void *arg)                \
+       {                                                               \
+               struct parallel_test_and_set_lcore_ ## size *lcore = arg; \
+               uint64_t deadline = rte_get_timer_cycles() +            \
+                       PARALLEL_TEST_RUNTIME * rte_get_timer_hz();     \
+               do {                                                    \
+                       bool old_value;                                 \
+                       bool new_value = rte_rand() & 1;                \
+                       bool use_assign = rte_rand() & 1;               \
+                                                                       \
+                       if (use_assign)                                 \
+                               old_value = rte_bit_atomic_test_and_assign( \
+                                       lcore->word, lcore->bit, new_value, \
+                                       rte_memory_order_relaxed);      \
+                       else                                            \
+                               old_value = new_value ?                 \
+                                       rte_bit_atomic_test_and_set(    \
+                                               lcore->word, lcore->bit, \
+                                               rte_memory_order_relaxed) : \
+                                       rte_bit_atomic_test_and_clear(  \
+                                               lcore->word, lcore->bit, \
+                                               rte_memory_order_relaxed); \
+                       if (old_value != new_value)                     \
+                               lcore->flips++;                         \
+               } while (rte_get_timer_cycles() < deadline);            \
+                                                                       \
+               return 0;                                               \
+       }                                                               \
+                                                                       \
+       static int                                                      \
+       test_bit_atomic_parallel_test_and_modify_ ## size(void)         \
+       {                                                               \
+               unsigned int worker_lcore_id;                           \
+               uint ## size ## _t word = 0;                            \
+               unsigned int bit = rte_rand_max(size);                  \
+               struct parallel_test_and_set_lcore_ ## size main = {    \
+                       .word = &word,                                 \
+                       .bit = bit \
+               };                                                      \
+               struct parallel_test_and_set_lcore_ ## size worker = {  \
+                       .word = &word,                                  \
+                       .bit = bit                                      \
+               };                                                      \
+                                                                       \
+               if (rte_lcore_count() < 2) {                            \
+                       printf("Need multiple cores to run parallel test.\n"); \
+                       return TEST_SKIPPED;                            \
+               }                                                       \
+                                                                       \
+               worker_lcore_id = rte_get_next_lcore(-1, 1, 0);         \
+                                                                       \
+               int rc = rte_eal_remote_launch(run_parallel_test_and_modify_ ## size, \
+                                              &worker, worker_lcore_id); \
+               TEST_ASSERT(rc == 0, "Worker thread launch failed");    \
+                                                                       \
+               run_parallel_test_and_modify_ ## size(&main);           \
+                                                                       \
+               rte_eal_mp_wait_lcore();                                \
+                                                                       \
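+               /* Both lcores operate on the same bit; each counted    \
+                * flip toggles it once, so the final value must equal  \
+                * the parity of the combined flip count.               \
+                */                                                     \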
+               uint64_t total_flips = main.flips + worker.flips;       \
+               bool expected_value = total_flips % 2;                  \
+                                                                       \
+               TEST_ASSERT(expected_value == rte_bit_test(&word, bit), \
+                           "After %"PRIu64" flips, the bit value "     \
+                           "should be %d", total_flips, expected_value); \
+                                                                       \
+               uint64_t expected_word = 0;                             \
+               rte_bit_assign(&expected_word, bit, expected_value);    \
+                                                                       \
+               TEST_ASSERT(expected_word == word, "Untouched bits have " \
+                           "changed value");                           \
+                                                                       \
+               return TEST_SUCCESS;                                    \
+       }
+
+GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(32)
+GEN_TEST_BIT_PARALLEL_TEST_AND_MODIFY(64)
+
 static uint32_t val32;
 static uint64_t val64;
 
@@ -178,6 +403,12 @@ static struct unit_test_suite test_suite = {
                TEST_CASE(test_bit_access_64),
                TEST_CASE(test_bit_once_access_32),
                TEST_CASE(test_bit_once_access_64),
+               TEST_CASE(test_bit_atomic_access_32),
+               TEST_CASE(test_bit_atomic_access_64),
+               TEST_CASE(test_bit_atomic_parallel_assign_32),
+               TEST_CASE(test_bit_atomic_parallel_assign_64),
+               TEST_CASE(test_bit_atomic_parallel_test_and_modify_32),
+               TEST_CASE(test_bit_atomic_parallel_test_and_modify_64),
                TEST_CASE(test_bit_relaxed_set),
                TEST_CASE(test_bit_relaxed_clear),
                TEST_CASE(test_bit_relaxed_test_set_clear),
diff --git a/lib/eal/include/rte_bitops.h b/lib/eal/include/rte_bitops.h
index 8c38a1ac03..bc6d79086b 100644
--- a/lib/eal/include/rte_bitops.h
+++ b/lib/eal/include/rte_bitops.h
@@ -485,7 +485,6 @@ extern "C" {
                 uint32_t *: __rte_bit_atomic_test_and_clear32,         \
                 uint64_t *: __rte_bit_atomic_test_and_clear64)(addr, nr, \
                                                                memory_order)
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
-- 
2.34.1
