The tt.local_changes atomic is only ever written while holding
tt.changes_list_lock, or close to it (see batadv_tt_local_event()). Thus
the performance gain from using an atomic was limited (or, because of the
atomic_read() overhead, possibly even negative). Using an atomic also
means having to be wary of a potentially negative tt.local_changes value.

Simplify the tt.local_changes usage by removing the atomic property and
modifying it only with tt.changes_list_lock held.

Signed-off-by: Remi Pommarel <[email protected]>
---
 net/batman-adv/soft-interface.c    |  2 +-
 net/batman-adv/translation-table.c | 24 +++++++++++-------------
 net/batman-adv/types.h             |  4 ++--
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 2758aba47a2f..5666c268cead 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -783,13 +783,13 @@ static int batadv_softif_init_late(struct net_device *dev)
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
        atomic_set(&bat_priv->bcast_seqno, 1);
        atomic_set(&bat_priv->tt.vn, 0);
-       atomic_set(&bat_priv->tt.local_changes, 0);
        atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
 #ifdef CONFIG_BATMAN_ADV_BLA
        atomic_set(&bat_priv->bla.num_requests, 0);
 #endif
        atomic_set(&bat_priv->tp_num, 0);
 
+       WRITE_ONCE(bat_priv->tt.local_changes, 0);
        bat_priv->tt.last_changeset = NULL;
        bat_priv->tt.last_changeset_len = 0;
        bat_priv->isolation_mark = 0;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 53dea8ae96e4..f7e894811e7f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -463,8 +463,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        struct batadv_tt_change_node *tt_change_node, *entry, *safe;
        struct batadv_tt_common_entry *common = &tt_local_entry->common;
        u8 flags = common->flags | event_flags;
-       bool event_removed = false;
        bool del_op_requested, del_op_entry;
+       size_t changes;
 
        tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
        if (!tt_change_node)
@@ -480,6 +480,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 
        /* check for ADD+DEL or DEL+ADD events */
        spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       changes = READ_ONCE(bat_priv->tt.local_changes);
        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (!batadv_compare_eth(entry->change.addr, common->addr))
@@ -508,21 +509,18 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 del:
                list_del(&entry->list);
                kmem_cache_free(batadv_tt_change_cache, entry);
+               changes--;
                kmem_cache_free(batadv_tt_change_cache, tt_change_node);
-               event_removed = true;
-               goto unlock;
+               goto update_changes;
        }
 
        /* track the change in the OGMinterval list */
        list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
+       changes++;
 
-unlock:
+update_changes:
+       WRITE_ONCE(bat_priv->tt.local_changes, changes);
        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
-
-       if (event_removed)
-               atomic_dec(&bat_priv->tt.local_changes);
-       else
-               atomic_inc(&bat_priv->tt.local_changes);
 }
 
 /**
@@ -994,7 +992,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
        size_t tt_extra_len = 0;
        u16 tvlv_len;
 
-       tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+       tt_diff_entries_num = READ_ONCE(bat_priv->tt.local_changes);
        tt_diff_len = batadv_tt_len(tt_diff_entries_num);
 
        /* if we have too many changes for one packet don't send any
@@ -1021,7 +1019,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
                goto container_register;
 
        spin_lock_bh(&bat_priv->tt.changes_list_lock);
-       atomic_set(&bat_priv->tt.local_changes, 0);
+       WRITE_ONCE(bat_priv->tt.local_changes, 0);
 
        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
@@ -1437,7 +1435,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
                kmem_cache_free(batadv_tt_change_cache, entry);
        }
 
-       atomic_set(&bat_priv->tt.local_changes, 0);
+       WRITE_ONCE(bat_priv->tt.local_changes, 0);
        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
@@ -3707,7 +3705,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
        lockdep_assert_held(&bat_priv->tt.commit_lock);
 
-       if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+       if (READ_ONCE(bat_priv->tt.local_changes) == 0) {
                if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
                        batadv_tt_tvlv_container_update(bat_priv);
                return;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 04f6398b3a40..f491bff8c51b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1022,7 +1022,7 @@ struct batadv_priv_tt {
        atomic_t ogm_append_cnt;
 
        /** @local_changes: changes registered in an originator interval */
-       atomic_t local_changes;
+       size_t local_changes;
 
        /**
         * @changes_list: tracks tt local changes within an originator interval
@@ -1044,7 +1044,7 @@ struct batadv_priv_tt {
         */
        struct list_head roam_list;
 
-       /** @changes_list_lock: lock protecting changes_list */
+       /** @changes_list_lock: lock protecting changes_list & local_changes */
        spinlock_t changes_list_lock;
 
        /** @req_list_lock: lock protecting req_list */
-- 
2.40.0

Reply via email to