Use actual CPU number instead of hardcoded value to decide the size
of 'cpu_used_mask' in 'struct sw_flow'. Below is the reason.

'struct cpumask cpu_used_mask' is embedded in struct sw_flow.
Its size is hardcoded to CONFIG_NR_CPUS bits, which can be as
large as 8192 by default; this wastes memory and slows down
ovs_flow_alloc.

To address this:
 Redefine cpu_used_mask as a pointer.
 Append cpumask_size() bytes after 'stats' to hold the cpumask.
 Initialize cpu_used_mask right after stats_last_writer.

APIs like cpumask_next and cpumask_set_cpu never access bits
beyond the CPU count, so cpumask_size() bytes of memory are enough.

Signed-off-by: Eddy Tao <taoyuan_e...@hotmail.com>
---
 V7 -> V8: add blank space after colon in subject line
 V6 -> V7: initialize cpu_used_mask after stats_last_writer
 V5 -> V6: add tab to align the wrapped cpumask_set_cpu call
 V4 -> V5: fix max-line-length warning
 V3 -> V4: no change, sorry, my bad
 V2 -> V3: add 'net-next' in prefix
           fix max-line-length warning
           remove the comment
 V1 -> V2: make comment imperative
           remove unnecessary parentheses

 net/openvswitch/flow.c       | 9 ++++++---
 net/openvswitch/flow.h       | 2 +-
 net/openvswitch/flow_table.c | 8 +++++---
 3 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e20d1a973417..416976f70322 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -107,7 +107,8 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 
tcp_flags,
 
                                        rcu_assign_pointer(flow->stats[cpu],
                                                           new_stats);
-                                       cpumask_set_cpu(cpu, 
&flow->cpu_used_mask);
+                                       cpumask_set_cpu(cpu,
+                                                       flow->cpu_used_mask);
                                        goto unlock;
                                }
                        }
@@ -135,7 +136,8 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
        memset(ovs_stats, 0, sizeof(*ovs_stats));
 
        /* We open code this to make sure cpu 0 is always considered */
-       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, 
&flow->cpu_used_mask)) {
+       for (cpu = 0; cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
                struct sw_flow_stats *stats = 
rcu_dereference_ovsl(flow->stats[cpu]);
 
                if (stats) {
@@ -159,7 +161,8 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
        int cpu;
 
        /* We open code this to make sure cpu 0 is always considered */
-       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, 
&flow->cpu_used_mask)) {
+       for (cpu = 0; cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
                struct sw_flow_stats *stats = 
ovsl_dereference(flow->stats[cpu]);
 
                if (stats) {
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 073ab73ffeaa..b5711aff6e76 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -229,7 +229,7 @@ struct sw_flow {
                                         */
        struct sw_flow_key key;
        struct sw_flow_id id;
-       struct cpumask cpu_used_mask;
+       struct cpumask *cpu_used_mask;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
        struct sw_flow_stats __rcu *stats[]; /* One for each CPU.  First one
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 0a0e4c283f02..791504b7f42b 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -79,6 +79,7 @@ struct sw_flow *ovs_flow_alloc(void)
                return ERR_PTR(-ENOMEM);
 
        flow->stats_last_writer = -1;
+       flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
 
        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -91,7 +92,7 @@ struct sw_flow *ovs_flow_alloc(void)
 
        RCU_INIT_POINTER(flow->stats[0], stats);
 
-       cpumask_set_cpu(0, &flow->cpu_used_mask);
+       cpumask_set_cpu(0, flow->cpu_used_mask);
 
        return flow;
 err:
@@ -115,7 +116,7 @@ static void flow_free(struct sw_flow *flow)
                                          flow->sf_acts);
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids;
-            cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+            cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
                                        (struct sw_flow_stats __force 
*)flow->stats[cpu]);
@@ -1196,7 +1197,8 @@ int ovs_flow_init(void)
 
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_cpu_ids
-                                         * sizeof(struct sw_flow_stats *)),
+                                         * sizeof(struct sw_flow_stats *))
+                                      + cpumask_size(),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;
-- 
2.27.0

_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to