Re: [ovs-dev] [PATCH net-next v6 04/10] net: openvswitch: optimize flow mask cache hash collision

2019-11-02 Thread Pravin Shelar
On Fri, Nov 1, 2019 at 7:24 AM  wrote:
>
> From: Tonghao Zhang 
>
> Port the code to Linux upstream with minor changes.
>
> Pravin B Shelar says:
> | In case of a hash collision in the mask cache, OVS does an extra flow
> | lookup. The following patch avoids it.
>
> Link: https://github.com/openvswitch/ovs/commit/0e6efbe2712da03522532dc5e84806a96f6a0dd1
> Signed-off-by: Tonghao Zhang 
> Tested-by: Greg Rose 
> ---
Signed-off-by: Pravin B Shelar 


[ovs-dev] [PATCH net-next v6 04/10] net: openvswitch: optimize flow mask cache hash collision

2019-11-01 Thread xiangxia.m.yue
From: Tonghao Zhang 

Port the code to Linux upstream with minor changes.

Pravin B Shelar says:
| In case of a hash collision in the mask cache, OVS does an extra flow
| lookup. The following patch avoids it.
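
To make the collision concrete: the mask cache is a per-CPU array of
(skb_hash, mask_index) pairs, and a packet's skb_hash selects the slot to
probe. Below is a minimal user-space sketch of the probing scheme; the
MC_HASH_* values and the mask_cache_entry layout follow the identifiers
visible in the diff, but the program itself is an illustration, not
kernel code.

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SHIFT   8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS    ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

struct mask_cache_entry {
	uint32_t skb_hash;   /* hash of the flow that last filled this slot */
	uint32_t mask_index; /* mask-array index that matched for that flow */
};

int main(void)
{
	struct mask_cache_entry cache[MC_HASH_ENTRIES] = { { 0, 0 } };
	uint32_t skb_hash = 0xdeadbeef;
	uint32_t hash = skb_hash;
	unsigned int seg;

	/* Probe up to MC_HASH_SEGS slots, consuming MC_HASH_SHIFT bits of
	 * the hash per probe. Flows whose hashes agree in the probed bits
	 * contend for the same slot, and a collision costs an extra flow
	 * lookup on top of the cached-mask check.
	 */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		uint32_t index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e = &cache[index];

		printf("segment %u probes slot %u (cached mask %u)\n",
		       seg, (unsigned)index, (unsigned)e->mask_index);
		hash >>= MC_HASH_SHIFT;
	}
	return 0;
}

When two flows collide on every segment, each lookup falls back to
scanning the mask array, which is the overhead this patch removes.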

Link: https://github.com/openvswitch/ovs/commit/0e6efbe2712da03522532dc5e84806a96f6a0dd1
Signed-off-by: Tonghao Zhang 
Tested-by: Greg Rose 
---
 net/openvswitch/flow_table.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------
 1 file changed, 53 insertions(+), 42 deletions(-)
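
The core of the rework is visible in flow_lookup() in the diff below: try
the mask index cached in *index first, and only on a miss scan the
remaining masks, recording the one that matched. A simplified,
self-contained C model of that lookup order (all names hypothetical;
find() stands in for masked_flow_lookup()):

#include <stdint.h>
#include <stddef.h>

struct flow;

struct flow *lookup_with_hint(struct flow *(*find)(size_t i),
			      size_t max, uint32_t *index)
{
	struct flow *flow;
	size_t i;

	if (*index < max) {
		flow = find(*index);          /* fast path: cached mask */
		if (flow)
			return flow;
	}

	for (i = 0; i < max; i++) {           /* slow path: other masks */
		if (i == *index)
			continue;

		flow = find(i);
		if (flow) {
			*index = (uint32_t)i; /* remember for next time */
			return flow;
		}
	}
	return NULL;
}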

diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 0c0fcd6..c7ba435 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -508,6 +508,9 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
return NULL;
 }
 
+/* Flow lookup does a full lookup on the flow table. It starts with
+ * the mask from the index passed in via *index.
+ */
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
   struct table_instance *ti,
   struct mask_array *ma,
@@ -515,19 +518,32 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
   u32 *n_mask_hit,
   u32 *index)
 {
+   struct sw_flow_mask *mask;
struct sw_flow *flow;
int i;
 
-   for (i = 0; i < ma->max; i++)  {
-   struct sw_flow_mask *mask;
-
-   mask = rcu_dereference_ovsl(ma->masks[i]);
+   if (*index < ma->max) {
+   mask = rcu_dereference_ovsl(ma->masks[*index]);
if (mask) {
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-   if (flow) { /* Found */
-   *index = i;
+   if (flow)
return flow;
-   }
+   }
+   }
+
+   for (i = 0; i < ma->max; i++)  {
+
+   if (i == *index)
+   continue;
+
+   mask = rcu_dereference_ovsl(ma->masks[i]);
+   if (!mask)
+   continue;
+
+   flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+   if (flow) { /* Found */
+   *index = i;
+   return flow;
}
}
 
@@ -546,58 +562,54 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
  u32 skb_hash,
  u32 *n_mask_hit)
 {
-   struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-   struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-   struct mask_cache_entry  *entries, *ce, *del;
+   struct mask_array *ma = rcu_dereference(tbl->mask_array);
+   struct table_instance *ti = rcu_dereference(tbl->ti);
+   struct mask_cache_entry *entries, *ce;
struct sw_flow *flow;
-   u32 hash = skb_hash;
+   u32 hash;
int seg;
 
*n_mask_hit = 0;
if (unlikely(!skb_hash)) {
-   u32 __always_unused mask_index;
+   u32 mask_index = 0;
 
return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
}
 
-   del = NULL;
+   /* Pre- and post-recirculation flows usually have the same skb_hash
+    * value. To avoid hash collisions, rehash the 'skb_hash' with
+    * 'recirc_id'.  */
+   if (key->recirc_id)
+   skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+   ce = NULL;
+   hash = skb_hash;
entries = this_cpu_ptr(tbl->mask_cache);
 
+   /* Find the cache entry 'ce' to operate on. */
for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-   int index;
-
-   index = hash & (MC_HASH_ENTRIES - 1);
-   ce = &entries[index];
-
-   if (ce->skb_hash == skb_hash) {
-   struct sw_flow_mask *mask;
-   struct sw_flow *flow;
-
-   mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
-   if (mask) {
-   flow = masked_flow_lookup(ti, key, mask,
- n_mask_hit);
-   if (flow)  /* Found */
-   return flow;
-   }
-
-   del = ce;
-   break;
+   int index = hash & (MC_HASH_ENTRIES - 1);
+   struct mask_cache_entry *e;
+
+   e = &entries[index];
+   if (e->skb_hash == skb_hash) {
+   flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+  &e->mask_index);
+   if (!flow)
+   e->skb_hash = 0;
+   return flow;
}
 
-   if (!del || (del->skb_hash && !ce->skb_hash) ||
-
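
One detail worth illustrating is the recirc_id rehash added in
ovs_flow_tbl_lookup_stats(): a packet keeps its skb_hash across
recirculation, so pre- and post-recirculation lookups would otherwise
contend for the same cache slot. The sketch below uses a simple integer
mix as a stand-in for the kernel's jhash_1word() (illustration only; the
real helper lives in <linux/jhash.h>):

#include <stdint.h>
#include <stdio.h>

/* Not the real Jenkins hash; just enough mixing to show that the same
 * skb_hash maps to different cache slots per recirc_id (assumption:
 * illustration only).
 */
static uint32_t mix_1word(uint32_t a, uint32_t initval)
{
	a += initval;
	a ^= a >> 16;
	a *= 0x45d9f3bu;
	a ^= a >> 16;
	return a;
}

int main(void)
{
	uint32_t skb_hash = 0x12345678;               /* same pre/post recirc */
	uint32_t pre  = skb_hash;                     /* recirc_id == 0: as-is */
	uint32_t post = mix_1word(skb_hash, 1);       /* recirc_id == 1: mixed */

	printf("pre-recirc slot:  %u\n", (unsigned)(pre  & 255u));
	printf("post-recirc slot: %u\n", (unsigned)(post & 255u));
	return 0;
}

With the rehash, the two lookup phases of a recirculated packet populate
and probe separate cache slots instead of evicting each other.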