Re: [ovs-dev] [PATCH net-next v6 01/10] net: openvswitch: add flow-mask cache for performance

2019-11-02 Thread Pravin Shelar
On Fri, Nov 1, 2019 at 7:24 AM  wrote:
>
> From: Tonghao Zhang 
>
> The idea of this optimization comes from a patch which
> is committed in 2014, openvswitch community. The author
> is Pravin B Shelar. In order to get high performance, I
> implement it again. Later patches will use it.
>
> Pravin B Shelar, says:
> | On every packet OVS needs to lookup flow-table with every
> | mask until it finds a match. The packet flow-key is first
> | masked with mask in the list and then the masked key is
> | looked up in flow-table. Therefore number of masks can
> | affect packet processing performance.
>
> Link: https://github.com/openvswitch/ovs/commit/5604935e4e1cbc16611d2d97f50b717aa31e8ec5
> Signed-off-by: Tonghao Zhang 
> Tested-by: Greg Rose 
> Acked-by: William Tu 
> ---
Signed-off-by: Pravin B Shelar 
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH net-next v6 01/10] net: openvswitch: add flow-mask cache for performance

2019-11-02 Thread Tonghao Zhang
On Sat, Nov 2, 2019 at 7:40 AM William Tu  wrote:
>
> On Fri, Nov 1, 2019 at 7:25 AM  wrote:
> >
> > From: Tonghao Zhang 
> >
> > The idea of this optimization comes from a patch which
> > is committed in 2014, openvswitch community. The author
> > is Pravin B Shelar. In order to get high performance, I
> > implement it again. Later patches will use it.
> >
> > Pravin B Shelar, says:
> > | On every packet OVS needs to lookup flow-table with every
> > | mask until it finds a match. The packet flow-key is first
> > | masked with mask in the list and then the masked key is
> > | looked up in flow-table. Therefore number of masks can
> > | affect packet processing performance.
> >
> > Link: https://github.com/openvswitch/ovs/commit/5604935e4e1cbc16611d2d97f50b717aa31e8ec5
> > Signed-off-by: Tonghao Zhang 
> > Tested-by: Greg Rose 
> > Acked-by: William Tu 
> > ---
>
> Would you consider changing the author of this patch to Pravin?
The commit message of the patches explains who the author is, and the URL
of the original patch is included in the commit message.
Should we update the patches again (changing the commit author)?
> Regards,
> William
>
> 
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH net-next v6 01/10] net: openvswitch: add flow-mask cache for performance

2019-11-01 Thread William Tu
On Fri, Nov 1, 2019 at 7:25 AM  wrote:
>
> From: Tonghao Zhang 
>
> The idea of this optimization comes from a patch which
> is committed in 2014, openvswitch community. The author
> is Pravin B Shelar. In order to get high performance, I
> implement it again. Later patches will use it.
>
> Pravin B Shelar, says:
> | On every packet OVS needs to lookup flow-table with every
> | mask until it finds a match. The packet flow-key is first
> | masked with mask in the list and then the masked key is
> | looked up in flow-table. Therefore number of masks can
> | affect packet processing performance.
>
> Link: https://github.com/openvswitch/ovs/commit/5604935e4e1cbc16611d2d97f50b717aa31e8ec5
> Signed-off-by: Tonghao Zhang 
> Tested-by: Greg Rose 
> Acked-by: William Tu 
> ---

Would you consider changing the author of this patch to Pravin?

Regards,
William


___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


[ovs-dev] [PATCH net-next v6 01/10] net: openvswitch: add flow-mask cache for performance

2019-11-01 Thread xiangxia . m . yue
From: Tonghao Zhang 

The idea of this optimization comes from a patch which
is committed in 2014, openvswitch community. The author
is Pravin B Shelar. In order to get high performance, I
implement it again. Later patches will use it.

Pravin B Shelar, says:
| On every packet OVS needs to lookup flow-table with every
| mask until it finds a match. The packet flow-key is first
| masked with mask in the list and then the masked key is
| looked up in flow-table. Therefore number of masks can
| affect packet processing performance.

Link: https://github.com/openvswitch/ovs/commit/5604935e4e1cbc16611d2d97f50b717aa31e8ec5
Signed-off-by: Tonghao Zhang 
Tested-by: Greg Rose 
Acked-by: William Tu 
---
 net/openvswitch/datapath.c   |   3 +-
 net/openvswitch/flow_table.c | 109 +--
 net/openvswitch/flow_table.h |  11 -
 3 files changed, 107 insertions(+), 16 deletions(-)

diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f30e406..9fea7e1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -227,7 +227,8 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
stats = this_cpu_ptr(dp->stats_percpu);
 
/* Look up flow. */
-   flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
+   flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
+&n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index cf3582c..3d515c0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -36,6 +36,10 @@
 #define TBL_MIN_BUCKETS1024
 #define REHASH_INTERVAL(10 * 60 * HZ)
 
+#define MC_HASH_SHIFT  8
+#define MC_HASH_ENTRIES(1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS   ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
 static struct kmem_cache *flow_cache;
 struct kmem_cache *flow_stats_cache __read_mostly;
 
@@ -168,10 +172,15 @@ int ovs_flow_tbl_init(struct flow_table *table)
 {
struct table_instance *ti, *ufid_ti;
 
-   ti = table_instance_alloc(TBL_MIN_BUCKETS);
+   table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+  MC_HASH_ENTRIES,
+  __alignof__(struct mask_cache_entry));
+   if (!table->mask_cache)
+   return -ENOMEM;
 
+   ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
-   return -ENOMEM;
+   goto free_mask_cache;
 
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
@@ -187,6 +196,8 @@ int ovs_flow_tbl_init(struct flow_table *table)
 
 free_ti:
__table_instance_destroy(ti);
+free_mask_cache:
+   free_percpu(table->mask_cache);
return -ENOMEM;
 }
 
@@ -243,6 +254,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 
+   free_percpu(table->mask_cache);
table_instance_destroy(ti, ufid_ti, false);
 }
 
@@ -425,7 +437,8 @@ static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 
 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
  const struct sw_flow_key *unmasked,
- const struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask,
+ u32 *n_mask_hit)
 {
struct sw_flow *flow;
struct hlist_head *head;
@@ -435,6 +448,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
ovs_flow_mask_key(&masked_key, unmasked, false, mask);
hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
+   (*n_mask_hit)++;
+
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
@@ -443,30 +458,97 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
return NULL;
 }
 
-struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
-   const struct sw_flow_key *key,
-   u32 *n_mask_hit)
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+  struct table_instance *ti,
+  const struct sw_flow_key *key,
+  u32 *n_mask_hit)
 {
-   struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
struct sw_flow *flow;
 
-   *n