Re: [ovs-dev] [PATCH net-next v3 02/10] net: openvswitch: convert mask list in mask array

2019-10-13 Thread kbuild test robot
Hi,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:
https://github.com/0day-ci/linux/commits/xiangxia-m-yue-gmail-com/optimize-openvswitch-flow-looking-up/20191013-161404
reproduce:
# apt-get install sparse
# sparse version: v0.6.1-rc1-43-g0ccb3b4-dirty
make ARCH=x86_64 allmodconfig
make C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot 


sparse warnings: (new ones prefixed by >>)

>> net/openvswitch/flow_table.c:307:9: sparse: sparse: incorrect type in
>> argument 1 (different address spaces) @@ expected struct callback_head
>> *head @@ got struct callback_head [noderef] * @@
>> net/openvswitch/flow_table.c:307:9: sparse:    expected struct callback_head
>> *head
>> net/openvswitch/flow_table.c:307:9: sparse:    got struct callback_head
>> [noderef]  *

vim +307 net/openvswitch/flow_table.c

   297  
   298  /* No need for locking this function is called from RCU callback or
   299   * error path.
   300   */
   301  void ovs_flow_tbl_destroy(struct flow_table *table)
   302  {
   303  struct table_instance *ti = rcu_dereference_raw(table->ti);
   304  struct table_instance *ufid_ti = 
rcu_dereference_raw(table->ufid_ti);
   305  
   306  free_percpu(table->mask_cache);
 > 307  kfree_rcu(table->mask_array, rcu);
   308  table_instance_destroy(ti, ufid_ti, false);
   309  }
   310  

---
0-DAY kernel test infrastructure            Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


[ovs-dev] [PATCH net-next v3 02/10] net: openvswitch: convert mask list in mask array

2019-10-11 Thread xiangxia . m . yue
From: Tonghao Zhang 

Port the codes to linux upstream and with little changes.

Pravin B Shelar, says:
| mask caches index of mask in mask_list. On packet recv OVS
| need to traverse mask-list to get cached mask. Therefore array
| is better for retrieving cached mask. This also allows better
| cache replacement algorithm by directly checking mask's existence.

Link: 
https://github.com/openvswitch/ovs/commit/d49fc3ff53c65e4eca9cabd52ac63396746a7ef5
Signed-off-by: Tonghao Zhang 
---
 net/openvswitch/flow.h   |   1 -
 net/openvswitch/flow_table.c | 210 ---
 net/openvswitch/flow_table.h |   8 +-
 3 files changed, 167 insertions(+), 52 deletions(-)

diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index b830d5f..8080518 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -166,7 +166,6 @@ struct sw_flow_key_range {
 struct sw_flow_mask {
int ref_count;
struct rcu_head rcu;
-   struct list_head list;
struct sw_flow_key_range range;
struct sw_flow_key key;
 };
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 3d515c0..aab7a27 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -34,6 +34,7 @@
 #include 
 
 #define TBL_MIN_BUCKETS		1024
+#define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
 #define MC_HASH_SHIFT  8
@@ -168,9 +169,51 @@ static struct table_instance *table_instance_alloc(int 
new_size)
return ti;
 }
 
+static struct mask_array *tbl_mask_array_alloc(int size)
+{
+   struct mask_array *new;
+
+   size = max(MASK_ARRAY_SIZE_MIN, size);
+   new = kzalloc(sizeof(struct mask_array) +
+ sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+   if (!new)
+   return NULL;
+
+   new->count = 0;
+   new->max = size;
+
+   return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+   struct mask_array *old;
+   struct mask_array *new;
+
+   new = tbl_mask_array_alloc(size);
+   if (!new)
+   return -ENOMEM;
+
+   old = ovsl_dereference(tbl->mask_array);
+   if (old) {
+   int i;
+
+   for (i = 0; i < old->max; i++) {
+   if (ovsl_dereference(old->masks[i]))
+   new->masks[new->count++] = old->masks[i];
+   }
+   }
+
+   rcu_assign_pointer(tbl->mask_array, new);
+   kfree_rcu(old, rcu);
+
+   return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
struct table_instance *ti, *ufid_ti;
+   struct mask_array *ma;
 
table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
   MC_HASH_ENTRIES,
@@ -178,9 +221,13 @@ int ovs_flow_tbl_init(struct flow_table *table)
if (!table->mask_cache)
return -ENOMEM;
 
+   ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+   if (!ma)
+   goto free_mask_cache;
+
ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
-   goto free_mask_cache;
+   goto free_mask_array;
 
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
@@ -188,7 +235,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
-   INIT_LIST_HEAD(&table->mask_list);
+   rcu_assign_pointer(table->mask_array, ma);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
@@ -196,6 +243,8 @@ int ovs_flow_tbl_init(struct flow_table *table)
 
 free_ti:
__table_instance_destroy(ti);
+free_mask_array:
+   kfree(ma);
 free_mask_cache:
free_percpu(table->mask_cache);
return -ENOMEM;
@@ -255,6 +304,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 
free_percpu(table->mask_cache);
+   kfree_rcu(table->mask_array, rcu);
table_instance_destroy(ti, ufid_ti, false);
 }
 
@@ -460,17 +510,27 @@ static struct sw_flow *masked_flow_lookup(struct 
table_instance *ti,
 
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
   struct table_instance *ti,
+  struct mask_array *ma,
   const struct sw_flow_key *key,
-  u32 *n_mask_hit)
+  u32 *n_mask_hit,
+  u32 *index)
 {
-   struct sw_flow_mask *mask;
struct sw_flow *flow;
+   int i;
 
-   list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
-   flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-   if (flow)  /* Found */
-   return flow;
+