Packet batch sorting is removed for three reasons:

1) The following NAT patches change how the locking is
   marshalled, so batching no longer provides a benefit;
   the resulting per-packet flow is sketched below.

2) For realistic mixtures of flows, whether in hypervisors
   or gateways, batch sorting provides no benefit and only
   adds overhead.

3) It simplifies the code.
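
For context, a minimal sketch of the per-packet path after this
change, condensed from the second hunk below (the helper rate-limit
warning and the mark/label updates are abbreviated as comments):

    for (i = 0; i < cnt; i++) {
        if (!conn_key_extract(ct, pkts[i], dl_type, &ctxs[i], zone)) {
            write_ct_md(pkts[i], CS_INVALID, zone, 0, OVS_U128_ZERO);
            continue;
        }

        /* Lock only the bucket this packet hashes to. */
        unsigned bucket = hash_to_bucket(ctxs[i].hash);
        struct conntrack_bucket *ctb = &ct->buckets[bucket];

        ct_lock_lock(&ctb->lock);
        conn_key_lookup(ctb, &ctxs[i], now);
        struct conn *conn = process_one(ct, pkts[i], &ctxs[i], zone,
                                        commit, now);
        /* ... set mark/label on 'conn' if requested ... */
        ct_lock_unlock(&ctb->lock);
    }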

Signed-off-by: Darrell Ball <dlu...@gmail.com>
---
 lib/conntrack.c | 49 +++++++++++--------------------------------------
 1 file changed, 11 insertions(+), 38 deletions(-)

diff --git a/lib/conntrack.c b/lib/conntrack.c
index 0a611a2..d0e106f 100644
--- a/lib/conntrack.c
+++ b/lib/conntrack.c
@@ -284,16 +284,8 @@ conntrack_execute(struct conntrack *ct, struct dp_packet_batch *pkt_batch,
     enum { KEY_ARRAY_SIZE = NETDEV_MAX_BURST };
 #endif
     struct conn_lookup_ctx ctxs[KEY_ARRAY_SIZE];
-    int8_t bucket_list[CONNTRACK_BUCKETS];
-    struct {
-        unsigned bucket;
-        unsigned long maps;
-    } arr[KEY_ARRAY_SIZE];
     long long now = time_msec();
     size_t i = 0;
-    uint8_t arrcnt = 0;
-
-    BUILD_ASSERT_DECL(sizeof arr[0].maps * CHAR_BIT >= NETDEV_MAX_BURST);
 
     if (helper) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
@@ -302,48 +294,29 @@ conntrack_execute(struct conntrack *ct, struct dp_packet_batch *pkt_batch,
         /* Continue without the helper */
     }
 
-    memset(bucket_list, INT8_C(-1), sizeof bucket_list);
     for (i = 0; i < cnt; i++) {
-        unsigned bucket;
 
         if (!conn_key_extract(ct, pkts[i], dl_type, &ctxs[i], zone)) {
             write_ct_md(pkts[i], CS_INVALID, zone, 0, OVS_U128_ZERO);
             continue;
         }
 
-        bucket = hash_to_bucket(ctxs[i].hash);
-        if (bucket_list[bucket] == INT8_C(-1)) {
-            bucket_list[bucket] = arrcnt;
-
-            arr[arrcnt].maps = 0;
-            ULLONG_SET1(arr[arrcnt].maps, i);
-            arr[arrcnt++].bucket = bucket;
-        } else {
-            ULLONG_SET1(arr[bucket_list[bucket]].maps, i);
-        }
-    }
-
-    for (i = 0; i < arrcnt; i++) {
-        struct conntrack_bucket *ctb = &ct->buckets[arr[i].bucket];
-        size_t j;
-
+        unsigned bucket = hash_to_bucket(ctxs[i].hash);
+        struct conntrack_bucket *ctb = &ct->buckets[bucket];
         ct_lock_lock(&ctb->lock);
+        conn_key_lookup(ctb, &ctxs[i], now);
 
-        ULLONG_FOR_EACH_1(j, arr[i].maps) {
-            struct conn *conn;
-
-            conn_key_lookup(ctb, &ctxs[j], now);
+        struct conn *conn = process_one(ct, pkts[i], &ctxs[i], zone,
+                                        commit, now);
 
-            conn = process_one(ct, pkts[j], &ctxs[j], zone, commit, now);
-
-            if (conn && setmark) {
-                set_mark(pkts[j], conn, setmark[0], setmark[1]);
-            }
+        if (conn && setmark) {
+            set_mark(pkts[i], conn, setmark[0], setmark[1]);
+        }
 
-            if (conn && setlabel) {
-                set_label(pkts[j], conn, &setlabel[0], &setlabel[1]);
-            }
+        if (conn && setlabel) {
+            set_label(pkts[i], conn, &setlabel[0], &setlabel[1]);
         }
+
         ct_lock_unlock(&ctb->lock);
     }
 
-- 
1.9.1
