On 1/16/23 21:00, Mike Pattrick wrote:
On Mon, Jan 16, 2023 at 10:17 AM Adrián Moreno <amore...@redhat.com> wrote:

From: Adrian Moreno <amore...@redhat.com>

IPFIX templates have to be sent for each Observation Domain ID.
Currently, a timer is kept at each dpif_ipfix_exporter to send them.
This works fine for per-bridge sampling where there is only one
Observation Domain ID per exporter. However, this does not work for
per-flow sampling where more than one Observation Domain ID can be
specified by the controller. In this case, ovs-vswitchd will only send
template information for one (arbitrary) Domain ID.

Fix per-flow sampling by using an hmap to keep a timer for each
Observation Domain ID.

Signed-off-by: Adrian Moreno <amore...@redhat.com>

---
- v1:
   - Added unit test.
   - Drop domain_id from "struct dpif_ipfix_domain" since the hashmap
   already uses it as index.
   - Added OVS_REQUIRES(mutex) to dpif_ipfix_exporter_find_domain.
---
  ofproto/ofproto-dpif-ipfix.c | 127 ++++++++++++++++++++++++++++-------
  tests/system-traffic.at      |  51 ++++++++++++++
  2 files changed, 155 insertions(+), 23 deletions(-)

diff --git a/ofproto/ofproto-dpif-ipfix.c b/ofproto/ofproto-dpif-ipfix.c
index 742eed399..fa71fda0f 100644
--- a/ofproto/ofproto-dpif-ipfix.c
+++ b/ofproto/ofproto-dpif-ipfix.c
@@ -124,11 +124,18 @@ struct dpif_ipfix_port {
      uint32_t ifindex;
  };

+struct dpif_ipfix_domain {
+    struct hmap_node hmap_node; /* In struct dpif_ipfix_exporter's domains. */
+    time_t last_template_set_time;
+};
+
  struct dpif_ipfix_exporter {
      uint32_t exporter_id; /* Exporting Process identifier */
      struct collectors *collectors;
      uint32_t seq_number;
-    time_t last_template_set_time;
+    struct hmap domains; /* Contains struct dpif_ipfix_domain indexed by
+                            observation domain id. */
+    time_t last_stats_sent_time;
      struct hmap cache_flow_key_map;  /* ipfix_flow_cache_entry. */
      struct ovs_list cache_flow_start_timestamp_list;  /* 
ipfix_flow_cache_entry. */
      uint32_t cache_active_timeout;  /* In seconds. */
@@ -617,6 +624,9 @@ static void get_export_time_now(uint64_t *, uint32_t *);

  static void dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *, bool);

+static void dpif_ipfix_exporter_del_domain(struct dpif_ipfix_exporter *,
+                                           struct dpif_ipfix_domain *);
+
  static bool
  ofproto_ipfix_bridge_exporter_options_equal(
      const struct ofproto_ipfix_bridge_exporter_options *a,
@@ -697,13 +707,14 @@ dpif_ipfix_exporter_init(struct dpif_ipfix_exporter 
*exporter)
      exporter->exporter_id = ++exporter_total_count;
      exporter->collectors = NULL;
      exporter->seq_number = 1;
-    exporter->last_template_set_time = 0;
+    exporter->last_stats_sent_time = 0;
      hmap_init(&exporter->cache_flow_key_map);
      ovs_list_init(&exporter->cache_flow_start_timestamp_list);
      exporter->cache_active_timeout = 0;
      exporter->cache_max_flows = 0;
      exporter->virtual_obs_id = NULL;
      exporter->virtual_obs_len = 0;
+    hmap_init(&exporter->domains);

      memset(&exporter->ipfix_global_stats, 0,
             sizeof(struct dpif_ipfix_global_stats));
@@ -711,6 +722,7 @@ dpif_ipfix_exporter_init(struct dpif_ipfix_exporter 
*exporter)

  static void
  dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter *exporter)
+    OVS_REQUIRES(mutex)
  {
      /* Flush the cache with flow end reason "forced end." */
      dpif_ipfix_cache_expire_now(exporter, true);
@@ -719,22 +731,29 @@ dpif_ipfix_exporter_clear(struct dpif_ipfix_exporter 
*exporter)
      exporter->exporter_id = 0;
      exporter->collectors = NULL;
      exporter->seq_number = 1;
-    exporter->last_template_set_time = 0;
+    exporter->last_stats_sent_time = 0;
      exporter->cache_active_timeout = 0;
      exporter->cache_max_flows = 0;
      free(exporter->virtual_obs_id);
      exporter->virtual_obs_id = NULL;
      exporter->virtual_obs_len = 0;

+    struct dpif_ipfix_domain *dom;
+    HMAP_FOR_EACH_SAFE (dom, hmap_node, &exporter->domains) {
+        dpif_ipfix_exporter_del_domain(exporter, dom);
+    }
+
      memset(&exporter->ipfix_global_stats, 0,
             sizeof(struct dpif_ipfix_global_stats));
  }

  static void
  dpif_ipfix_exporter_destroy(struct dpif_ipfix_exporter *exporter)
+    OVS_REQUIRES(mutex)
  {
      dpif_ipfix_exporter_clear(exporter);
      hmap_destroy(&exporter->cache_flow_key_map);
+    hmap_destroy(&exporter->domains);
  }

  static bool
@@ -742,7 +761,7 @@ dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter 
*exporter,
                                  const struct sset *targets,
                                  const uint32_t cache_active_timeout,
                                  const uint32_t cache_max_flows,
-                                const char *virtual_obs_id)
+                                const char *virtual_obs_id) OVS_REQUIRES(mutex)
  {
      size_t virtual_obs_len;
      collectors_destroy(exporter->collectors);
@@ -769,6 +788,37 @@ dpif_ipfix_exporter_set_options(struct dpif_ipfix_exporter 
*exporter,
      return true;
  }

+static struct dpif_ipfix_domain *
+dpif_ipfix_exporter_find_domain(const struct dpif_ipfix_exporter *exporter,
+                                uint32_t domain_id) OVS_REQUIRES(mutex)
+{
+    struct dpif_ipfix_domain *dom;
+    HMAP_FOR_EACH_WITH_HASH (dom, hmap_node, hash_int(domain_id, 0),
+                             &exporter->domains) {
+        return dom;
+    }
+    return NULL;
+}
+
+static struct dpif_ipfix_domain *
+dpif_ipfix_exporter_insert_domain(struct dpif_ipfix_exporter *exporter,
+                                  const uint32_t domain_id) OVS_REQUIRES(mutex)
+{
+    struct dpif_ipfix_domain *dom = xmalloc(sizeof *dom);
+    dom->last_template_set_time = 0;
+    hmap_insert(&exporter->domains, &dom->hmap_node, hash_int(domain_id, 0));
+    return dom;
+}
+
+static void
+dpif_ipfix_exporter_del_domain(struct dpif_ipfix_exporter *exporter,
+                               struct dpif_ipfix_domain *dom)
+    OVS_REQUIRES(mutex)
+{
+    hmap_remove(&exporter->domains, &dom->hmap_node);
+    free(dom);
+}
+
  static struct dpif_ipfix_port *
  dpif_ipfix_find_port(const struct dpif_ipfix *di,
                       odp_port_t odp_port) OVS_REQUIRES(mutex)
@@ -909,6 +959,7 @@ dpif_ipfix_bridge_exporter_init(struct 
dpif_ipfix_bridge_exporter *exporter)

  static void
  dpif_ipfix_bridge_exporter_clear(struct dpif_ipfix_bridge_exporter *exporter)
+    OVS_REQUIRES(mutex)
  {
      dpif_ipfix_exporter_clear(&exporter->exporter);
      ofproto_ipfix_bridge_exporter_options_destroy(exporter->options);
@@ -918,6 +969,7 @@ dpif_ipfix_bridge_exporter_clear(struct 
dpif_ipfix_bridge_exporter *exporter)

  static void
  dpif_ipfix_bridge_exporter_destroy(struct dpif_ipfix_bridge_exporter 
*exporter)
+    OVS_REQUIRES(mutex)
  {
      dpif_ipfix_bridge_exporter_clear(exporter);
      dpif_ipfix_exporter_destroy(&exporter->exporter);
@@ -927,7 +979,7 @@ static void
  dpif_ipfix_bridge_exporter_set_options(
      struct dpif_ipfix_bridge_exporter *exporter,
      const struct ofproto_ipfix_bridge_exporter_options *options,
-    bool *options_changed)
+    bool *options_changed) OVS_REQUIRES(mutex)
  {
      if (!options || sset_is_empty(&options->targets)) {
          /* No point in doing any work if there are no targets. */
@@ -1003,6 +1055,7 @@ dpif_ipfix_flow_exporter_init(struct 
dpif_ipfix_flow_exporter *exporter)

  static void
  dpif_ipfix_flow_exporter_clear(struct dpif_ipfix_flow_exporter *exporter)
+    OVS_REQUIRES(mutex)
  {
      dpif_ipfix_exporter_clear(&exporter->exporter);
      ofproto_ipfix_flow_exporter_options_destroy(exporter->options);
@@ -1011,6 +1064,7 @@ dpif_ipfix_flow_exporter_clear(struct 
dpif_ipfix_flow_exporter *exporter)

  static void
  dpif_ipfix_flow_exporter_destroy(struct dpif_ipfix_flow_exporter *exporter)
+    OVS_REQUIRES(mutex)
  {
      dpif_ipfix_flow_exporter_clear(exporter);
      dpif_ipfix_exporter_destroy(&exporter->exporter);
@@ -1020,7 +1074,7 @@ static bool
  dpif_ipfix_flow_exporter_set_options(
      struct dpif_ipfix_flow_exporter *exporter,
      const struct ofproto_ipfix_flow_exporter_options *options,
-    bool *options_changed)
+    bool *options_changed) OVS_REQUIRES(mutex)
  {
      if (sset_is_empty(&options->targets)) {
          /* No point in doing any work if there are no targets. */
@@ -1071,6 +1125,7 @@ dpif_ipfix_flow_exporter_set_options(
  static void
  remove_flow_exporter(struct dpif_ipfix *di,
                       struct dpif_ipfix_flow_exporter_map_node *node)
+                     OVS_REQUIRES(mutex)
  {
      hmap_remove(&di->flow_exporter_map, &node->node);
      dpif_ipfix_flow_exporter_destroy(&node->exporter);
@@ -2000,6 +2055,7 @@ static void
  ipfix_cache_update(struct dpif_ipfix_exporter *exporter,
                     struct ipfix_flow_cache_entry *entry,
                     enum ipfix_sampled_packet_type sampled_pkt_type)
+                   OVS_REQUIRES(mutex)
  {
      struct ipfix_flow_cache_entry *old_entry;
      size_t current_flows = 0;
@@ -2811,14 +2867,36 @@ dpif_ipfix_flow_sample(struct dpif_ipfix *di, const 
struct dp_packet *packet,
      ovs_mutex_unlock(&mutex);
  }

+static bool
+dpif_ipfix_should_send_template(struct dpif_ipfix_exporter *exporter,
+                                const uint32_t observation_domain_id,
+                                const uint32_t export_time_sec)
+    OVS_REQUIRES(mutex)
+{
+    struct dpif_ipfix_domain *domain;
+    domain = dpif_ipfix_exporter_find_domain(exporter,
+                                             observation_domain_id);
+    if (!domain) {
+        /* First time we see this obs_domain_id. */
+        domain = dpif_ipfix_exporter_insert_domain(exporter,
+                                                   observation_domain_id);
+    }
+
+    if ((domain->last_template_set_time + IPFIX_TEMPLATE_INTERVAL)
+        <= export_time_sec) {
+        domain->last_template_set_time = export_time_sec;
+        return true;
+    }
+    return false;
+}
+
  static void
  dpif_ipfix_cache_expire(struct dpif_ipfix_exporter *exporter,
                          bool forced_end, const uint64_t export_time_usec,
-                        const uint32_t export_time_sec)
+                        const uint32_t export_time_sec) OVS_REQUIRES(mutex)
  {
      struct ipfix_flow_cache_entry *entry;
      uint64_t max_flow_start_timestamp_usec;
-    bool template_msg_sent = false;
      enum ipfix_flow_end_reason flow_end_reason;

      if (ovs_list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
@@ -2844,25 +2922,28 @@ dpif_ipfix_cache_expire(struct dpif_ipfix_exporter 
*exporter,
              break;
          }

-        ovs_list_remove(&entry->cache_flow_start_timestamp_list_node);
-        hmap_remove(&exporter->cache_flow_key_map,
-                    &entry->flow_key_map_node);
+        /* XXX: Make frequency of the (Options) Template and Exporter Process
+         * Statistics transmission configurable.
+         * Cf. IETF RFC 5101 Section 4.3. and 10.3.6. */
+        if ((exporter->last_stats_sent_time + IPFIX_TEMPLATE_INTERVAL)
+             <= export_time_sec) {
+            exporter->last_stats_sent_time = export_time_sec;
+            ipfix_send_exporter_data_msg(exporter, export_time_sec);
+        }

-         /* XXX: Make frequency of the (Options) Template and Exporter Process
-          * Statistics transmission configurable.
-          * Cf. IETF RFC 5101 Section 4.3. and 10.3.6. */
-        if (!template_msg_sent
-            && (exporter->last_template_set_time + IPFIX_TEMPLATE_INTERVAL)
-                <= export_time_sec) {
+        if (dpif_ipfix_should_send_template(exporter,
+                                            entry->flow_key.obs_domain_id,
+                                            export_time_sec)) {
+            VLOG_DBG("Sending templates for ObservationDomainID %"PRIu32,
+                     entry->flow_key.obs_domain_id);
              ipfix_send_template_msgs(exporter, export_time_sec,
                                       entry->flow_key.obs_domain_id);
-            exporter->last_template_set_time = export_time_sec;
-            template_msg_sent = true;
-
-            /* Send Exporter Process Statistics. */
-            ipfix_send_exporter_data_msg(exporter, export_time_sec);
          }

+        ovs_list_remove(&entry->cache_flow_start_timestamp_list_node);
+        hmap_remove(&exporter->cache_flow_key_map,
+                    &entry->flow_key_map_node);
+
          /* XXX: Group multiple data records for the same obs domain id
           * into the same message. */
          ipfix_send_data_msg(exporter, export_time_sec, entry, 
flow_end_reason);
@@ -2883,7 +2964,7 @@ get_export_time_now(uint64_t *export_time_usec, uint32_t 
*export_time_sec)

  static void
  dpif_ipfix_cache_expire_now(struct dpif_ipfix_exporter *exporter,
-                            bool forced_end)
+                            bool forced_end) OVS_REQUIRES(mutex)
  {
      uint64_t export_time_usec;
      uint32_t export_time_sec;
diff --git a/tests/system-traffic.at b/tests/system-traffic.at
index 08c78ff57..84e3f090e 100644
--- a/tests/system-traffic.at
+++ b/tests/system-traffic.at
@@ -7488,3 +7488,54 @@ OVS_WAIT_UNTIL([cat p2.pcap | grep -E "0x0050: *0000 
*0000 *5002 *2000 *b85e *00

  OVS_TRAFFIC_VSWITCHD_STOP
  AT_CLEANUP
+
+AT_SETUP([ipfix - multiple domains])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+OVS_TRAFFIC_VSWITCHD_START()
+
+ADD_NAMESPACES(at_ns0, at_ns1)
+
+ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24")
+ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24")
+
+dnl Create a dummy IPFIX collector just in localhost so that port seems open.
+OVS_DAEMONIZE([nc -l -u 127.0.0.1 5555 > /dev/null], [nc.pid])
+
+OVS_DAEMONIZE([tcpdump -n -i lo udp port 5555 -w ipfix.pcap], [tcpdump.pid])
+sleep 1
+
+dnl Configure a Flow Sample Collector Set with id = 1.
+AT_CHECK([ovs-vsctl -- --id=@br get Bridge br0 \
+                    -- --id=@i create IPFIX targets=\"127.0.0.1:5555\" \
+                    -- create Flow_Sample_Collector_Set bridge=@br id=1 
ipfix=@i],
+         [ignore], [ignore])
+
+dnl Push flows that will allow communication between the network namespaces
+dnl and sample all ICMP traffic with multiple observation domain IDs.
+AT_DATA([flows.txt], [dnl
+table=0,priority=100,in_port=ovs-p0,ip,icmp 
actions=sample(probability=65535,collector_set_id=1,obs_domain_id=1),NORMAL
+table=0,priority=100,in_port=ovs-p1,ip,icmp 
actions=sample(probability=65535,collector_set_id=1,obs_domain_id=2),NORMAL
+table=0,priority=0,actions=NORMAL
+])
+AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+
+NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -w 2 10.1.1.2 | FORMAT_PING], 
[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+sleep 1
+kill $(cat tcpdump.pid)

Great unit test here, but I actually found that this was a race
condition. Tcpdump may not completely die and write the file by the
time that the next tcpdump reads it. I think this is why the test
failed on intel CI as well. I propose the following line:

wait $(cat tcpdump.pid)


Nice catch! Thanks, I'll fix it in the next version.


+
+dnl Check templates (Flow ID = 0x02) were sent to ObservationDomain 1.
+AT_CHECK([test $(tcpdump -r ipfix.pcap --count "udp port 5555 and udp[[20:4]] = 0x00000001  and 
udp[[24:2]] = 0x02" 2>/dev/null | tr -d "packets") -gt 0])
+dnl Check that exactly 3 samples (Flow ID != 0x02) were sent to 
ObservationDomain 1.
+AT_CHECK([test $(tcpdump -r ipfix.pcap --count "udp port 5555 and udp[[20:4]] = 0x00000001 and 
udp[[24:2]] != 0x02" 2>/dev/null | tr -d "packets") -eq 3])
+
+dnl Check templates (Flow ID = 0x02) were sent to ObservationDomain 2.
+AT_CHECK([test $(tcpdump -r ipfix.pcap --count "udp port 5555 and udp[[20:4]] = 0x00000002  and 
udp[[24:2]] = 0x02" 2>/dev/null | tr -d "packets") -gt 0])
+dnl Check that exactly 3 samples (Flow ID != 0x02) were sent to 
ObservationDomain 2.
+AT_CHECK([test $(tcpdump -r ipfix.pcap --count "udp port 5555 and udp[[20:4]] = 0x00000002 and 
udp[[24:2]] != 0x02" 2>/dev/null | tr -d "packets") -eq 3])
+
+OVS_TRAFFIC_VSWITCHD_STOP
+AT_CLEANUP
+
--
2.38.1



--
Adrián Moreno

_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to