Signed-off-by: Eelco Chaudron <[email protected]>
---
 lib/dpif-netdev.c         | 121 +++++++++++++++++++++++++++++++++-----
 lib/dpif-offload-dpdk.c   |  13 ++--
 lib/netdev-offload-dpdk.c |  16 +++--
 lib/netdev-offload.c      |  75 -----------------------
 lib/netdev-offload.h      |  18 ------
5 files changed, 120 insertions(+), 123 deletions(-)
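
Note for reviewers: the per-ufid dispatch that moves into dpif-netdev.c keeps every operation on a given mega-ufid on the same offload thread by hashing the ufid and reducing it modulo the offload thread count. The standalone sketch below only illustrates that scheme; struct ufid, mix64() and ufid_to_thread_id() are stand-ins for OVS's ovs_u128, hash_words64_inline() and dpdk_offload_ufid_to_thread_id(), not the actual implementation.

#include <stdint.h>
#include <stdio.h>

struct ufid { uint64_t lo, hi; };

/* Stand-in 64-bit mixer; the patch uses OVS's hash_words64_inline(). */
static uint32_t
mix64(uint64_t x)
{
    x ^= x >> 33;
    x *= UINT64_C(0xff51afd7ed558ccd);
    x ^= x >> 33;
    return (uint32_t) x;
}

static unsigned int
ufid_to_thread_id(struct ufid ufid, unsigned int n_threads)
{
    if (n_threads == 1) {
        return 0;                    /* Single offload thread: no hashing. */
    }
    return (mix64(ufid.lo) ^ mix64(ufid.hi)) % n_threads;
}

int
main(void)
{
    struct ufid u = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };

    /* The mapping is a pure function of the ufid, so repeated requests for
     * the same flow always land on the same offload queue and stay ordered
     * without extra locking. */
    printf("thread id: %u\n", ufid_to_thread_id(u, 4));
    return 0;
}
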
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 6575cb9e8..272294daa 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -409,11 +409,18 @@ struct dp_offload_thread {
static struct dp_offload_thread *dp_offload_threads;
static void *dp_netdev_flow_offload_main(void *arg);
+/* XXX: Temporary forward declarations, to be removed during cleanup. */
+static unsigned int dpdk_offload_ufid_to_thread_id(const ovs_u128 ufid);
+static unsigned int dpdk_offload_thread_init(void);
+void dpdk_offload_thread_set_thread_nb(unsigned int thread_nb);
+unsigned int dpdk_offload_thread_nb(void);
+unsigned int dpdk_offload_thread_id(void);
+
static void
dp_netdev_offload_init(void)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- unsigned int nb_offload_thread = netdev_offload_thread_nb();
+ unsigned int nb_offload_thread = dpdk_offload_thread_nb();
unsigned int tid;
if (!ovsthread_once_start(&once)) {
@@ -433,7 +440,7 @@ dp_netdev_offload_init(void)
atomic_init(&thread->enqueued_item, 0);
mov_avg_cma_init(&thread->cma);
mov_avg_ema_init(&thread->ema, 100);
- ovs_thread_create("hw_offload", dp_netdev_flow_offload_main, thread);
+ ovs_thread_create("dpdk_offload", dp_netdev_flow_offload_main, thread);
}
ovsthread_once_done(&once);
@@ -2513,12 +2520,12 @@ static uint32_t
flow_mark_alloc(void)
{
static struct ovsthread_once init_once = OVSTHREAD_ONCE_INITIALIZER;
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
uint32_t mark;
if (ovsthread_once_start(&init_once)) {
/* Haven't initiated yet, do it here */
- flow_mark_pool = id_fpool_create(netdev_offload_thread_nb(),
+ flow_mark_pool = id_fpool_create(dpdk_offload_thread_nb(),
1, MAX_FLOW_MARK);
ovsthread_once_done(&init_once);
}
@@ -2533,7 +2540,7 @@ flow_mark_alloc(void)
static void
flow_mark_free(uint32_t mark)
{
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
id_fpool_free_id(flow_mark_pool, tid, mark);
}
@@ -2544,7 +2551,7 @@ megaflow_to_mark_associate(const ovs_u128 *mega_ufid, uint32_t mark)
{
size_t hash = dp_netdev_flow_hash(mega_ufid);
struct megaflow_to_mark_data *data = xzalloc(sizeof(*data));
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
data->mega_ufid = *mega_ufid;
data->mark = mark;
@@ -2559,7 +2566,7 @@ megaflow_to_mark_disassociate(const ovs_u128 *mega_ufid)
{
size_t hash = dp_netdev_flow_hash(mega_ufid);
struct megaflow_to_mark_data *data;
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
CMAP_FOR_EACH_WITH_HASH (data, node, hash,
&dp_offload_threads[tid].megaflow_to_mark) {
@@ -2580,7 +2587,7 @@ megaflow_to_mark_find(const ovs_u128 *mega_ufid)
{
size_t hash = dp_netdev_flow_hash(mega_ufid);
struct megaflow_to_mark_data *data;
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
CMAP_FOR_EACH_WITH_HASH (data, node, hash,
&dp_offload_threads[tid].megaflow_to_mark) {
@@ -2598,7 +2605,7 @@ megaflow_to_mark_find(const ovs_u128 *mega_ufid)
static void
mark_to_flow_associate(const uint32_t mark, struct dp_netdev_flow *flow)
{
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
dp_netdev_flow_ref(flow);
cmap_insert(&dp_offload_threads[tid].mark_to_flow,
@@ -2613,7 +2620,7 @@ mark_to_flow_associate(const uint32_t mark, struct dp_netdev_flow *flow)
static bool
flow_mark_has_no_ref(uint32_t mark)
{
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
struct dp_netdev_flow *flow;
CMAP_FOR_EACH_WITH_HASH (flow, mark_node, hash_int(mark, 0),
@@ -2633,7 +2640,7 @@ mark_to_flow_disassociate(struct dp_netdev *dp,
const char *dpif_type_str = dpif_normalize_type(dp->class->type);
struct cmap_node *mark_node = CONST_CAST(struct cmap_node *,
&flow->mark_node);
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
uint32_t mark = flow->mark;
int ret = 0;
@@ -2689,7 +2696,7 @@ mark_to_flow_find(const struct dp_netdev_pmd_thread *pmd,
}
hash = hash_int(mark, 0);
- for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
+ for (tid = 0; tid < dpdk_offload_thread_nb(); tid++) {
CMAP_FOR_EACH_WITH_HASH (flow, mark_node, hash,
&dp_offload_threads[tid].mark_to_flow) {
if (flow->mark == mark && flow->pmd_id == pmd->core_id &&
@@ -2775,7 +2782,7 @@ dp_netdev_offload_flow_enqueue(struct dp_offload_thread_item *item)
ovs_assert(item->type == DP_OFFLOAD_FLOW);
- tid = netdev_offload_ufid_to_thread_id(flow_offload->flow->mega_ufid);
+ tid = dpdk_offload_ufid_to_thread_id(flow_offload->flow->mega_ufid);
dp_netdev_append_offload(item, tid);
}
@@ -3123,7 +3130,7 @@ dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,
unsigned int tid;
long long int now_us = time_usec();
- for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
+ for (tid = 0; tid < dpdk_offload_thread_nb(); tid++) {
struct dp_offload_thread_item *item;
struct dp_offload_flush_item *flush;
@@ -3180,7 +3187,7 @@ dp_netdev_offload_flush(struct dp_netdev *dp,
ovs_mutex_lock(&flush_mutex);
/* This thread and the offload threads. */
- ovs_barrier_init(&barrier, 1 + netdev_offload_thread_nb());
+ ovs_barrier_init(&barrier, 1 + dpdk_offload_thread_nb());
netdev = netdev_ref(port->netdev);
dp_netdev_offload_flush_enqueue(dp, netdev, &barrier);
@@ -4770,7 +4777,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
return EINVAL;
}
- nb_thread = netdev_offload_thread_nb();
+ nb_thread = dpdk_offload_thread_nb();
if (!nb_thread) {
return EINVAL;
}
@@ -10560,3 +10567,85 @@ dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key *keys[],
}
return false;
}
+/* XXX: Temporarily duplicates definition in dpif-offload-dpdk.c. */
+#define DEFAULT_OFFLOAD_THREAD_NB 1
+static unsigned int offload_thread_nb = DEFAULT_OFFLOAD_THREAD_NB;
+
+DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, dpdk_offload_thread_id);
+DEFINE_EXTERN_PER_THREAD_DATA(dpdk_offload_thread_id, OVSTHREAD_ID_UNSET);
+
+unsigned int
+dpdk_offload_thread_id(void)
+{
+ unsigned int id = *dpdk_offload_thread_id_get();
+
+ if (OVS_UNLIKELY(id == OVSTHREAD_ID_UNSET)) {
+ id = dpdk_offload_thread_init();
+ }
+
+ return id;
+}
+
+unsigned int
+dpdk_offload_thread_nb(void)
+{
+ return offload_thread_nb;
+}
+
+void
+dpdk_offload_thread_set_thread_nb(unsigned int thread_nb)
+{
+ offload_thread_nb = thread_nb;
+}
+
+static unsigned int
+dpdk_offload_ufid_to_thread_id(const ovs_u128 ufid)
+{
+ uint32_t ufid_hash;
+
+ if (dpdk_offload_thread_nb() == 1) {
+ return 0;
+ }
+
+ ufid_hash = hash_words64_inline(
+ (const uint64_t [2]){ ufid.u64.lo,
+ ufid.u64.hi }, 2, 1);
+ return ufid_hash % dpdk_offload_thread_nb();
+}
+
+static unsigned int
+dpdk_offload_thread_init(void)
+{
+ static atomic_count next_id = ATOMIC_COUNT_INIT(0);
+ bool thread_is_hw_offload;
+ bool thread_is_rcu;
+
+ thread_is_hw_offload = !strncmp(get_subprogram_name(),
+ "dpdk_offload", strlen("dpdk_offload"));
+ thread_is_rcu = !strncmp(get_subprogram_name(), "urcu", strlen("urcu"));
+
+ /* Panic if any other thread besides offload and RCU tries
+ * to initialize their thread ID. */
+ ovs_assert(thread_is_hw_offload || thread_is_rcu);
+
+ if (*dpdk_offload_thread_id_get() == OVSTHREAD_ID_UNSET) {
+ unsigned int id;
+
+ if (thread_is_rcu) {
+ /* RCU will compete with other threads for shared object access.
+ * Reclamation functions using a thread ID must be thread-safe.
+ * For that end, and because RCU must consider all potential shared
+ * objects anyway, its thread-id can be whichever, so return 0.
+ */
+ id = 0;
+ } else {
+ /* Only the actual offload threads have their own ID. */
+ id = atomic_count_inc(&next_id);
+ }
+ /* Panic if any offload thread is getting a spurious ID. */
+ ovs_assert(id < dpdk_offload_thread_nb());
+ return *dpdk_offload_thread_id_get() = id;
+ } else {
+ return *dpdk_offload_thread_id_get();
+ }
+}
diff --git a/lib/dpif-offload-dpdk.c b/lib/dpif-offload-dpdk.c
index 155f631b3..6014db38c 100644
--- a/lib/dpif-offload-dpdk.c
+++ b/lib/dpif-offload-dpdk.c
@@ -165,6 +165,9 @@ static bool dpif_offload_dpdk_late_enable(struct dpif_offload_port_mgr_port *p,
return false;
}
+/* XXX: External reference, will be removed after full integration. */
+void dpdk_offload_thread_set_thread_nb(unsigned int thread_nb);
+
static void
dpif_offload_dpdk_set_config(struct dpif_offload *offload_,
const struct smap *other_cfg)
@@ -193,6 +196,8 @@ dpif_offload_dpdk_set_config(struct dpif_offload *offload_,
offload_thread_nb > 1 ? "s" : "");
}
+ dpdk_offload_thread_set_thread_nb(offload_thread_nb);
+
dpif_offload_port_mgr_traverse_ports(offload->port_mgr,
dpif_offload_dpdk_late_enable,
offload);
@@ -327,11 +332,3 @@ struct dpif_offload_class dpif_offload_dpdk_class = {
.netdev_hw_miss_packet_postprocess = \
dpif_offload_dpdk_netdev_hw_miss_packet_postprocess,
};
-
-/* XXX: Temporary functions below, which will be removed once fully
- * refactored. */
-unsigned int dpif_offload_dpdk_get_thread_nb(void);
-unsigned int dpif_offload_dpdk_get_thread_nb(void)
-{
- return offload_thread_nb;
-}
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index c3d5e83f5..7f7bd4448 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -39,6 +39,10 @@
VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(600, 600);
+/* XXX: Temporary external declarations, to be removed during cleanup. */
+unsigned int dpdk_offload_thread_nb(void);
+unsigned int dpdk_offload_thread_id(void);
+
/* Thread-safety
* =============
*
@@ -84,7 +88,7 @@ offload_data_init(struct netdev *netdev)
data = xzalloc(sizeof *data);
ovs_mutex_init(&data->map_lock);
cmap_init(&data->ufid_to_rte_flow);
- data->rte_flow_counters = xcalloc(netdev_offload_thread_nb(),
+ data->rte_flow_counters = xcalloc(dpdk_offload_thread_nb(),
sizeof *data->rte_flow_counters);
ovsrcu_set(&netdev->hw_info.offload_data, (void *) data);
@@ -245,7 +249,7 @@ ufid_to_rte_flow_associate(const ovs_u128 *ufid, struct netdev *netdev,
data->physdev = netdev != physdev ? netdev_ref(physdev) : physdev;
data->rte_flow = rte_flow;
data->actions_offloaded = actions_offloaded;
- data->creation_tid = netdev_offload_thread_id();
+ data->creation_tid = dpdk_offload_thread_id();
ovs_mutex_init(&data->lock);
cmap_insert(map, CONST_CAST(struct cmap_node *, &data->node), hash);
@@ -927,7 +931,7 @@ netdev_offload_dpdk_flow_create(struct netdev *netdev,
flow = netdev_dpdk_rte_flow_create(netdev, attr, items, actions, error);
if (flow) {
struct netdev_offload_dpdk_data *data;
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
data = (struct netdev_offload_dpdk_data *)
ovsrcu_get(void *, &netdev->hw_info.offload_data);
@@ -2376,7 +2380,7 @@ netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
if (ret == 0) {
struct netdev_offload_dpdk_data *data;
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
data = (struct netdev_offload_dpdk_data *)
ovsrcu_get(void *, &physdev->hw_info.offload_data);
@@ -2577,7 +2581,7 @@ out:
static void
flush_netdev_flows_in_related(struct netdev *netdev, struct netdev *related)
{
- unsigned int tid = netdev_offload_thread_id();
+ unsigned int tid = dpdk_offload_thread_id();
struct cmap *map = offload_data_map(related);
struct ufid_to_rte_flow_data *data;
@@ -2789,7 +2793,7 @@ netdev_offload_dpdk_flow_get_n_offloaded(struct netdev *netdev)
return 0;
}
- for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
+ for (tid = 0; tid < dpdk_offload_thread_nb(); tid++) {
total += data->rte_flow_counters[tid];
}
diff --git a/lib/netdev-offload.c b/lib/netdev-offload.c
index 174afee33..0852e2536 100644
--- a/lib/netdev-offload.c
+++ b/lib/netdev-offload.c
@@ -58,18 +58,6 @@
VLOG_DEFINE_THIS_MODULE(netdev_offload);
-/* XXX: Temporarily duplicates definition in dpif-offload-dpdk.c. */
-#define MAX_OFFLOAD_THREAD_NB 10
-#define DEFAULT_OFFLOAD_THREAD_NB 1
-static unsigned int offload_thread_nb = DEFAULT_OFFLOAD_THREAD_NB;
-
-unsigned int dpif_offload_dpdk_get_thread_nb(void); /* XXX: Temporarily
- * external declaration
- * until fully refactored.
- */
-
-DEFINE_EXTERN_PER_THREAD_DATA(netdev_offload_thread_id, OVSTHREAD_ID_UNSET);
-
/* Protects 'netdev_flow_apis'. */
static struct ovs_mutex netdev_flow_api_provider_mutex =
OVS_MUTEX_INITIALIZER;
@@ -385,64 +373,6 @@ netdev_any_oor(void)
return oor;
}
-unsigned int
-netdev_offload_thread_nb(void)
-{
- return offload_thread_nb;
-}
-
-unsigned int
-netdev_offload_ufid_to_thread_id(const ovs_u128 ufid)
-{
- uint32_t ufid_hash;
-
- if (netdev_offload_thread_nb() == 1) {
- return 0;
- }
-
- ufid_hash = hash_words64_inline(
- (const uint64_t [2]){ ufid.u64.lo,
- ufid.u64.hi }, 2, 1);
- return ufid_hash % netdev_offload_thread_nb();
-}
-
-unsigned int
-netdev_offload_thread_init(void)
-{
- static atomic_count next_id = ATOMIC_COUNT_INIT(0);
- bool thread_is_hw_offload;
- bool thread_is_rcu;
-
- thread_is_hw_offload = !strncmp(get_subprogram_name(),
- "hw_offload", strlen("hw_offload"));
- thread_is_rcu = !strncmp(get_subprogram_name(), "urcu", strlen("urcu"));
-
- /* Panic if any other thread besides offload and RCU tries
- * to initialize their thread ID. */
- ovs_assert(thread_is_hw_offload || thread_is_rcu);
-
- if (*netdev_offload_thread_id_get() == OVSTHREAD_ID_UNSET) {
- unsigned int id;
-
- if (thread_is_rcu) {
- /* RCU will compete with other threads for shared object access.
- * Reclamation functions using a thread ID must be thread-safe.
- * For that end, and because RCU must consider all potential shared
- * objects anyway, its thread-id can be whichever, so return 0.
- */
- id = 0;
- } else {
- /* Only the actual offload threads have their own ID. */
- id = atomic_count_inc(&next_id);
- }
- /* Panic if any offload thread is getting a spurious ID. */
- ovs_assert(id < netdev_offload_thread_nb());
- return *netdev_offload_thread_id_get() = id;
- } else {
- return *netdev_offload_thread_id_get();
- }
-}
-
void
netdev_ports_traverse(const char *dpif_type,
bool (*cb)(struct netdev *, odp_port_t, void *),
@@ -639,11 +569,6 @@ netdev_set_flow_api_enabled(const struct smap *ovs_other_config OVS_UNUSED)
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
-
-#ifdef DPDK_NETDEV
- offload_thread_nb = dpif_offload_dpdk_get_thread_nb();
-#endif
-
netdev_ports_flow_init();
ovsthread_once_done(&once);
}
diff --git a/lib/netdev-offload.h b/lib/netdev-offload.h
index 5570d5b71..4cdf7102f 100644
--- a/lib/netdev-offload.h
+++ b/lib/netdev-offload.h
@@ -76,24 +76,6 @@ struct offload_info {
odp_port_t orig_in_port; /* Originating in_port for tnl flows. */
};
-DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, netdev_offload_thread_id);
-
-unsigned int netdev_offload_thread_nb(void);
-unsigned int netdev_offload_thread_init(void);
-unsigned int netdev_offload_ufid_to_thread_id(const ovs_u128 ufid);
-
-static inline unsigned int
-netdev_offload_thread_id(void)
-{
- unsigned int id = *netdev_offload_thread_id_get();
-
- if (OVS_UNLIKELY(id == OVSTHREAD_ID_UNSET)) {
- id = netdev_offload_thread_init();
- }
-
- return id;
-}
-
int netdev_flow_put(struct netdev *, struct match *, struct nlattr *actions,
size_t actions_len, const ovs_u128 *,
struct offload_info *, struct dpif_flow_stats *);
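
A side note on the helpers now duplicated at the bottom of dpif-netdev.c: dpdk_offload_thread_id() lazily assigns each offload thread an ID from an atomic counter on its first call and caches it in per-thread data. The sketch below shows the same pattern with C11 _Thread_local and <stdatomic.h> standing in for OVS's DEFINE_EXTERN_PER_THREAD_DATA and atomic_count; the RCU special case (always ID 0) and the subprogram-name assertion are noted in a comment rather than implemented.

#include <stdatomic.h>
#include <stdio.h>

#define ID_UNSET (~0u)

static _Thread_local unsigned int thread_id = ID_UNSET;
static atomic_uint next_id;          /* Zero-initialized: first ID is 0. */

static unsigned int
offload_thread_id(void)
{
    if (thread_id == ID_UNSET) {
        /* First call from this thread: take the next free ID.  In the
         * patch, only real offload threads reach this point; RCU threads
         * are handed ID 0 instead. */
        thread_id = atomic_fetch_add(&next_id, 1);
    }
    return thread_id;
}

int
main(void)
{
    /* Subsequent calls from the same thread return the cached ID. */
    printf("%u %u\n", offload_thread_id(), offload_thread_id());
    return 0;
}
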