Set up a single event port for Tx and link it to the respective event
queue. Register the Tx function as a service to be run on a service
core. The Tx function dequeues events from the event queue and
transmits the packets on their respective ethernet ports.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>
---
 app/test-eventdev/test_pipeline_common.c | 119 +++++++++++++++++++++++++++++++
 app/test-eventdev/test_pipeline_common.h |  19 ++++-
 2 files changed, 136 insertions(+), 2 deletions(-)
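
A minimal sketch of what "run the Tx function from a service core" involves,
for reference only and not part of the patch. The helper name
tx_service_launch and the explicit lcore argument are assumptions made for
illustration; in the patch the core mapping is performed by
evt_service_setup() on the service_id returned by
rte_service_component_register().

    /* Illustrative only: map an already registered service to a service
     * lcore and start it. The service lcore then loops, invoking the
     * registered Tx callback (burst or single-event variant).
     */
    static int
    tx_service_launch(uint32_t service_id, uint32_t lcore)
    {
            int ret;

            /* Make the lcore a service core; tolerate it already being one. */
            ret = rte_service_lcore_add(lcore);
            if (ret && ret != -EALREADY)
                    return ret;

            /* Enable this particular service on that lcore. */
            ret = rte_service_map_lcore_set(service_id, lcore, 1);
            if (ret)
                    return ret;

            /* Both the component and the application mark it runnable. */
            rte_service_component_runstate_set(service_id, 1);
            rte_service_runstate_set(service_id, 1);

            /* Start the service lcore's run loop. */
            return rte_service_lcore_start(lcore);
    }

Funnelling Tx through one service serializes access to ethdev Tx queues that
are not multi-thread safe (the mt_unsafe case below), which is what allows
the per-port tx_lk spinlocks to be dropped from struct test_pipeline.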

diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 63f54daef..98df423ae 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -5,6 +5,58 @@
 
 #include "test_pipeline_common.h"
 
+static int32_t
+pipeline_event_tx_burst_service_func(void *args)
+{
+
+       int i;
+       struct tx_service_data *tx = args;
+       const uint8_t dev = tx->dev_id;
+       const uint8_t port = tx->port_id;
+       struct rte_event ev[BURST_SIZE + 1];
+
+       uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+
+       if (!nb_rx) {
+               for (i = 0; i < tx->nb_ethports; i++)
+                       rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
+               return 0;
+       }
+
+       for (i = 0; i < nb_rx; i++) {
+               struct rte_mbuf *m = ev[i].mbuf;
+               rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
+       }
+       tx->processed_pkts += nb_rx;
+
+       return 0;
+}
+
+static int32_t
+pipeline_event_tx_service_func(void *args)
+{
+
+       int i;
+       struct tx_service_data *tx = args;
+       const uint8_t dev = tx->dev_id;
+       const uint8_t port = tx->port_id;
+       struct rte_event ev;
+
+       uint16_t nb_rx = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+       if (!nb_rx) {
+               for (i = 0; i < tx->nb_ethports; i++)
+                       rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
+               return 0;
+       }
+
+       struct rte_mbuf *m = ev.mbuf;
+       rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
+       tx->processed_pkts++;
+
+       return 0;
+}
+
 int
 pipeline_test_result(struct evt_test *test, struct evt_options *opt)
 {
@@ -151,6 +203,10 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
                }
 
                t->mt_unsafe |= mt_state;
+               t->tx_service.tx_buf[i] =
+                       rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
+               if (t->tx_service.tx_buf[i] == NULL)
+                       rte_panic("Unable to allocate Tx buffer memory.");
                rte_eth_promiscuous_enable(i);
        }
 
@@ -271,12 +327,75 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
        return ret;
 }
 
+int
+pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt,
+               uint8_t tx_queue_id, uint8_t tx_port_id,
+               const struct rte_event_port_conf p_conf)
+{
+       int ret;
+       struct rte_service_spec serv;
+       struct test_pipeline *t = evt_test_priv(test);
+       struct tx_service_data *tx = &t->tx_service;
+
+       ret = rte_event_port_setup(opt->dev_id, tx_port_id, &p_conf);
+       if (ret) {
+               evt_err("failed to setup port %d", tx_port_id);
+               return ret;
+       }
+
+       if (rte_event_port_link(opt->dev_id, tx_port_id, &tx_queue_id,
+                               NULL, 1) != 1) {
+               evt_err("failed to link queues to port %d", tx_port_id);
+               return -EINVAL;
+       }
+
+       tx->dev_id = opt->dev_id;
+       tx->queue_id = tx_queue_id;
+       tx->port_id = tx_port_id;
+       tx->nb_ethports = rte_eth_dev_count();
+       tx->t = t;
+
+       /* Register Tx service */
+       memset(&serv, 0, sizeof(struct rte_service_spec));
+       snprintf(serv.name, sizeof(serv.name), "Tx_service");
+
+       if (evt_has_burst_mode(opt->dev_id))
+               serv.callback = pipeline_event_tx_burst_service_func;
+       else
+               serv.callback = pipeline_event_tx_service_func;
+
+       serv.callback_userdata = (void *)tx;
+       ret = rte_service_component_register(&serv, &tx->service_id);
+       if (ret) {
+               evt_err("failed to register Tx service");
+               return ret;
+       }
+
+       ret = evt_service_setup(tx->service_id);
+       if (ret) {
+               evt_err("failed to setup service core for Tx service");
+               return ret;
+       }
+
+       rte_service_runstate_set(tx->service_id, 1);
+
+       return 0;
+}
+
+
 void
 pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
        int i;
        RTE_SET_USED(test);
        RTE_SET_USED(opt);
+       struct test_pipeline *t = evt_test_priv(test);
+
+       if (t->mt_unsafe) {
+               rte_service_component_runstate_set(t->tx_service.service_id, 0);
+               rte_service_runstate_set(t->tx_service.service_id, 0);
+               rte_service_component_unregister(t->tx_service.service_id);
+       }
 
        for (i = 0; i < rte_eth_dev_count(); i++) {
                rte_event_eth_rx_adapter_stop(i);
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index f8c46c0d7..acbf688fb 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -19,6 +19,8 @@
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 #include <rte_spinlock.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
 
 #include "evt_common.h"
 #include "evt_options.h"
@@ -33,6 +35,17 @@ struct worker_data {
        struct test_pipeline *t;
 } __rte_cache_aligned;
 
+struct tx_service_data {
+       uint8_t dev_id;
+       uint8_t queue_id;
+       uint8_t port_id;
+       uint32_t service_id;
+       uint64_t processed_pkts;
+       uint16_t nb_ethports;
+       struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+       struct test_pipeline *t;
+} __rte_cache_aligned;
+
 struct test_pipeline {
        /* Don't change the offset of "done". Signal handler use this memory
         * to terminate all lcores work.
@@ -45,8 +58,7 @@ struct test_pipeline {
        uint64_t outstand_pkts;
        struct rte_mempool *pool;
        struct worker_data worker[EVT_MAX_PORTS];
-       struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
-       rte_spinlock_t tx_lk[RTE_MAX_ETHPORTS];
+       struct tx_service_data tx_service;
        struct evt_options *opt;
        uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
 } __rte_cache_aligned;
@@ -57,6 +69,9 @@ int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
 int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
 int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                struct rte_event_port_conf prod_conf);
+int pipeline_event_tx_service_setup(struct evt_test *test,
+               struct evt_options *opt, uint8_t tx_queue_id,
+               uint8_t tx_port_id, const struct rte_event_port_conf p_conf);
 int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
                uint8_t *queue_arr, uint8_t nb_queues,
-- 
2.14.1