Add an event buffer flush when the current invocation
of the Rx adapter completes.

This patch provides lower latency when at least BATCH_SIZE
events are present in the event buffer at the end of the
invocation.

Suggested-by: Narender Vangati <narender.vang...@intel.com>
Signed-off-by: Nikhil Rao <nikhil....@intel.com>
Cc: sta...@dpdk.org
---
 lib/librte_eventdev/rte_event_eth_rx_adapter.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
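
Note (not part of the patch): below is a minimal standalone sketch of the
flush-at-end-of-invocation pattern this change moves into eth_rx_poll().
It is plain C with no DPDK headers; eth_rx_poll_sketch(), fake_rx_burst(),
MAX_NB_RX, EVENT_BUFFER_SIZE and the values used are illustrative
placeholders, and flush_event_buffer() here is only a stand-in for the
adapter's enqueue path. The real code conditions the final flush on
buf->count >= BATCH_SIZE, as mirrored here.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BATCH_SIZE        32   /* events enqueued per flush */
    #define EVENT_BUFFER_SIZE 128  /* capacity of the enqueue buffer */
    #define MAX_NB_RX         48   /* per-invocation receive budget */

    struct event_buffer {
            uint32_t count;
            uint32_t events[EVENT_BUFFER_SIZE];
    };

    /* Stand-in for the adapter's flush/enqueue-burst path. */
    static void
    flush_event_buffer(struct event_buffer *buf)
    {
            printf("flushed %u events\n", (unsigned)buf->count);
            buf->count = 0;
    }

    /* Stand-in for an Rx burst: produce up to 'max' dummy events. */
    static uint32_t
    fake_rx_burst(uint32_t *dst, uint32_t max, uint32_t avail)
    {
            uint32_t n = avail < max ? avail : max;
            memset(dst, 0, n * sizeof(*dst));
            return n;
    }

    /* One Rx adapter invocation over the polled queues. */
    static void
    eth_rx_poll_sketch(struct event_buffer *buf, uint32_t pending)
    {
            uint32_t nb_rx = 0;

            while (pending > 0) {
                    /* Flush whenever a full batch has accumulated. */
                    if (buf->count >= BATCH_SIZE)
                            flush_event_buffer(buf);

                    uint32_t n = fake_rx_burst(&buf->events[buf->count],
                                               BATCH_SIZE, pending);
                    buf->count += n;
                    nb_rx += n;
                    pending -= n;

                    /* Per-invocation budget exhausted: stop polling. */
                    if (nb_rx > MAX_NB_RX)
                            break;
            }

            /*
             * End-of-invocation flush added by this patch: without it, a
             * full batch buffered just before the budget check would sit
             * in the buffer until the next service invocation.
             */
            if (buf->count >= BATCH_SIZE)
                    flush_event_buffer(buf);
    }

    int
    main(void)
    {
            struct event_buffer buf = { .count = 0 };

            /* 100 pending events: one flush mid-loop, one at the end. */
            eth_rx_poll_sketch(&buf, 100);
            return 0;
    }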

diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 53a3788..3a70058 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -490,7 +490,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
+static inline void
 eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 {
        uint32_t num_queue;
@@ -519,7 +519,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
                        flush_event_buffer(rx_adapter);
                if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
                        rx_adapter->wrr_pos = wrr_pos;
-                       break;
+                       return;
                }
 
                stats->rx_poll_count++;
@@ -535,7 +535,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
                        if (nb_rx > max_nb_rx) {
                                rx_adapter->wrr_pos =
                                    (wrr_pos + 1) % rx_adapter->wrr_len;
-                               return nb_rx;
+                               break;
                        }
                }
 
@@ -543,20 +543,18 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
                        wrr_pos = 0;
        }
 
-       return nb_rx;
+       if (buf->count >= BATCH_SIZE)
+               flush_event_buffer(rx_adapter);
 }
 
 static int
 event_eth_rx_adapter_service_func(void *args)
 {
        struct rte_event_eth_rx_adapter *rx_adapter = args;
-       struct rte_eth_event_enqueue_buffer *buf;
 
-       buf = &rx_adapter->event_enqueue_buffer;
        if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
                return 0;
-       if (eth_rx_poll(rx_adapter) == 0 && buf->count)
-               flush_event_buffer(rx_adapter);
+       eth_rx_poll(rx_adapter);
        rte_spinlock_unlock(&rx_adapter->rx_lock);
        return 0;
 }
-- 
1.8.3.1
