On 4/20/22 11:16, Feifei Wang wrote:
Add API for enabling direct rearm mode and for mapping RX and TX
queues. Currently, the API supports 1:1(txq : rxq) mapping.

Suggested-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Signed-off-by: Feifei Wang <feifei.wa...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
---
  lib/ethdev/ethdev_driver.h | 15 +++++++++++++++
  lib/ethdev/rte_ethdev.c    | 14 ++++++++++++++
  lib/ethdev/rte_ethdev.h    | 31 +++++++++++++++++++++++++++++++
  lib/ethdev/version.map     |  1 +
  4 files changed, 61 insertions(+)
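
Before the detailed comments below, this is how I read the intended usage from the
application side. It is only a sketch based on the declaration added to rte_ethdev.h
by this patch; the port and queue numbers are arbitrary examples and error handling
is up to the application:

/* Sketch only: recycle mbufs that complete transmission on Tx queue 0
 * of port 1 directly into Rx queue 0 of port 0. Both ports must
 * already be configured and started; the IDs are made up for illustration.
 */
int ret = rte_eth_direct_rxrearm_map(0 /* rx_port_id */, 0 /* rx_queue_id */,
                                     1 /* tx_port_id */, 0 /* tx_queue_id */);
if (ret != 0)
        rte_exit(EXIT_FAILURE, "direct rearm map failed: %d\n", ret);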

diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 69d9dc21d8..22022f6da9 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -485,6 +485,16 @@ typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
  typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
                                    uint16_t rx_queue_id);
+/** @internal Enable direct rearm of a receive queue of an Ethernet device. */
+typedef int (*eth_rx_direct_rearm_enable_t)(struct rte_eth_dev *dev,
+                                               uint16_t queue_id);
+
+/** @internal Map Rx/Tx queue for direct rearm mode */
+typedef int (*eth_rx_direct_rearm_map_t)(struct rte_eth_dev *dev,
+                                       uint16_t rx_queue_id,
+                                       uint16_t tx_port_id,
+                                       uint16_t tx_queue_id);
+
  /** @internal Release memory resources allocated by given Rx/Tx queue. */
  typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
                                    uint16_t queue_id);
@@ -1152,6 +1162,11 @@ struct eth_dev_ops {
        /** Disable Rx queue interrupt */
        eth_rx_disable_intr_t      rx_queue_intr_disable;
+       /** Enable Rx queue direct rearm mode */
+       eth_rx_direct_rearm_enable_t rx_queue_direct_rearm_enable;
+       /** Map Rx/Tx queue for direct rearm mode */
+       eth_rx_direct_rearm_map_t  rx_queue_direct_rearm_map;
+
        eth_tx_queue_setup_t       tx_queue_setup;/**< Set up device Tx queue */
        eth_queue_release_t        tx_queue_release; /**< Release Tx queue */
        eth_tx_done_cleanup_t      tx_done_cleanup;/**< Free Tx ring mbufs */
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 29a3d80466..8e6f0284f4 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -2139,6 +2139,20 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
        return eth_err(port_id, ret);
  }
+int
+rte_eth_direct_rxrearm_map(uint16_t rx_port_id, uint16_t rx_queue_id,
+               uint16_t tx_port_id, uint16_t tx_queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       dev = &rte_eth_devices[rx_port_id];

I think this is rather a control path operation. So:
We need the standard checks that rx_port_id is valid.
tx_port_id must be checked as well.
rx_queue_id and tx_queue_id must be checked to be in range.
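
A minimal sketch of the kind of checks I mean, using the helpers other ethdev
control path functions already use (the exact log messages are just placeholders):

/* Validate both port IDs the standard way. */
RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, -ENODEV);
RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, -ENODEV);

dev = &rte_eth_devices[rx_port_id];

/* Check that both queue IDs are within the configured ranges. */
if (rx_queue_id >= dev->data->nb_rx_queues) {
        RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
        return -EINVAL;
}
if (tx_queue_id >= rte_eth_devices[tx_port_id].data->nb_tx_queues) {
        RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
        return -EINVAL;
}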

+       (*dev->dev_ops->rx_queue_direct_rearm_enable)(dev, rx_queue_id);
+       (*dev->dev_ops->rx_queue_direct_rearm_map)(dev, rx_queue_id,
+                       tx_port_id, tx_queue_id);

We must check that the function pointers are not NULL, as usual.
Return values must be checked as well.
Isn't it safer to set up the mapping first and then enable the mode?
Otherwise we definitely need a disable operation.
Also, what should happen on Tx port unplug? How do we continue if
the Rx port is still up and running?
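
Something along these lines is what I have in mind; only a sketch, reusing the
eth_err() helper that rte_ethdev.c already has, and doing the map before the
enable as suggested:

int ret;

/* Both callbacks must be implemented by the PMD. */
if (*dev->dev_ops->rx_queue_direct_rearm_map == NULL ||
    *dev->dev_ops->rx_queue_direct_rearm_enable == NULL)
        return -ENOTSUP;

/* Set up the mapping first, then enable the mode. */
ret = (*dev->dev_ops->rx_queue_direct_rearm_map)(dev, rx_queue_id,
                tx_port_id, tx_queue_id);
if (ret != 0)
        return eth_err(rx_port_id, ret);

ret = (*dev->dev_ops->rx_queue_direct_rearm_enable)(dev, rx_queue_id);
return eth_err(rx_port_id, ret);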

+
+       return 0;
+}
+
  int
  rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
  {
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 04cff8ee10..4a431fcbed 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -5190,6 +5190,37 @@ __rte_experimental
  int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
                                       struct rte_eth_hairpin_cap *cap);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Enable direct re-arm mode. In this mode the RX queue will be re-armed using
+ * buffers that have completed transmission on the transmit side.
+ *
+ * @note
+ *   It is assumed that the buffers that have completed transmission belong to
+ *   the mempool used at the receive side, and have refcnt = 1.

I think it is possible to avoid such limitations, but the
implementation will be less optimized since more checks are required.
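
For example, the rearm path could validate every transmitted mbuf before recycling
it and fall back to the normal free/allocate path otherwise. A purely hypothetical
sketch (helper and field names are placeholders, not actual PMD code):

#include <stdbool.h>
#include <rte_mbuf.h>

/* Recycle an mbuf into the Rx ring only if it is really safe to do so:
 * it comes from the Rx queue's mempool, it is a direct (non-attached)
 * mbuf, nobody else holds a reference and it is a single segment.
 */
static inline bool
mbuf_can_direct_rearm(struct rte_mbuf *m, struct rte_mempool *rx_mp)
{
        return m != NULL &&
               m->pool == rx_mp &&
               RTE_MBUF_DIRECT(m) &&
               rte_mbuf_refcnt_read(m) == 1 &&
               m->nb_segs == 1;
}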

+ *
+ * @param rx_port_id
+ *   Port identifying the receive side.
+ * @param rx_queue_id
+ *   The index of the receive queue identifying the receive side.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param tx_port_id
+ *   Port identifying the transmit side.

I guess there is an assumption that the Rx and Tx ports are
serviced by the same driver. If so, and if it is an API
limitation, the ethdev layer must enforce it.
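
If so, a check along these lines could live in the ethdev layer; a sketch under
that assumption (whether comparing the backing rte_driver is the right criterion
is part of the open question):

struct rte_eth_dev *rx_dev = &rte_eth_devices[rx_port_id];
struct rte_eth_dev *tx_dev = &rte_eth_devices[tx_port_id];

/* Reject the mapping if the two ports are not backed by the same
 * driver, since the Rx PMD must understand the layout of the Tx
 * queue it rearms from.
 */
if (rx_dev->device == NULL || tx_dev->device == NULL ||
    rx_dev->device->driver != tx_dev->device->driver) {
        RTE_ETHDEV_LOG(ERR,
                "Rx port %u and Tx port %u are not serviced by the same driver\n",
                rx_port_id, tx_port_id);
        return -ENOTSUP;
}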

+ * @param tx_queue_id
+ *   The index of the transmit queue identifying the transmit side.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ *
+ * @return
+ *   - (0) if successful.
+ */
+__rte_experimental
+int rte_eth_direct_rxrearm_map(uint16_t rx_port_id, uint16_t rx_queue_id,
+                              uint16_t tx_port_id, uint16_t tx_queue_id);
+
  /**
   * @warning
   * @b EXPERIMENTAL: this structure may change without prior notice.
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 20391ab29e..68d664498c 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -279,6 +279,7 @@ EXPERIMENTAL {
        rte_flow_async_action_handle_create;
        rte_flow_async_action_handle_destroy;
        rte_flow_async_action_handle_update;
+       rte_eth_direct_rxrearm_map;
  };
INTERNAL {
