From: Nithin Dabilpuram <ndabilpu...@marvell.com>

Add device start operation and update the correct
function pointers for Rx and Tx burst functions.

This patch also updates the octeontx2 NIC specific
documentation.

Signed-off-by: Nithin Dabilpuram <ndabilpu...@marvell.com>
Signed-off-by: Vamsi Attunuru <vattun...@marvell.com>
Signed-off-by: Jerin Jacob <jer...@marvell.com>
---
 doc/guides/nics/octeontx2.rst           |  91 ++++++++++++
 drivers/net/octeontx2/otx2_ethdev.c     | 180 ++++++++++++++++++++++++
 drivers/net/octeontx2/otx2_flow.c       |   4 +-
 drivers/net/octeontx2/otx2_flow_parse.c |   4 +-
 drivers/net/octeontx2/otx2_ptp.c        |   8 ++
 drivers/net/octeontx2/otx2_vlan.c       |   1 +
 6 files changed, 286 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
index 90ca4e2d2..d4a458262 100644
--- a/doc/guides/nics/octeontx2.rst
+++ b/doc/guides/nics/octeontx2.rst
@@ -34,6 +34,7 @@ Features of the OCTEON TX2 Ethdev PMD are:
 - Vector Poll mode driver
 - Debug utilities - Context dump and error interrupt support
 - IEEE1588 timestamping
+- HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
 
 Prerequisites
 -------------
@@ -49,6 +50,63 @@ The following options may be modified in the ``config`` file.
 
   Toggle compilation of the ``librte_pmd_octeontx2`` driver.
 
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+To compile the OCTEON TX2 PMD for Linux arm64 gcc,
+use arm64-octeontx2-linux-gcc as target.
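+
+For example, a native build on the target with the make based build system
+(a minimal sketch; cross builds additionally need a suitable ``CROSS``
+toolchain prefix):
+
+.. code-block:: console
+
+   make config T=arm64-octeontx2-linux-gcc
+   make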
+
+#. Running testpmd:
+
+   Follow instructions available in the document
+   :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+   to run testpmd.
+
+   Example output:
+
+   .. code-block:: console
+
+      ./build/app/testpmd -c 0x300 -w 0002:02:00.0 -- --portmask=0x1 --nb-cores=1 --port-topology=loop --rxq=1 --txq=1
+      EAL: Detected 24 lcore(s)
+      EAL: Detected 1 NUMA nodes
+      EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
+      EAL: No available hugepages reported in hugepages-2048kB
+      EAL: Probing VFIO support...
+      EAL: VFIO support initialized
+      EAL: PCI device 0002:02:00.0 on NUMA socket 0
+      EAL:   probe driver: 177d:a063 net_octeontx2
+      EAL:   using IOMMU type 1 (Type 1)
+      testpmd: create a new mbuf pool <mbuf_pool_socket_0>: n=267456, size=2176, socket=0
+      testpmd: preferred mempool ops selected: octeontx2_npa
+      Configuring Port 0 (socket 0)
+      PMD: Port 0: Link Up - speed 40000 Mbps - full-duplex
+
+      Port 0: link state change event
+      Port 0: 36:10:66:88:7A:57
+      Checking link statuses...
+      Done
+      No commandline core given, start packet forwarding
+      io packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
+      Logical Core 9 (socket 0) forwards packets on 1 streams:
+        RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
+
+        io packet forwarding packets/burst=32
+        nb forwarding cores=1 - nb forwarding ports=1
+        port 0: RX queue number: 1 Tx queue number: 1
+          Rx offloads=0x0 Tx offloads=0x10000
+          RX queue: 0
+            RX desc=512 - RX free threshold=0
+            RX threshold registers: pthresh=0 hthresh=0  wthresh=0
+            RX Offloads=0x0
+          TX queue: 0
+            TX desc=512 - TX free threshold=0
+            TX threshold registers: pthresh=0 hthresh=0  wthresh=0
+            TX offloads=0x10000 - TX RS bit threshold=0
+      Press enter to exit
+
 Runtime Config Options
 ----------------------
 
@@ -116,6 +174,39 @@ Runtime Config Options
    parameters to all the PCIe devices if application requires to configure on
    all the ethdev ports.
 
+Limitations
+-----------
+
+``mempool_octeontx2`` external mempool handler dependency
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The OCTEON TX2 SoC family NIC has an inbuilt HW-assisted external mempool manager.
+The ``net_octeontx2`` PMD only works with the ``mempool_octeontx2`` mempool handler,
+as it is the most performance-effective way to allocate packets and recycle Tx
+buffers on the OCTEON TX2 SoC platform.
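+
+The ``octeontx2_npa`` mempool ops is normally selected as the platform default,
+as seen in the testpmd output above. If it is not, and assuming the EAL
+``--mbuf-pool-ops-name`` option is available in this release, the handler can be
+requested explicitly, for example:
+
+.. code-block:: console
+
+   ./build/app/testpmd --mbuf-pool-ops-name="octeontx2_npa" -c 0x300 -w 0002:02:00.0 -- --portmask=0x1 --nb-cores=1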
+
+CRC striping
+~~~~~~~~~~~~
+
+The OCTEON TX2 SoC family NICs strip the CRC for every packet being received by
+the host interface irrespective of the offload configuration.
+
+
+Debugging Options
+-----------------
+
+.. _table_octeontx2_ethdev_debug_options:
+
+.. table:: OCTEON TX2 ethdev debug options
+
+   +---+------------+-------------------------------------------------------+
+   | # | Component  | EAL log command                                       |
+   +===+============+=======================================================+
+   | 1 | NIX        | --log-level='pmd\.net.octeontx2,8'                    |
+   +---+------------+-------------------------------------------------------+
+   | 2 | NPC        | --log-level='pmd\.net.octeontx2\.flow,8'              |
+   +---+------------+-------------------------------------------------------+
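+
+For example, to raise the NIX log level to debug while running the testpmd
+command shown above:
+
+.. code-block:: console
+
+   ./build/app/testpmd --log-level='pmd\.net.octeontx2,8' -c 0x300 -w 0002:02:00.0 -- --portmask=0x1 --nb-cores=1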
+
 RTE Flow Support
 ----------------
 
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 44753cbf5..7f33f8808 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -135,6 +135,55 @@ otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
        return otx2_mbox_process(mbox);
 }
 
+static int
+npc_rx_enable(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_disable(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_start_link_event(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       if (otx2_dev_is_vf(dev))
+               return 0;
+
+       otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
+static int
+cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       if (otx2_dev_is_vf(dev))
+               return 0;
+
+       if (en)
+               otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
+       else
+               otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
 static inline void
 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
 {
@@ -478,6 +527,74 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
                return NIX_MAXSQESZ_W8;
 }
 
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct rte_eth_conf *conf = &data->dev_conf;
+       struct rte_eth_rxmode *rxmode = &conf->rxmode;
+       uint16_t flags = 0;
+
+       if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+               flags |= NIX_RX_OFFLOAD_RSS_F;
+
+       if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+                        DEV_RX_OFFLOAD_UDP_CKSUM))
+               flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+       if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+                               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+               flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+               flags |= NIX_RX_MULTI_SEG_F;
+
+       if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+                               DEV_RX_OFFLOAD_QINQ_STRIP))
+               flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+               flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+       return flags;
+}
+
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       uint64_t conf = dev->tx_offloads;
+       uint16_t flags = 0;
+
+       /* Fastpath is dependent on these enums */
+       RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+       RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+       RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+
+       if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+           conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+               flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+       if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+           conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+               flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+       if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+           conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+           conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+           conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+               flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+       if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+               flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+       if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+               flags |= NIX_TX_MULTI_SEG_F;
+
+       return flags;
+}
+
 static int
 nix_sq_init(struct otx2_eth_txq *txq)
 {
@@ -1092,6 +1209,8 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 
        dev->rx_offloads = rxmode->offloads;
        dev->tx_offloads = txmode->offloads;
+       dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+       dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
        dev->rss_info.rss_grps = NIX_RSS_GRPS;
 
        nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
@@ -1131,6 +1250,13 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
                goto free_nix_lf;
        }
 
+       /* Configure loop back mode */
+       rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
+       if (rc) {
+               otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
+               goto free_nix_lf;
+       }
+
        rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
        if (rc) {
                otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
@@ -1280,6 +1406,59 @@ otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
        return rc;
 }
 
+static int
+otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       int rc, i;
+
+       /* Start rx queues */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rc = otx2_nix_rx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       /* Start tx queues  */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               rc = otx2_nix_tx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
+       if (rc) {
+               otx2_err("Failed to update flow ctrl mode %d", rc);
+               return rc;
+       }
+
+       rc = npc_rx_enable(dev);
+       if (rc) {
+               otx2_err("Failed to enable NPC rx %d", rc);
+               return rc;
+       }
+
+       otx2_nix_toggle_flag_link_cfg(dev, true);
+
+       rc = nix_cgx_start_link_event(dev);
+       if (rc) {
+               otx2_err("Failed to start cgx link event %d", rc);
+               goto rx_disable;
+       }
+
+       otx2_nix_toggle_flag_link_cfg(dev, false);
+       otx2_eth_set_tx_function(eth_dev);
+       otx2_eth_set_rx_function(eth_dev);
+
+       return 0;
+
+rx_disable:
+       npc_rx_disable(dev);
+       otx2_nix_toggle_flag_link_cfg(dev, false);
+       return rc;
+}
+
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops otx2_eth_dev_ops = {
        .dev_infos_get            = otx2_nix_info_get,
@@ -1289,6 +1468,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
        .tx_queue_release         = otx2_nix_tx_queue_release,
        .rx_queue_setup           = otx2_nix_rx_queue_setup,
        .rx_queue_release         = otx2_nix_rx_queue_release,
+       .dev_start                = otx2_nix_dev_start,
        .tx_queue_start           = otx2_nix_tx_queue_start,
        .tx_queue_stop            = otx2_nix_tx_queue_stop,
        .rx_queue_start           = otx2_nix_rx_queue_start,
diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c
index 3ddecfb23..982100df4 100644
--- a/drivers/net/octeontx2/otx2_flow.c
+++ b/drivers/net/octeontx2/otx2_flow.c
@@ -528,8 +528,10 @@ otx2_flow_destroy(struct rte_eth_dev *dev,
                        return -EINVAL;
 
                /* Clear mark offload flag if there are no more mark actions */
-               if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0)
+               if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
                        hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+                       otx2_eth_set_rx_function(dev);
+               }
        }
 
        rc = flow_free_rss_action(dev, flow);
diff --git a/drivers/net/octeontx2/otx2_flow_parse.c b/drivers/net/octeontx2/otx2_flow_parse.c
index 4cf5ce17e..375d00620 100644
--- a/drivers/net/octeontx2/otx2_flow_parse.c
+++ b/drivers/net/octeontx2/otx2_flow_parse.c
@@ -926,9 +926,11 @@ otx2_flow_parse_actions(struct rte_eth_dev *dev,
        if (mark)
                flow->npc_action |= (uint64_t)mark << 40;
 
-       if (rte_atomic32_read(&npc->mark_actions) == 1)
+       if (rte_atomic32_read(&npc->mark_actions) == 1) {
                hw->rx_offload_flags |=
                        NIX_RX_OFFLOAD_MARK_UPDATE_F;
+               otx2_eth_set_rx_function(dev);
+       }
 
 set_pf_func:
        /* Ideally AF must ensure that correct pf_func is set */
diff --git a/drivers/net/octeontx2/otx2_ptp.c b/drivers/net/octeontx2/otx2_ptp.c
index 5291da241..0186c629a 100644
--- a/drivers/net/octeontx2/otx2_ptp.c
+++ b/drivers/net/octeontx2/otx2_ptp.c
@@ -118,6 +118,10 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
                        struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
                        otx2_nix_form_default_desc(txq);
                }
+
+               /* Setting up the function pointers as per new offload flags */
+               otx2_eth_set_rx_function(eth_dev);
+               otx2_eth_set_tx_function(eth_dev);
        }
        return rc;
 }
@@ -147,6 +151,10 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
                        struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
                        otx2_nix_form_default_desc(txq);
                }
+
+               /* Setting up the function pointers as per new offload flags */
+               otx2_eth_set_rx_function(eth_dev);
+               otx2_eth_set_tx_function(eth_dev);
        }
        return rc;
 }
diff --git a/drivers/net/octeontx2/otx2_vlan.c b/drivers/net/octeontx2/otx2_vlan.c
index dc0f4e032..189c45174 100644
--- a/drivers/net/octeontx2/otx2_vlan.c
+++ b/drivers/net/octeontx2/otx2_vlan.c
@@ -760,6 +760,7 @@ otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                        DEV_RX_OFFLOAD_QINQ_STRIP)) {
                dev->rx_offloads |= offloads;
                dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+               otx2_eth_set_rx_function(eth_dev);
        }
 
 done:
-- 
2.21.0
