In a non-SR-IOV environment, VMDq RSS can be enabled via the MRQC register.
In theory, the number of queues per pool can be 2 or 4, but only 2 queues
are available due to a HW limitation; the same limit also exists in the
Linux ixgbe driver.

Signed-off-by: Changchun Ouyang <changchun.ouyang at intel.com>
---
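Note: below is a minimal usage sketch, not part of the patch itself. It
shows how an application could request VMDq RSS once this change is in;
the port id, queue counts and the ETH_RSS_IP hash selection are
illustrative assumptions, chosen for an ixgbe port with 128 Rx queues
(e.g. 82599), where 64 pools leave 128 / 64 = 2 RSS queues per pool:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative helper, not part of this patch: configure a port for
     * VMDq RSS with 64 pools, i.e. 2 RSS queues per pool on 82599. */
    static int
    setup_vmdq_rss(uint8_t port_id)
    {
            struct rte_eth_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
            conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_64_POOLS;
            conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;

            /* 128 Rx queues in total: 64 pools * 2 queues per pool */
            return rte_eth_dev_configure(port_id, 128, 128, &conf);
    }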
 lib/librte_ether/rte_ethdev.c     | 40 +++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 82 +++++++++++++++++++++++++++++++++------
 2 files changed, 111 insertions(+), 11 deletions(-)
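
The register-level mapping the ixgbe changes below implement, sketched as
a standalone helper. The table follows from the 82599's 128 Rx queues;
the RQPL semantics (log2 of the per-pool queue count) are my reading of
the datasheet, so treat this as illustrative:

    #include <stdint.h>

    /* Illustrative only -- not part of this patch.
     *
     *   nb_queue_pools   nb_q_per_pool   MRQC.MRQE setting
     *   ETH_64_POOLS     128 / 64 = 2    IXGBE_MRQC_VMDQRSS64EN
     *   ETH_32_POOLS     128 / 32 = 4    IXGBE_MRQC_VMDQRSS32EN
     *
     * PSRTYPE.RQPL holds log2(nb_q_per_pool); for the two supported
     * values this equals nb_q_per_pool >> 1, the shift applied in
     * ixgbe_dev_rx_init() below.
     */
    static uint32_t
    rqpl_field(uint16_t nb_q_per_pool, uint32_t rqpl_shift)
    {
            /* 2 >> 1 = 1 (2^1 = 2 queues), 4 >> 1 = 2 (2^2 = 4 queues) */
            return (uint32_t)(nb_q_per_pool >> 1) << rqpl_shift;
    }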

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 03fce08..be9105f 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -983,6 +983,16 @@ rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
        return 0;
 }

+#define VMDQ_RSS_RX_QUEUE_NUM_MAX 4
+
+static int
+rte_eth_dev_check_vmdq_rss_rxq_num(__rte_unused uint8_t port_id, uint16_t nb_rx_q)
+{
+       if (nb_rx_q > VMDQ_RSS_RX_QUEUE_NUM_MAX)
+               return -EINVAL;
+       return 0;
+}
+
 static int
 rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
@@ -1143,6 +1153,36 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                                return (-EINVAL);
                        }
                }
+
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_RSS) {
+                       uint32_t nb_queue_pools =
+                               dev_conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+                       struct rte_eth_dev_info dev_info;
+
+                       rte_eth_dev_info_get(port_id, &dev_info);
+                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+                       if (nb_queue_pools == ETH_32_POOLS || nb_queue_pools == ETH_64_POOLS)
+                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+                                       dev_info.max_rx_queues/nb_queue_pools;
+                       else {
+                               PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ "
+                                               "nb_queue_pools=%d invalid "
+                                               "in VMDQ RSS\n",
+                                               port_id,
+                                               nb_queue_pools);
+                               return -EINVAL;
+                       }
+
+                       if (rte_eth_dev_check_vmdq_rss_rxq_num(port_id,
+                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) != 0) {
+                               PMD_DEBUG_TRACE("ethdev port_id=%d"
+                                       " invalid queue number for"
+                                       " VMDQ RSS, allowed values"
+                                       " are 1, 2 or 4\n",
+                                       port_id);
+                               return -EINVAL;
+                       }
+               }
        }
        return 0;
 }
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 96c4b98..5a6227f 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3172,15 +3172,15 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 }

 /*
- * VMDq only support for 10 GbE NIC.
+ * Configure the pools for VMDq on a 10 GbE NIC.
  */
 static void
-ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+ixgbe_vmdq_pool_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_vmdq_rx_conf *cfg;
        struct ixgbe_hw *hw;
        enum rte_eth_nb_pools num_pools;
-       uint32_t mrqc, vt_ctl, vlanctrl;
+       uint32_t vt_ctl, vlanctrl;
        uint32_t vmolr = 0;
        int i;

@@ -3189,12 +3189,6 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
        num_pools = cfg->nb_queue_pools;

-       ixgbe_rss_disable(dev);
-
-       /* MRQC: enable vmdq */
-       mrqc = IXGBE_MRQC_VMDQEN;
-       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
        /* PFVTCTL: turn on virtualisation and set the default pool */
        vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
        if (cfg->enable_default_pool)
@@ -3261,6 +3255,28 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 }

 /*
+ * VMDq is supported only on 10 GbE NICs.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mrqc;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbe_rss_disable(dev);
+
+       /* MRQC: enable vmdq */
+       mrqc = IXGBE_MRQC_VMDQEN;
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+       IXGBE_WRITE_FLUSH(hw);
+
+       ixgbe_vmdq_pool_configure(dev);
+}
+
+/*
  * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
  * @hw: pointer to hardware structure
  */
@@ -3365,6 +3381,41 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
 }

 static int
+ixgbe_config_vmdq_rss(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mrqc;
+
+       ixgbe_rss_configure(dev);
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* MRQC: enable VMDQ RSS */
+       mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+
+       switch (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) {
+       case 2:
+               mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+               break;
+
+       case 4:
+               mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+               break;
+
+       default:
+               PMD_INIT_LOG(ERR, "Invalid pool number in non-IOV mode with VMDQ RSS");
+               return -EINVAL;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+       ixgbe_vmdq_pool_configure(dev);
+
+       return 0;
+}
+
+static int
 ixgbe_config_vf_default(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
@@ -3420,6 +3471,10 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                                ixgbe_vmdq_rx_hw_configure(dev);
                                break;

+                       case ETH_MQ_RX_VMDQ_RSS:
+                               ixgbe_config_vmdq_rss(dev);
+                               break;
+
                        case ETH_MQ_RX_NONE:
                                /* if mq_mode is none, disable rss mode.*/
                        default: ixgbe_rss_disable(dev);
@@ -3579,6 +3634,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               uint32_t psrtype = 0;
+
                rxq = dev->data->rx_queues[i];

                /*
@@ -3608,12 +3665,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                if (dev->data->dev_conf.rxmode.header_split) {
                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                /* Must setup the PSRTYPE register */
-                               uint32_t psrtype;
                                psrtype = IXGBE_PSRTYPE_TCPHDR |
                                        IXGBE_PSRTYPE_UDPHDR   |
                                        IXGBE_PSRTYPE_IPV4HDR  |
                                        IXGBE_PSRTYPE_IPV6HDR;
-                               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
                        }
                        srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
                                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
@@ -3623,6 +3678,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 #endif
                        srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

+               /* Set RQPL for VMDQ RSS according to the queues per pool */
+               psrtype |= (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool >> 1) <<
+                       IXGBE_PSRTYPE_RQPL_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+
                /* Set if packets are dropped when no descriptors available */
                if (rxq->drop_en)
                        srrctl |= IXGBE_SRRCTL_DROP_EN;
-- 
1.8.4.2
