The current virtchnl structure for enabling/disabling queues only supports
a maximum of 32 queue pairs. Use a new opcode and structure that can describe
up to 256 queue pairs, so that queues can be enabled/disabled in the large
VF case.
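
As an illustration only, and not part of the patch: a minimal sketch of how
the new V2 message is filled for a single RX queue, using the structures
shown in the diff below. The variables qid and vsi_id are assumed to be
provided by the caller.

    /* sketch: describe one RX queue (qid) on VSI vsi_id */
    struct virtchnl_del_ena_dis_queues qs = { 0 };

    qs.vport_id = vsi_id;
    qs.chunks.num_chunks = 1;
    qs.chunks.chunks[0].type = VIRTCHNL_QUEUE_TYPE_RX;
    qs.chunks.chunks[0].start_queue_id = qid;
    qs.chunks.chunks[0].num_queues = 1;
    /* sent with VIRTCHNL_OP_ENABLE_QUEUES_V2 or VIRTCHNL_OP_DISABLE_QUEUES_V2 */

The driver allocates the message with rte_zmalloc() and sends it through
ice_dcf_execute_virtchnl_cmd(); see ice_dcf_switch_queue_lv() in the diff.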

Signed-off-by: Steve Yang <stevex.y...@intel.com>
Signed-off-by: Kevin Liu <kevinx....@intel.com>
---
 drivers/net/ice/ice_dcf.c        | 99 +++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_dcf.h        |  5 ++
 drivers/net/ice/ice_dcf_ethdev.c | 26 +++++++--
 drivers/net/ice/ice_dcf_ethdev.h |  8 +--
 4 files changed, 125 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 290f754049..23edfd09b1 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -90,7 +90,6 @@ ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
                        *rsp_msglen = event.msg_len;
 
                return rte_le_to_cpu_32(event.desc.cookie_low);
-
 again:
                rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
        } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
@@ -896,7 +895,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 {
        struct rte_eth_dev *dev = hw->eth_dev;
        struct rte_eth_rss_conf *rss_conf;
-       uint8_t i, j, nb_q;
+       uint16_t i, j, nb_q;
        int ret;
 
        rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
@@ -1075,6 +1074,12 @@ ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num)
                return err;
        }
 
+       /* request queues succeeded, vf is resetting */
+       if (hw->resetting) {
+               PMD_DRV_LOG(INFO, "vf is resetting");
+               return 0;
+       }
+
        /* request additional queues failed, return available number */
        num_queue_pairs = ((struct virtchnl_vf_res_request *)
                                args.rsp_msgbuf)->num_queue_pairs;
@@ -1185,7 +1190,8 @@ ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
        args.req_msg = (u8 *)map_info;
        args.req_msglen = len;
        args.rsp_msgbuf = hw->arq_buf;
-       args.req_msglen = ICE_DCF_AQ_BUF_SZ;
+       args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+       args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
        err = ice_dcf_execute_virtchnl_cmd(hw, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
@@ -1225,6 +1231,50 @@ ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
        return err;
 }
 
+int
+ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
+{
+       struct virtchnl_del_ena_dis_queues *queue_select;
+       struct virtchnl_queue_chunk *queue_chunk;
+       struct dcf_virtchnl_cmd args;
+       int err, len;
+
+       len = sizeof(struct virtchnl_del_ena_dis_queues);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = 1;
+       queue_select->vport_id = hw->vsi_res->vsi_id;
+
+       if (rx) {
+               queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
+               queue_chunk->start_queue_id = qid;
+               queue_chunk->num_queues = 1;
+       } else {
+               queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
+               queue_chunk->start_queue_id = qid;
+               queue_chunk->num_queues = 1;
+       }
+
+       if (on)
+               args.v_op = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+       else
+               args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+       args.req_msg = (u8 *)queue_select;
+       args.req_msglen = len;
+       args.rsp_msgbuf = hw->arq_buf;
+       args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+       args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+       err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+                           on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+       rte_free(queue_select);
+       return err;
+}
+
 int
 ice_dcf_disable_queues(struct ice_dcf_hw *hw)
 {
@@ -1254,6 +1304,49 @@ ice_dcf_disable_queues(struct ice_dcf_hw *hw)
        return err;
 }
 
+int
+ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw)
+{
+       struct virtchnl_del_ena_dis_queues *queue_select;
+       struct virtchnl_queue_chunk *queue_chunk;
+       struct dcf_virtchnl_cmd args;
+       int err, len;
+
+       len = sizeof(struct virtchnl_del_ena_dis_queues) +
+                 sizeof(struct virtchnl_queue_chunk) *
+                 (ICE_DCF_RXTX_QUEUE_CHUNKS_NUM - 1);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (!queue_select)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = ICE_DCF_RXTX_QUEUE_CHUNKS_NUM;
+       queue_select->vport_id = hw->vsi_res->vsi_id;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+                                       hw->eth_dev->data->nb_tx_queues;
+
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+       queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+                                       hw->eth_dev->data->nb_rx_queues;
+
+       args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+       args.req_msg = (u8 *)queue_select;
+       args.req_msglen = len;
+       args.rsp_msgbuf = hw->arq_buf;
+       args.rsp_msglen = ICE_DCF_AQ_BUF_SZ;
+       args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+       err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+       if (err)
+               PMD_DRV_LOG(ERR,
+                           "Failed to execute command of OP_DISABLE_QUEUES_V2");
+       rte_free(queue_select);
+       return err;
+}
+
 int
 ice_dcf_query_stats(struct ice_dcf_hw *hw,
                                   struct virtchnl_eth_stats *pstats)
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index ce57a687ab..78ab23aaa6 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -15,6 +15,8 @@
 #include "base/ice_type.h"
 #include "ice_logs.h"
 
+#define ICE_DCF_RXTX_QUEUE_CHUNKS_NUM  2
+
 struct dcf_virtchnl_cmd {
        TAILQ_ENTRY(dcf_virtchnl_cmd) next;
 
@@ -143,7 +145,10 @@ int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
                              uint16_t num, uint16_t index);
 int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
+int ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw,
+                           uint16_t qid, bool rx, bool on);
 int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
+int ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
                        struct virtchnl_eth_stats *pstats);
 int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 1ddba02ebb..e46c8405aa 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -317,6 +317,7 @@ static int
 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct ice_dcf_adapter *ad = dev->data->dev_private;
+       struct ice_dcf_hw *dcf_hw = &ad->real_hw;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_rx_queue *rxq;
        int err = 0;
@@ -339,7 +340,11 @@ ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        IAVF_WRITE_FLUSH(hw);
 
        /* Ready to switch the queue on */
-       err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
+       if (!dcf_hw->lv_enabled)
+               err = ice_dcf_switch_queue(dcf_hw, rx_queue_id, true, true);
+       else
+               err = ice_dcf_switch_queue_lv(dcf_hw, rx_queue_id, true, true);
+
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                            rx_queue_id);
@@ -448,6 +453,7 @@ static int
 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ice_dcf_adapter *ad = dev->data->dev_private;
+       struct ice_dcf_hw *dcf_hw = &ad->real_hw;
        struct iavf_hw *hw = &ad->real_hw.avf;
        struct ice_tx_queue *txq;
        int err = 0;
@@ -463,7 +469,10 @@ ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        IAVF_WRITE_FLUSH(hw);
 
        /* Ready to switch the queue on */
-       err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
+       if (!dcf_hw->lv_enabled)
+               err = ice_dcf_switch_queue(dcf_hw, tx_queue_id, false, true);
+       else
+               err = ice_dcf_switch_queue_lv(dcf_hw, tx_queue_id, false, true);
 
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -650,12 +659,17 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
        struct ice_tx_queue *txq;
-       int ret, i;
+       int i;
 
        /* Stop All queues */
-       ret = ice_dcf_disable_queues(hw);
-       if (ret)
-               PMD_DRV_LOG(WARNING, "Fail to stop queues");
+       if (!hw->lv_enabled) {
+               if (ice_dcf_disable_queues(hw))
+                       PMD_DRV_LOG(WARNING, "Fail to stop queues");
+       } else {
+               if (ice_dcf_disable_queues_lv(hw))
+                       PMD_DRV_LOG(WARNING,
+                                   "Fail to stop queues for large VF");
+       }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index 9ef524c97c..3f740e2c7b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -20,10 +20,10 @@
 #define ICE_DCF_ETH_OVERHEAD \
        (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2)
 #define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD)
-#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
-#define ICE_DCF_MAX_NUM_QUEUES_LV   256
-#define ICE_DCF_CFG_Q_NUM_PER_BUF   32
-#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
+#define ICE_DCF_MAX_NUM_QUEUES_DFLT    16
+#define ICE_DCF_MAX_NUM_QUEUES_LV      256
+#define ICE_DCF_CFG_Q_NUM_PER_BUF      32
+#define ICE_DCF_IRQ_MAP_NUM_PER_BUF    128
 
 struct ice_dcf_queue {
        uint64_t dummy;
-- 
2.33.1
