Add support for queue operations:
 - rx_queue_release
 - tx_queue_release
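For context, the ethdev layer is expected to invoke these callbacks when an
application re-runs queue setup on an already-configured queue index (and on
the usual teardown/reconfigure paths), so the old queue is freed before a new
one is allocated. A minimal application-side sketch, not part of this patch;
port id, queue id, ring size and mempool are illustrative:

  #include <rte_ethdev.h>
  #include <rte_mempool.h>

  /* Re-running setup on the same queue index is expected to release the
   * previously allocated queue through the PMD's .rx_queue_release hook
   * before the new queue is created. */
  static int
  example_reconfigure_rxq(uint16_t port_id, uint16_t qid,
                          struct rte_mempool *mp)
  {
          /* 1024 descriptors and a NULL rxconf are placeholder values. */
          return rte_eth_rx_queue_setup(port_id, qid, 1024,
                                        rte_eth_dev_socket_id(port_id),
                                        NULL, mp);
  }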

Signed-off-by: Mingxia Liu <mingxia....@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  2 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 35 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |  2 ++
 3 files changed, 39 insertions(+)
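The def_rxq_ops/def_txq_ops tables introduced below plug the driver's mbuf
cleanup into the queue release helpers (idpf_rx_queue_release() /
idpf_tx_queue_release(), which live outside this patch). As a rough sketch of
how such a helper is assumed to consume the .release_mbufs hook; the function
name and the sw_ring/mz fields here are illustrative, taken from the usual
idpf queue layout:

  #include <rte_malloc.h>
  #include <rte_memzone.h>
  #include "cpfl_rxtx.h"  /* assumed to expose struct idpf_rx_queue */

  /* Hypothetical shape of an Rx queue release helper: return cached mbufs
   * via the PMD-provided hook, then free the rings and the queue itself. */
  static void
  example_rx_queue_release(void *rxq)
  {
          struct idpf_rx_queue *q = rxq;

          if (q == NULL)
                  return;

          if (q->ops != NULL && q->ops->release_mbufs != NULL)
                  q->ops->release_mbufs(q);

          rte_free(q->sw_ring);       /* software ring of mbuf pointers */
          rte_memzone_free(q->mz);    /* memzone backing the HW ring */
          rte_free(q);
  }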

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4332f66ed6..be3cac3b27 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -632,6 +632,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
        .tx_queue_start                 = cpfl_tx_queue_start,
        .rx_queue_stop                  = cpfl_rx_queue_stop,
        .tx_queue_stop                  = cpfl_tx_queue_stop,
+       .rx_queue_release               = cpfl_dev_rx_queue_release,
+       .tx_queue_release               = cpfl_dev_tx_queue_release,
        .dev_supported_ptypes_get       = cpfl_dev_supported_ptypes_get,
 };
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index b7d616de4f..a10deb6c96 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -49,6 +49,14 @@ cpfl_tx_offload_convert(uint64_t offload)
        return ol;
 }
 
+static const struct idpf_rxq_ops def_rxq_ops = {
+       .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+       .release_mbufs = release_txq_mbufs,
+};
+
 static const struct rte_memzone *
 cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,
                      uint16_t len, uint16_t queue_type,
@@ -177,6 +185,7 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
        reset_split_rx_bufq(bufq);
        bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
                         queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+       bufq->ops = &def_rxq_ops;
        bufq->q_set = true;
 
        if (bufq_id == 1) {
@@ -235,6 +244,12 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
                return -EINVAL;
 
+       /* Free memory if needed */
+       if (dev->data->rx_queues[queue_idx] != NULL) {
+               idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
        /* Setup Rx queue */
        rxq = rte_zmalloc_socket("cpfl rxq",
                                 sizeof(struct idpf_rx_queue),
@@ -287,6 +302,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                reset_single_rx_queue(rxq);
                rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
                                queue_idx * vport->chunks_info.rx_qtail_spacing);
+               rxq->ops = &def_rxq_ops;
        } else {
                reset_split_rx_descq(rxq);
 
@@ -399,6 +415,12 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                return -EINVAL;
 
+       /* Free memory if needed. */
+       if (dev->data->tx_queues[queue_idx] != NULL) {
+               idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("cpfl txq",
                                 sizeof(struct idpf_tx_queue),
@@ -461,6 +483,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
                        queue_idx * vport->chunks_info.tx_qtail_spacing);
+       txq->ops = &def_txq_ops;
        txq->q_set = true;
        dev->data->tx_queues[queue_idx] = txq;
 
@@ -674,6 +697,18 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+void
+cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 cpfl_stop_queues(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 6b63137d5c..037d479d56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -35,4 +35,6 @@ int cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void cpfl_stop_queues(struct rte_eth_dev *dev);
 int cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.25.1
