Add support for queue operations:
- setup rx/tx queue
- release rx/tx queue
- start rx/tx queues
- stop rx/tx queues

Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
Signed-off-by: Junfeng Guo <junfeng....@intel.com>
---
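
A usage note: the following minimal sketch (illustrative only, not part
of the diff) shows how an application reaches the new queue ops through
the generic ethdev API. The port id, queue counts, ring size and the
mempool are assumptions for the example; gve ignores the requested
nb_desc and falls back to the ring size reported by the device (see the
WARNING logs in the setup functions).

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
gve_example_bringup(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_configure(port, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Dispatches to gve_rx_queue_setup() via eth_dev_ops. */
	ret = rte_eth_rx_queue_setup(port, 0, 512, rte_socket_id(), NULL, mp);
	if (ret != 0)
		return ret;

	/* Dispatches to gve_tx_queue_setup() via eth_dev_ops. */
	ret = rte_eth_tx_queue_setup(port, 0, 512, rte_socket_id(), NULL);
	if (ret != 0)
		return ret;

	/* gve_dev_start() creates the queues over the admin queue and
	 * posts the initial rx buffers via gve_refill_pages().
	 */
	return rte_eth_dev_start(port);
}
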
 drivers/net/gve/gve_ethdev.c | 206 +++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_ethdev.h |  48 ++++++++
 drivers/net/gve/gve_rx.c     | 212 ++++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_tx.c     | 214 +++++++++++++++++++++++++++++++++++
 drivers/net/gve/meson.build  |   2 +
 5 files changed, 682 insertions(+)
 create mode 100644 drivers/net/gve/gve_rx.c
 create mode 100644 drivers/net/gve/gve_tx.c

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 5bcc9ab3a0..7a3695aec1 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -28,12 +28,111 @@ gve_write_version(uint8_t *driver_version_register)
        writeb('\n', driver_version_register);
 }
 
+static int
+gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
+{
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       struct gve_queue_page_list *qpl;
+       const struct rte_memzone *mz;
+       dma_addr_t page_bus;
+       uint32_t i;
+
+       if (priv->num_registered_pages + pages >
+           priv->max_registered_pages) {
+               PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" 
PRIu64,
+                           priv->num_registered_pages + pages,
+                           priv->max_registered_pages);
+               return -EINVAL;
+       }
+       qpl = &priv->qpl[id];
+       snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
+       mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
+                                        rte_socket_id(),
+                                        RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
+               return -ENOMEM;
+       }
+       qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
+       if (qpl->page_buses == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
+               rte_memzone_free(mz);
+               return -ENOMEM;
+       }
+       page_bus = mz->iova;
+       for (i = 0; i < pages; i++) {
+               qpl->page_buses[i] = page_bus;
+               page_bus += PAGE_SIZE;
+       }
+       qpl->id = id;
+       qpl->mz = mz;
+       qpl->num_entries = pages;
+
+       priv->num_registered_pages += pages;
+
+       return 0;
+}
+
+static void
+gve_free_qpls(struct gve_priv *priv)
+{
+       uint16_t nb_txqs = priv->max_nb_txq;
+       uint16_t nb_rxqs = priv->max_nb_rxq;
+       uint32_t i;
+
+       if (priv->qpl == NULL)
+               return;
+
+       for (i = 0; i < nb_txqs + nb_rxqs; i++) {
+               if (priv->qpl[i].mz != NULL)
+                       rte_memzone_free(priv->qpl[i].mz);
+               rte_free(priv->qpl[i].page_buses);
+       }
+       rte_free(priv->qpl);
+}
+
 static int
 gve_dev_configure(__rte_unused struct rte_eth_dev *dev)
 {
        return 0;
 }
 
+static int
+gve_refill_pages(struct gve_rx_queue *rxq)
+{
+       struct rte_mbuf *nmb;
+       uint16_t i;
+       int diag;
+
+       diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
+       if (diag < 0) {
+               for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+                       nmb = rte_pktmbuf_alloc(rxq->mpool);
+                       if (!nmb)
+                               break;
+                       rxq->sw_ring[i] = nmb;
+               }
+               if (i < rxq->nb_rx_desc - 1)
+                       return -ENOMEM;
+       }
+       rxq->nb_avail = 0;
+       rxq->next_avail = rxq->nb_rx_desc - 1;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               if (rxq->is_gqi_qpl) {
+                       rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
+               } else {
+                       if (i == rxq->nb_rx_desc - 1)
+                               break;
+                       nmb = rxq->sw_ring[i];
+                       rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
+               }
+       }
+
+       rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);
+
+       return 0;
+}
+
 static int
 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 {
@@ -65,16 +164,70 @@ gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 static int
 gve_dev_start(struct rte_eth_dev *dev)
 {
+       uint16_t num_queues = dev->data->nb_tx_queues;
+       struct gve_priv *priv = dev->data->dev_private;
+       struct gve_tx_queue *txq;
+       struct gve_rx_queue *rxq;
+       uint16_t i;
+       int err;
+
+       priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
+       err = gve_adminq_create_tx_queues(priv, num_queues);
+       if (err) {
+               PMD_DRV_LOG(ERR, "failed to create %u tx queues.", num_queues);
+               return err;
+       }
+       for (i = 0; i < num_queues; i++) {
+               txq = priv->txqs[i];
+               txq->qtx_tail =
+               &priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
+               txq->qtx_head =
+               &priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
+
+               rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
+       }
+
+       num_queues = dev->data->nb_rx_queues;
+       priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
+       err = gve_adminq_create_rx_queues(priv, num_queues);
+       if (err) {
+               PMD_DRV_LOG(ERR, "failed to create %u rx queues.", num_queues);
+               goto err_tx;
+       }
+       for (i = 0; i < num_queues; i++) {
+               rxq = priv->rxqs[i];
+               rxq->qrx_tail =
+               &priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
+
+               rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
+
+               err = gve_refill_pages(rxq);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to refill for RX");
+                       goto err_rx;
+               }
+       }
+
        dev->data->dev_started = 1;
        gve_link_update(dev, 0);
 
        return 0;
+
+err_rx:
+       gve_stop_rx_queues(dev);
+err_tx:
+       gve_stop_tx_queues(dev);
+       return err;
 }
 
 static int
 gve_dev_stop(struct rte_eth_dev *dev)
 {
        dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+
+       gve_stop_tx_queues(dev);
+       gve_stop_rx_queues(dev);
+
        dev->data->dev_started = 0;
 
        return 0;
@@ -83,7 +236,11 @@ gve_dev_stop(struct rte_eth_dev *dev)
 static int
 gve_dev_close(struct rte_eth_dev *dev)
 {
+       struct gve_priv *priv = dev->data->dev_private;
+       struct gve_tx_queue *txq;
+       struct gve_rx_queue *rxq;
        int err = 0;
+       uint16_t i;
 
        if (dev->data->dev_started) {
                err = gve_dev_stop(dev);
@@ -91,6 +248,21 @@ gve_dev_close(struct rte_eth_dev *dev)
                        PMD_DRV_LOG(ERR, "Failed to stop dev.");
        }
 
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               gve_tx_queue_release(txq);
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               gve_rx_queue_release(rxq);
+       }
+
+       gve_free_qpls(priv);
+       rte_free(priv->adminq);
+
        return err;
 }
 
@@ -127,6 +299,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
        .dev_start            = gve_dev_start,
        .dev_stop             = gve_dev_stop,
        .dev_close            = gve_dev_close,
+       .rx_queue_setup       = gve_rx_queue_setup,
+       .tx_queue_setup       = gve_tx_queue_setup,
        .link_update          = gve_link_update,
        .mtu_set              = gve_dev_mtu_set,
 };
@@ -264,7 +438,9 @@ gve_setup_device_resources(struct gve_priv *priv)
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
+       uint16_t pages;
        int num_ntfy;
+       uint32_t i;
        int err;
 
        /* Set up the adminq */
@@ -315,10 +491,40 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
        PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
                    priv->max_nb_txq, priv->max_nb_rxq);
 
+       /* In GQI_QPL queue format:
+        * Allocate queue page lists according to max queue number
+        * tx qpl id should start from 0 while rx qpl id should start
+        * from priv->max_nb_txq
+        */
+       if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+               priv->qpl = rte_zmalloc("gve_qpl",
+                                       (priv->max_nb_txq + priv->max_nb_rxq) *
+                                       sizeof(struct gve_queue_page_list), 0);
+               if (priv->qpl == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
+                       err = -ENOMEM;
+                       goto free_adminq;
+               }
+
+               for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
+                       if (i < priv->max_nb_txq)
+                               pages = priv->tx_pages_per_qpl;
+                       else
+                               pages = priv->rx_data_slot_cnt;
+                       err = gve_alloc_queue_page_list(priv, i, pages);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
+                               goto err_qpl;
+                       }
+               }
+       }
+
 setup_device:
        err = gve_setup_device_resources(priv);
        if (!err)
                return 0;
+err_qpl:
+       gve_free_qpls(priv);
 free_adminq:
        gve_adminq_free(priv);
        return err;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 2ac2a46ac1..b0391f7df5 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -34,15 +34,35 @@ union gve_tx_desc {
        struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
 };
 
+struct gve_tx_iovec {
+       uint32_t iov_base; /* offset in fifo */
+       uint32_t iov_len;
+};
+
 struct gve_tx_queue {
        volatile union gve_tx_desc *tx_desc_ring;
        const struct rte_memzone *mz;
        uint64_t tx_ring_phys_addr;
+       struct rte_mbuf **sw_ring;
+       volatile rte_be32_t *qtx_tail;
+       volatile rte_be32_t *qtx_head;
 
+       uint32_t tx_tail;
        uint16_t nb_tx_desc;
+       uint16_t nb_free;
+       uint32_t next_to_clean;
+       uint16_t free_thresh;
 
        /* Only valid for DQO_QPL queue format */
+       uint16_t sw_tail;
+       uint16_t sw_ntc;
+       uint16_t sw_nb_free;
+       uint32_t fifo_size;
+       uint32_t fifo_head;
+       uint32_t fifo_avail;
+       uint64_t fifo_base;
        struct gve_queue_page_list *qpl;
+       struct gve_tx_iovec *iov_ring;
 
        uint16_t port_id;
        uint16_t queue_id;
@@ -56,6 +76,8 @@ struct gve_tx_queue {
 
        /* Only valid for DQO_RDA queue format */
        struct gve_tx_queue *complq;
+
+       uint8_t is_gqi_qpl;
 };
 
 struct gve_rx_queue {
@@ -64,9 +86,17 @@ struct gve_rx_queue {
        const struct rte_memzone *mz;
        const struct rte_memzone *data_mz;
        uint64_t rx_ring_phys_addr;
+       struct rte_mbuf **sw_ring;
+       struct rte_mempool *mpool;
 
+       uint16_t rx_tail;
        uint16_t nb_rx_desc;
+       uint16_t expected_seqno; /* the next expected seqno */
+       uint16_t free_thresh;
+       uint32_t next_avail;
+       uint32_t nb_avail;
 
+       volatile rte_be32_t *qrx_tail;
        volatile rte_be32_t *ntfy_addr;
 
        /* only valid for GQI_QPL queue format */
@@ -83,6 +113,8 @@ struct gve_rx_queue {
 
        /* Only valid for DQO_RDA queue format */
        struct gve_rx_queue *bufq;
+
+       uint8_t is_gqi_qpl;
 };
 
 struct gve_priv {
@@ -222,4 +254,20 @@ gve_clear_device_rings_ok(struct gve_priv *priv)
                                &priv->state_flags);
 }
 
+int
+gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
+                  unsigned int socket_id, const struct rte_eth_rxconf *conf,
+                  struct rte_mempool *pool);
+int
+gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
+                  unsigned int socket_id, const struct rte_eth_txconf *conf);
+
+void gve_tx_queue_release(void *txq);
+
+void gve_rx_queue_release(void *rxq);
+
+void gve_stop_tx_queues(struct rte_eth_dev *dev);
+
+void gve_stop_rx_queues(struct rte_eth_dev *dev);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
new file mode 100644
index 0000000000..e64a461253
--- /dev/null
+++ b/drivers/net/gve/gve_rx.c
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Intel Corporation
+ */
+
+#include "gve_ethdev.h"
+#include "base/gve_adminq.h"
+
+static inline void
+gve_reset_rxq(struct gve_rx_queue *rxq)
+{
+       struct rte_mbuf **sw_ring;
+       uint32_t size, i;
+
+       if (rxq == NULL) {
+               PMD_DRV_LOG(ERR, "Pointer to rxq is NULL");
+               return;
+       }
+       sw_ring = rxq->sw_ring;
+
+       size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc);
+       for (i = 0; i < size; i++)
+               ((volatile char *)rxq->rx_desc_ring)[i] = 0;
+
+       size = rxq->nb_rx_desc * sizeof(union gve_rx_data_slot);
+       for (i = 0; i < size; i++)
+               ((volatile char *)rxq->rx_data_ring)[i] = 0;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++)
+               sw_ring[i] = NULL;
+
+       rxq->rx_tail = 0;
+       rxq->next_avail = 0;
+       rxq->nb_avail = rxq->nb_rx_desc;
+       rxq->expected_seqno = 1;
+}
+
+static inline void
+gve_release_rxq_mbufs(struct gve_rx_queue *rxq)
+{
+       uint16_t i;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               if (rxq->sw_ring[i]) {
+                       rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+                       rxq->sw_ring[i] = NULL;
+               }
+       }
+
+       rxq->nb_avail = rxq->nb_rx_desc;
+}
+
+void
+gve_rx_queue_release(void *rxq)
+{
+       struct gve_rx_queue *q = rxq;
+
+       if (!q)
+               return;
+
+       if (q->is_gqi_qpl) {
+               gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+               q->qpl = NULL;
+       }
+
+       gve_release_rxq_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->data_mz);
+       rte_memzone_free(q->mz);
+       rte_memzone_free(q->qres_mz);
+       q->qres = NULL;
+       rte_free(q);
+}
+
+int
+gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+               uint16_t nb_desc, unsigned int socket_id,
+               const struct rte_eth_rxconf *conf, struct rte_mempool *pool)
+{
+       struct gve_priv *hw = dev->data->dev_private;
+       const struct rte_memzone *mz;
+       struct gve_rx_queue *rxq;
+       uint16_t free_thresh;
+       int err = 0;
+
+       if (nb_desc != hw->rx_desc_cnt) {
+               PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
+                           hw->rx_desc_cnt);
+       }
+       nb_desc = hw->rx_desc_cnt;
+
+       /* Free memory if needed. */
+       if (dev->data->rx_queues[queue_id]) {
+               gve_rx_queue_release(dev->data->rx_queues[queue_id]);
+               dev->data->rx_queues[queue_id] = NULL;
+       }
+
+       /* Allocate the RX queue data structure. */
+       rxq = rte_zmalloc_socket("gve rxq",
+                                sizeof(struct gve_rx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!rxq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue 
structure");
+               err = -ENOMEM;
+               goto err_rxq;
+       }
+
+       free_thresh = conf->rx_free_thresh ? conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
+       if (free_thresh >= nb_desc) {
+               PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc 
(%u) minus 3.",
+                           free_thresh, rxq->nb_rx_desc);
+               err = -EINVAL;
+               goto err_rxq;
+       }
+
+       rxq->nb_rx_desc = nb_desc;
+       rxq->free_thresh = free_thresh;
+       rxq->queue_id = queue_id;
+       rxq->port_id = dev->data->port_id;
+       rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;
+       rxq->is_gqi_qpl = hw->queue_format == GVE_GQI_QPL_FORMAT;
+       rxq->mpool = pool;
+       rxq->hw = hw;
+       rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];
+
+       rxq->rx_buf_len = rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+
+       /* Allocate software ring */
+       rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring", sizeof(struct rte_mbuf *) * nb_desc,
+                                         RTE_CACHE_LINE_SIZE, socket_id);
+       if (!rxq->sw_ring) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
+               err = -ENOMEM;
+               goto err_rxq;
+       }
+
+       mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+                                     nb_desc * sizeof(struct gve_rx_desc),
+                                     PAGE_SIZE, socket_id);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
+               err = -ENOMEM;
+               goto err_sw_ring;
+       }
+       rxq->rx_desc_ring = (struct gve_rx_desc *)mz->addr;
+       rxq->rx_ring_phys_addr = mz->iova;
+       rxq->mz = mz;
+
+       mz = rte_eth_dma_zone_reserve(dev, "gve rx data ring", queue_id,
+                                     sizeof(union gve_rx_data_slot) * nb_desc,
+                                     PAGE_SIZE, socket_id);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for RX data ring");
+               err = -ENOMEM;
+               goto err_rx_ring;
+       }
+       rxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;
+       rxq->data_mz = mz;
+       if (rxq->is_gqi_qpl) {
+               rxq->qpl = &hw->qpl[rxq->ntfy_id];
+               err = gve_adminq_register_page_list(hw, rxq->qpl);
+               if (err != 0) {
+                       PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+                       goto err_data_ring;
+               }
+       }
+
+       mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
+                                     sizeof(struct gve_queue_resources),
+                                     PAGE_SIZE, socket_id);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX 
resource");
+               err = -ENOMEM;
+               goto err_data_ring;
+       }
+       rxq->qres = (struct gve_queue_resources *)mz->addr;
+       rxq->qres_mz = mz;
+
+       gve_reset_rxq(rxq);
+
+       dev->data->rx_queues[queue_id] = rxq;
+
+       return 0;
+
+err_data_ring:
+       rte_memzone_free(rxq->data_mz);
+err_rx_ring:
+       rte_memzone_free(rxq->mz);
+err_sw_ring:
+       rte_free(rxq->sw_ring);
+err_rxq:
+       rte_free(rxq);
+       return err;
+}
+
+void
+gve_stop_rx_queues(struct rte_eth_dev *dev)
+{
+       struct gve_priv *hw = dev->data->dev_private;
+       struct gve_rx_queue *rxq;
+       uint16_t i;
+       int err;
+
+       err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
+       if (err != 0)
+               PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               gve_release_rxq_mbufs(rxq);
+               gve_reset_rxq(rxq);
+       }
+}
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
new file mode 100644
index 0000000000..b706b62e71
--- /dev/null
+++ b/drivers/net/gve/gve_tx.c
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Intel Corporation
+ */
+
+#include "gve_ethdev.h"
+#include "base/gve_adminq.h"
+
+static inline void
+gve_reset_txq(struct gve_tx_queue *txq)
+{
+       struct rte_mbuf **sw_ring;
+       uint32_t size, i;
+
+       if (txq == NULL) {
+               PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
+               return;
+       }
+       sw_ring = txq->sw_ring;
+
+       size = txq->nb_tx_desc * sizeof(union gve_tx_desc);
+       for (i = 0; i < size; i++)
+               ((volatile char *)txq->tx_desc_ring)[i] = 0;
+
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               sw_ring[i] = NULL;
+               if (txq->is_gqi_qpl) {
+                       txq->iov_ring[i].iov_base = 0;
+                       txq->iov_ring[i].iov_len = 0;
+               }
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_free = txq->nb_tx_desc - 1;
+       txq->next_to_clean = 0;
+
+       if (txq->is_gqi_qpl) {
+               txq->fifo_size = PAGE_SIZE * txq->hw->tx_pages_per_qpl;
+               txq->fifo_avail = txq->fifo_size;
+               txq->fifo_head = 0;
+               txq->fifo_base = (uint64_t)(txq->qpl->mz->addr);
+
+               txq->sw_tail = 0;
+               txq->sw_nb_free = txq->nb_tx_desc - 1;
+               txq->sw_ntc = 0;
+       }
+}
+
+static inline void
+gve_release_txq_mbufs(struct gve_tx_queue *txq)
+{
+       uint16_t i;
+
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               if (txq->sw_ring[i]) {
+                       rte_pktmbuf_free_seg(txq->sw_ring[i]);
+                       txq->sw_ring[i] = NULL;
+               }
+       }
+}
+
+void
+gve_tx_queue_release(void *txq)
+{
+       struct gve_tx_queue *q = txq;
+
+       if (!q)
+               return;
+
+       if (q->is_gqi_qpl) {
+               gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+               rte_free(q->iov_ring);
+               q->qpl = NULL;
+       }
+
+       gve_release_txq_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_memzone_free(q->qres_mz);
+       q->qres = NULL;
+       rte_free(q);
+}
+
+int
+gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
+                  unsigned int socket_id, const struct rte_eth_txconf *conf)
+{
+       struct gve_priv *hw = dev->data->dev_private;
+       const struct rte_memzone *mz;
+       struct gve_tx_queue *txq;
+       uint16_t free_thresh;
+       int err = 0;
+
+       if (nb_desc != hw->tx_desc_cnt) {
+               PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
+                           hw->tx_desc_cnt);
+       }
+       nb_desc = hw->tx_desc_cnt;
+
+       /* Free memory if needed. */
+       if (dev->data->tx_queues[queue_id]) {
+               gve_tx_queue_release(dev->data->tx_queues[queue_id]);
+               dev->data->tx_queues[queue_id] = NULL;
+       }
+
+       /* Allocate the TX queue data structure. */
+       txq = rte_zmalloc_socket("gve txq", sizeof(struct gve_tx_queue),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (!txq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for tx queue 
structure");
+               err = -ENOMEM;
+               goto err_txq;
+       }
+
+       free_thresh = conf->tx_free_thresh ? conf->tx_free_thresh : GVE_DEFAULT_TX_FREE_THRESH;
+       if (free_thresh >= nb_desc - 3) {
+               PMD_DRV_LOG(ERR, "tx_free_thresh (%u) must be less than nb_desc 
(%u) minus 3.",
+                           free_thresh, txq->nb_tx_desc);
+               err = -EINVAL;
+               goto err_txq;
+       }
+
+       txq->nb_tx_desc = nb_desc;
+       txq->free_thresh = free_thresh;
+       txq->queue_id = queue_id;
+       txq->port_id = dev->data->port_id;
+       txq->ntfy_id = queue_id;
+       txq->is_gqi_qpl = hw->queue_format == GVE_GQI_QPL_FORMAT;
+       txq->hw = hw;
+       txq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[txq->ntfy_id].id)];
+
+       /* Allocate software ring */
+       txq->sw_ring = rte_zmalloc_socket("gve tx sw ring",
+                                         sizeof(struct rte_mbuf *) * nb_desc,
+                                         RTE_CACHE_LINE_SIZE, socket_id);
+       if (!txq->sw_ring) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
+               err = -ENOMEM;
+               goto err_txq;
+       }
+
+       mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+                                     nb_desc * sizeof(union gve_tx_desc),
+                                     PAGE_SIZE, socket_id);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
+               err = -ENOMEM;
+               goto err_sw_ring;
+       }
+       txq->tx_desc_ring = (union gve_tx_desc *)mz->addr;
+       txq->tx_ring_phys_addr = mz->iova;
+       txq->mz = mz;
+
+       if (txq->is_gqi_qpl) {
+               txq->iov_ring = rte_zmalloc_socket("gve tx iov ring",
+                                                  sizeof(struct gve_tx_iovec) * nb_desc,
+                                                  RTE_CACHE_LINE_SIZE, socket_id);
+               if (!txq->iov_ring) {
+                       PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX 
ring");
+                       err = -ENOMEM;
+                       goto err_tx_ring;
+               }
+               txq->qpl = &hw->qpl[queue_id];
+               err = gve_adminq_register_page_list(hw, txq->qpl);
+               if (err != 0) {
+                       PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+                       goto err_iov_ring;
+               }
+       }
+
+       mz = rte_eth_dma_zone_reserve(dev, "txq_res", queue_id, sizeof(struct 
gve_queue_resources),
+                                     PAGE_SIZE, socket_id);
+       if (mz == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX 
resource");
+               err = -ENOMEM;
+               goto err_iov_ring;
+       }
+       txq->qres = (struct gve_queue_resources *)mz->addr;
+       txq->qres_mz = mz;
+
+       gve_reset_txq(txq);
+
+       dev->data->tx_queues[queue_id] = txq;
+
+       return 0;
+
+err_iov_ring:
+       if (txq->is_gqi_qpl)
+               rte_free(txq->iov_ring);
+err_tx_ring:
+       rte_memzone_free(txq->mz);
+err_sw_ring:
+       rte_free(txq->sw_ring);
+err_txq:
+       rte_free(txq);
+       return err;
+}
+
+void
+gve_stop_tx_queues(struct rte_eth_dev *dev)
+{
+       struct gve_priv *hw = dev->data->dev_private;
+       struct gve_tx_queue *txq;
+       uint16_t i;
+       int err;
+
+       err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
+       if (err != 0)
+               PMD_DRV_LOG(WARNING, "failed to destroy txqs");
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               gve_release_txq_mbufs(txq);
+               gve_reset_txq(txq);
+       }
+}
diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build
index d8ec64b3a3..af0010c01c 100644
--- a/drivers/net/gve/meson.build
+++ b/drivers/net/gve/meson.build
@@ -9,6 +9,8 @@ endif
 
 sources = files(
         'base/gve_adminq.c',
+        'gve_rx.c',
+        'gve_tx.c',
         'gve_ethdev.c',
 )
 includes += include_directories('base')
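
A note on the queue page list (QPL) id layout relied on above: per the
comment in gve_init_priv(), tx qpl ids occupy [0, max_nb_txq) and rx
qpl ids start at max_nb_txq. The sketch below (a hypothetical helper,
not part of the diff) spells out that mapping. The rx setup path in
this patch indexes the qpl array by rxq->ntfy_id, i.e.
num_ntfy_blks / 2 + queue_id, which matches this layout only when half
of the notify blocks equals max_nb_txq.

#include <stdbool.h>
#include <stdint.h>
#include "gve_ethdev.h"

/* Hypothetical helper, not in the patch: map a queue to its qpl id
 * under the layout described in gve_init_priv().
 */
static inline uint32_t
gve_qpl_id_sketch(const struct gve_priv *priv, bool is_rx, uint16_t queue_id)
{
	return is_rx ? (uint32_t)priv->max_nb_txq + queue_id : queue_id;
}
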
-- 
2.34.1
