From: Long Li <lon...@microsoft.com>

Several data path structures are zero-initialized with "= {0}", which
typically compiles to a memset even though every field that is actually
consumed is assigned explicitly afterwards. Those memsets waste CPU
cycles on the hot path.

Remove the unnecessary zero-initialization. Because tx_oob is no longer
cleared up front, its TCP and UDP checksum offload bits are now set to 0
explicitly in the corresponding else paths. Also replace the
gdma_posted_wqe_info output parameter of gdma_post_work_request() with a
plain wqe_size_in_bu pointer, since that is the only field callers read;
this drops the per-call bookkeeping of wqe_index and
unmasked_queue_offset as well.
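
For illustration only (not part of the patch): a minimal sketch of the
pattern being removed, using a hypothetical struct foo, showing why the
braced initializer is wasted work when every field that is read gets
assigned anyway.

	/* Hypothetical example type, only for illustration. */
	struct foo {
		int a;
		int b;
	};

	extern void consume(struct foo *f);

	void post_before(void)
	{
		struct foo f = {0};	/* zero-fills the whole struct
					 * (often a memset for larger
					 * types), then both stores
					 * overwrite that work
					 */
		f.a = 1;
		f.b = 2;
		consume(&f);
	}

	void post_after(void)
	{
		struct foo f;		/* no zero-fill emitted */
		f.a = 1;		/* assign only the fields that
					 * consume() actually reads
					 */
		f.b = 2;
		consume(&f);
	}

Note the correctness obligation this creates: any field the consumer
reads that previously relied on being zeroed (e.g. the checksum bits in
tx_oob) must now be assigned explicitly on every path.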

Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Cc: sta...@dpdk.org
Signed-off-by: Long Li <lon...@microsoft.com>
---
 drivers/net/mana/gdma.c | 11 ++---------
 drivers/net/mana/mana.h |  2 +-
 drivers/net/mana/rx.c   |  9 ++++-----
 drivers/net/mana/tx.c   | 17 ++++++++++-------
 4 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 3d4039014f..0922463ef9 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -123,7 +123,7 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
 int
 gdma_post_work_request(struct mana_gdma_queue *queue,
                       struct gdma_work_request *work_req,
-                      struct gdma_posted_wqe_info *wqe_info)
+                      uint32_t *wqe_size_in_bu)
 {
        uint32_t client_oob_size =
                work_req->inline_oob_size_in_bytes >
@@ -149,14 +149,7 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
        DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
                client_oob_size, sgl_data_size, wqe_size);
 
-       if (wqe_info) {
-               wqe_info->wqe_index =
-                       ((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
-                        (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-               wqe_info->unmasked_queue_offset = queue->head;
-               wqe_info->wqe_size_in_bu =
-                       wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-       }
+       *wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
 
        wq_buffer_pointer = gdma_get_wqe_pointer(queue);
        wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 4a05238a96..d4a1ba8492 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -459,7 +459,7 @@ int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm);
 
 int gdma_post_work_request(struct mana_gdma_queue *queue,
                           struct gdma_work_request *work_req,
-                          struct gdma_posted_wqe_info *wqe_info);
+                          uint32_t *wqe_size_in_bu);
 uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);
 
 uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 55247889c1..bdbd11c5f9 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -52,8 +52,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 {
        struct rte_mbuf *mbuf = NULL;
        struct gdma_sgl_element sgl[1];
-       struct gdma_work_request request = {0};
-       struct gdma_posted_wqe_info wqe_info = {0};
+       struct gdma_work_request request;
+       uint32_t wqe_size_in_bu;
        struct mana_priv *priv = rxq->priv;
        int ret;
        struct mana_mr_cache *mr;
@@ -72,7 +72,6 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
        }
 
        request.gdma_header.struct_size = sizeof(request);
-       wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
        sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
        sgl[0].memory_key = mr->lkey;
@@ -87,14 +86,14 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
        request.flags = 0;
        request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
-       ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);
+       ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu);
        if (!ret) {
                struct mana_rxq_desc *desc =
                        &rxq->desc_ring[rxq->desc_ring_head];
 
                /* update queue for tracking pending packets */
                desc->pkt = mbuf;
-               desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+               desc->wqe_size_in_bu = wqe_size_in_bu;
                rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
        } else {
                DRV_LOG(ERR, "failed to post recv ret %d", ret);
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 300bf27cc1..a7ee47c582 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -208,8 +208,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) {
                struct rte_mbuf *m_pkt = tx_pkts[pkt_idx];
                struct rte_mbuf *m_seg = m_pkt;
-               struct transmit_oob_v2 tx_oob = {0};
-               struct one_sgl sgl = {0};
+               struct transmit_oob_v2 tx_oob;
+               struct one_sgl sgl;
                uint16_t seg_idx;
 
                /* Drop the packet if it exceeds max segments */
@@ -263,6 +263,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        tx_oob.short_oob.tx_compute_TCP_checksum = 1;
                        tx_oob.short_oob.tx_transport_header_offset =
                                m_pkt->l2_len + m_pkt->l3_len;
+               } else {
+                       tx_oob.short_oob.tx_compute_TCP_checksum = 0;
                }
 
                if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
@@ -301,6 +303,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        }
 
                        tx_oob.short_oob.tx_compute_UDP_checksum = 1;
+               } else {
+                       tx_oob.short_oob.tx_compute_UDP_checksum = 0;
                }
 
                tx_oob.short_oob.suppress_tx_CQE_generation = 0;
@@ -355,11 +359,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                if (seg_idx != m_pkt->nb_segs)
                        continue;
 
-               struct gdma_work_request work_req = {0};
-               struct gdma_posted_wqe_info wqe_info = {0};
+               struct gdma_work_request work_req;
+               uint32_t wqe_size_in_bu;
 
                work_req.gdma_header.struct_size = sizeof(work_req);
-               wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
                work_req.sgl = sgl.gdma_sgl;
                work_req.num_sgl_elements = m_pkt->nb_segs;
@@ -370,14 +373,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
                ret = gdma_post_work_request(&txq->gdma_sq, &work_req,
-                                            &wqe_info);
+                                            &wqe_size_in_bu);
                if (!ret) {
                        struct mana_txq_desc *desc =
                                &txq->desc_ring[txq->desc_ring_head];
 
                        /* Update queue for tracking pending requests */
                        desc->pkt = m_pkt;
-                       desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+                       desc->wqe_size_in_bu = wqe_size_in_bu;
                        txq->desc_ring_head =
                                (txq->desc_ring_head + 1) % txq->num_desc;
 
-- 
2.32.0
