From: Xuelin Shi <xuelin....@freescale.com>

Enforce the byte-order rules for the CPU and the ixgbe device exchanging descriptor data:
1. descriptor fields written by ixgbe and read by the CPU must go through rte_le_to_cpu_xx(...)
2. descriptor fields filled in by the CPU for ixgbe must go through rte_cpu_to_le_xx(...)

Signed-off-by: Xuelin Shi <xuelin.shi at freescale.com>
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 115 ++++++++++++++++++++++++--------------
 1 file changed, 72 insertions(+), 43 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e6766b3..fb01a4a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -140,7 +140,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
        int i;

        /* check DD bit on threshold descriptor */
-       status = txq->tx_ring[txq->tx_next_dd].wb.status;
+       status = rte_le_to_cpu_32(txq->tx_ring[txq->tx_next_dd].wb.status);
        if (! (status & IXGBE_ADVTXD_STAT_DD))
                return 0;

@@ -186,11 +186,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
                pkt_len = (*pkts)->data_len;

                /* write data to descriptor */
-               txdp->read.buffer_addr = buf_dma_addr;
+               txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
                txdp->read.cmd_type_len =
-                               ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+                       rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
                txdp->read.olinfo_status =
-                               (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+                       rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
                rte_prefetch0(&(*pkts)->pool);
        }
 }
@@ -206,11 +209,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        pkt_len = (*pkts)->data_len;

        /* write data to descriptor */
-       txdp->read.buffer_addr = buf_dma_addr;
+       txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
        txdp->read.cmd_type_len =
-                       ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+                       rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
        txdp->read.olinfo_status =
-                       (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+                       rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
        rte_prefetch0(&(*pkts)->pool);
 }

@@ -297,7 +303,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * a divisor of the ring size
                 */
                tx_r[txq->tx_next_rs].read.cmd_type_len |=
-                       rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+                               rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                txq->tx_tail = 0;
@@ -316,7 +322,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         */
        if (txq->tx_tail > txq->tx_next_rs) {
                tx_r[txq->tx_next_rs].read.cmd_type_len |=
-                       rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+                               rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                                                txq->tx_rs_thresh);
                if (txq->tx_next_rs >= txq->nb_tx_desc)
@@ -517,6 +523,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
        uint16_t nb_tx_desc = txq->nb_tx_desc;
        uint16_t desc_to_clean_to;
        uint16_t nb_tx_to_clean;
+       uint32_t stat;

        /* Determine the last descriptor needing to be cleaned */
        desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -525,7 +532,9 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)

        /* Check to make sure the last descriptor to clean is done */
        desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-       if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+
+       stat = rte_le_to_cpu_32(txr[desc_to_clean_to].wb.status);
+       if (! (stat & IXGBE_TXD_STAT_DD))
        {
                PMD_TX_FREE_LOG(DEBUG,
                                "TX descriptor %4u is not done"
@@ -556,7 +565,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
         * up to the last descriptor with the RS bit set
         * are done. Only reset the threshold descriptor.
         */
-       txr[desc_to_clean_to].wb.status = 0;
+       txr[desc_to_clean_to].wb.status = rte_cpu_to_le_32(0);

        /* Update the txq to reflect the last descriptor that was cleaned */
        txq->last_desc_cleaned = desc_to_clean_to;
@@ -813,12 +822,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         */
                        slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
                        txd->read.buffer_addr =
-                               rte_cpu_to_le_64(buf_dma_addr);
+                                       rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
-                               rte_cpu_to_le_32(cmd_type_len | slen);
+                                       rte_cpu_to_le_32(cmd_type_len | slen);
                        txd->read.olinfo_status =
-                               rte_cpu_to_le_32(olinfo_status);
+                                       rte_cpu_to_le_32(olinfo_status);
+
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
@@ -958,14 +969,16 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
        uint64_t pkt_flags;
        int s[LOOK_AHEAD], nb_dd;
        int i, j, nb_rx = 0;
+       uint32_t stat;


        /* get references to current descriptor and S/W ring entry */
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];

+       stat = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
        /* check to make sure there is at least 1 packet to receive */
-       if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+       if (! (stat & IXGBE_RXDADV_STAT_DD))
                return 0;

        /*
@@ -977,7 +990,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
        {
                /* Read desc statuses backwards to avoid race condition */
                for (j = LOOK_AHEAD-1; j >= 0; --j)
-                       s[j] = rxdp[j].wb.upper.status_error;
+                       s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);

                /* Compute how many status bits were set */
                nb_dd = 0;
@@ -988,28 +1001,36 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)

                /* Translate descriptor info to mbuf format */
                for (j = 0; j < nb_dd; ++j) {
+                       uint16_t tmp16;
+                       uint32_t tmp32;
+
                        mb = rxep[j].mbuf;
-                       pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+                       tmp16 = rte_le_to_cpu_16(rxdp[j].wb.upper.length);
+                       pkt_len = tmp16 - rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
-                       mb->vlan_tci = rxdp[j].wb.upper.vlan;
                        mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);

                        /* convert descriptor fields to rte mbuf flags */
-                       pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
+                       tmp32 = rte_le_to_cpu_32(
                                        rxdp[j].wb.lower.lo_dword.data);
+                       pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(tmp32);
+
                        /* reuse status field from scan list */
                        pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
                        pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
                        mb->ol_flags = pkt_flags;

                        if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                               mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+                               mb->hash.rss = rte_le_to_cpu_32(
+                                               rxdp[j].wb.lower.hi_dword.rss);
                        else if (pkt_flags & PKT_RX_FDIR) {
-                               mb->hash.fdir.hash =
-                                       (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-                                               & IXGBE_ATR_HASH_MASK);
-                               mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+                               tmp16 = rte_le_to_cpu_16(
+                                       rxdp[j].wb.lower.hi_dword.csum_ip.csum);
+                               mb->hash.fdir.hash =
+                                               tmp16 & IXGBE_ATR_HASH_MASK;
+
+                               tmp16 = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+                               mb->hash.fdir.id = rte_le_to_cpu_16(tmp16);
                        }
                }

@@ -1063,8 +1084,8 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)

                /* populate the descriptors */
                dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
-               rxdp[i].read.hdr_addr = dma_addr;
-               rxdp[i].read.pkt_addr = dma_addr;
+               rxdp[i].read.hdr_addr = rte_cpu_to_le_64(dma_addr);
+               rxdp[i].read.pkt_addr = rte_cpu_to_le_64(dma_addr);
        }

        /* update tail pointer */
@@ -1221,8 +1242,8 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * using invalid descriptor fields when read from rxd.
                 */
                rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+               staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+               if (! (staterr & IXGBE_RXDADV_STAT_DD))
                        break;
                rxd = *rxdp;

@@ -1326,12 +1347,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       rxm->hash.rss = rte_le_to_cpu_32(
+                                               rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
-                       rxm->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
-                       rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                       uint16_t tmp16;
+
+                       tmp16 = rte_le_to_cpu_16(
+                                       rxd.wb.lower.hi_dword.csum_ip.csum);
+                       rxm->hash.fdir.hash = tmp16 & IXGBE_ATR_HASH_MASK;
+
+                       rxm->hash.fdir.id = rte_le_to_cpu_16(
+                                       rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1413,8 +1439,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * using invalid descriptor fields when read from rxd.
                 */
                rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+               staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+               if (! (staterr & IXGBE_RXDADV_STAT_DD))
                        break;
                rxd = *rxdp;

@@ -1570,13 +1596,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss = rte_le_to_cpu_32(
+                                               rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
-                       first_seg->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
-                       first_seg->hash.fdir.id =
-                               rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                       first_seg->hash.fdir.hash = rte_le_to_cpu_16(
+                               rxd.wb.lower.hi_dword.csum_ip.csum) &
+                               IXGBE_ATR_HASH_MASK;
+                       first_seg->hash.fdir.id = rte_le_to_cpu_16(
+                               rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }

                /* Prefetch data of first segment, if configured to do so. */
@@ -1742,7 +1769,7 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
        prev = (uint16_t) (txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-               txd->wb.status = IXGBE_TXD_STAT_DD;
+               txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
@@ -2306,7 +2333,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);

        while ((desc < rxq->nb_rx_desc) &&
-               (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+               (rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+                                 IXGBE_RXDADV_STAT_DD)) {
                desc += IXGBE_RXQ_SCAN_INTERVAL;
                rxdp += IXGBE_RXQ_SCAN_INTERVAL;
                if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2331,7 +2359,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
                desc -= rxq->nb_rx_desc;

        rxdp = &rxq->rx_ring[desc];
-       return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+       return !!(rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+                       IXGBE_RXDADV_STAT_DD);
 }

 void
-- 
1.9.1

Reply via email to