From: Xuelin Shi <xuelin....@freescale.com>

The ixgbe descriptor rings are little endian, so enforce the byte-order
rules for data exchanged between the CPU and the NIC:

1. the CPU reading data owned by ixgbe must use rte_le_to_cpu_xx(...)
2. the CPU filling data for ixgbe must use rte_cpu_to_le_xx(...)
3. check the status the NIC writes back over PCI against a constant
   converted with rte_cpu_to_le_xx(...)
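To illustrate, the three rules in a minimal, self-contained sketch; the
hw_desc structure, its fields and the HW_STAT_DD flag are invented for
this example, and only the rte_byteorder.h helpers are the actual DPDK
API:

    /* hypothetical descriptor; device-owned fields are little endian */
    #include <stdint.h>
    #include <rte_byteorder.h>

    struct hw_desc {
            uint64_t buffer_addr;   /* filled by the CPU */
            uint32_t status;        /* written back by the NIC */
            uint16_t length;        /* written back by the NIC */
    };

    #define HW_STAT_DD 0x01         /* "descriptor done", CPU byte order */

    /* rule 1: CPU reads data owned by the device -> LE to CPU order */
    static inline uint16_t desc_len(volatile struct hw_desc *d)
    {
            return rte_le_to_cpu_16(d->length);
    }

    /* rule 2: CPU fills data for the device -> CPU order to LE */
    static inline void desc_fill(volatile struct hw_desc *d, uint64_t dma)
    {
            d->buffer_addr = rte_cpu_to_le_64(dma);
    }

    /*
     * rule 3: test the status word against a converted constant; the
     * swap of HW_STAT_DD folds at compile time, so big endian CPUs
     * pay no per-descriptor byte swap in the hot path.
     */
    static inline int desc_done(volatile struct hw_desc *d)
    {
            return !!(d->status & rte_cpu_to_le_32(HW_STAT_DD));
    }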
Signed-off-by: Xuelin Shi <xuelin.shi at freescale.com>
---
changes for v4:
  fix compiling error: cpu16 to cpu_16
  fix issues reported by checkpatch

 drivers/net/ixgbe/ixgbe_rxtx.c | 78 ++++++++++++++++++++++++++++--------------
 1 file changed, 52 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a7c94a9..f904b40 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -130,7 +130,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 
 	/* check DD bit on threshold descriptor */
 	status = txq->tx_ring[txq->tx_next_dd].wb.status;
-	if (! (status & IXGBE_ADVTXD_STAT_DD))
+	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
 		return 0;
 
 	/*
@@ -175,11 +175,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
-		txdp->read.buffer_addr = buf_dma_addr;
+		txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 		txdp->read.cmd_type_len =
-				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 		txdp->read.olinfo_status =
-				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 		rte_prefetch0(&(*pkts)->pool);
 	}
 }
@@ -195,11 +198,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
-	txdp->read.buffer_addr = buf_dma_addr;
+	txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 	txdp->read.cmd_type_len =
-			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+		rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 	txdp->read.olinfo_status =
-			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 	rte_prefetch0(&(*pkts)->pool);
 }
 
@@ -511,6 +517,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
+	uint32_t stat;
 
 	/* Determine the last descriptor needing to be cleaned */
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -519,7 +526,9 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 
 	/* Check to make sure the last descriptor to clean is done */
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+
+	stat = txr[desc_to_clean_to].wb.status;
+	if (!(stat & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
@@ -806,12 +815,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 */
 			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
 			txd->read.buffer_addr =
 				rte_cpu_to_le_64(buf_dma_addr);
 			txd->read.cmd_type_len =
 				rte_cpu_to_le_32(cmd_type_len | slen);
 			txd->read.olinfo_status =
 				rte_cpu_to_le_32(olinfo_status);
+
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
@@ -1062,14 +1073,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	int s[LOOK_AHEAD], nb_dd;
 #endif /* RTE_NEXT_ABI */
 	int i, j, nb_rx = 0;
+	uint32_t stat;
 
 	/* get references to current descriptor and S/W ring entry */
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
+	stat = rxdp->wb.upper.status_error;
 	/* check to make sure there is at least 1 packet to receive */
-	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+	if (!(stat & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 		return 0;
 
 	/*
@@ -1081,7 +1094,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	{
 		/* Read desc statuses backwards to avoid race condition */
 		for (j = LOOK_AHEAD-1; j >= 0; --j)
-			s[j] = rxdp[j].wb.upper.status_error;
+			s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
 
 #ifdef RTE_NEXT_ABI
 		for (j = LOOK_AHEAD - 1; j >= 0; --j)
@@ -1099,7 +1112,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
 			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+			pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+					rxq->crc_len;
+
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
 			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
@@ -1115,7 +1130,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 				ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
 #else /* RTE_NEXT_ABI */
 			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
-					rxdp[j].wb.lower.lo_dword.data);
+					rte_le_to_cpu_32(
+					rxdp[j].wb.lower.lo_dword.data));
+
 			/* reuse status field from scan list */
 			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
@@ -1123,12 +1140,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 #endif /* RTE_NEXT_ABI */
 
 			if (likely(pkt_flags & PKT_RX_RSS_HASH))
-				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+				mb->hash.rss = rte_le_to_cpu_32(
+						rxdp[j].wb.lower.hi_dword.rss);
+
 			else if (pkt_flags & PKT_RX_FDIR) {
-				mb->hash.fdir.hash =
-					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-						& IXGBE_ATR_HASH_MASK);
-				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+				mb->hash.fdir.hash = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+
+				mb->hash.fdir.id = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
 			}
 		}
@@ -1365,7 +1386,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxdp = &rx_ring[rx_id];
 		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 			break;
 
 		rxd = *rxdp;
@@ -1483,12 +1504,15 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 #endif /* RTE_NEXT_ABI */
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+			rxm->hash.rss = rte_le_to_cpu_32(
+						rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					& IXGBE_ATR_HASH_MASK);
-			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			rxm->hash.fdir.hash = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+
+			rxm->hash.fdir.id = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1998,7 +2022,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 	prev = (uint16_t) (txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
 		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
 		txe[i].mbuf = NULL;
 		txe[i].last_id = i;
 		txe[prev].next_id = i;
@@ -2604,7 +2628,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 	while ((desc < rxq->nb_rx_desc) &&
-		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+		(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
 		desc += IXGBE_RXQ_SCAN_INTERVAL;
 		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2629,7 +2654,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 		desc -= rxq->nb_rx_desc;
 
 	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+	return !!(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
 }
 
 void __attribute__((cold))
-- 
1.9.1
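A note on rule 3 as applied in the hunks above: writing the check as
status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD) rather than
rte_le_to_cpu_32(status) & IXGBE_TXD_STAT_DD moves the byte swap onto
the constant, where the compiler folds it at build time, so big endian
CPUs do not swap every descriptor in the RX/TX hot paths. The swap is
kept on the variable only where the full status word is consumed in
CPU order afterwards, as with s[j] feeding
rx_desc_status_to_pkt_flags() in ixgbe_rx_scan_hw_ring().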