Both axgbe_dev_rx_descriptor_status() and
axgbe_dev_tx_descriptor_status() compute the descriptor address as
desc[idx + offset] where idx is the masked ring position. When
idx + offset >= nb_desc, this reads past the end of the
descriptor ring buffer.

Fix by incorporating the offset into the index before masking,
using AXGBE_GET_DESC_IDX() which wraps with (nb_desc - 1).

Signed-off-by: Stephen Hemminger <[email protected]>
---
 drivers/net/axgbe/axgbe_rxtx.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 51a1aeb0b9..6f750d6ede 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -1205,8 +1205,8 @@ axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
        if (offset >= rxq->nb_desc - rxq->dirty)
                return RTE_ETH_RX_DESC_UNAVAIL;
 
-       idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
-       desc = &rxq->desc[idx + offset];
+       idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur + offset);
+       desc = &rxq->desc[idx];
 
        if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                return RTE_ETH_RX_DESC_DONE;
@@ -1228,8 +1228,8 @@ axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
        if (offset >= txq->nb_desc - txq->dirty)
                return RTE_ETH_TX_DESC_UNAVAIL;
 
-       idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
-       desc = &txq->desc[idx + offset];
+       idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1 + offset);
+       desc = &txq->desc[idx];
 
        if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
                return RTE_ETH_TX_DESC_DONE;
-- 
2.51.0

Reply via email to