From: Björn Töpel <bjorn.to...@intel.com>

When the zero-copy enabled XDP Tx ring is torn down, due to
configuration changes, outstanding frames on the hardware descriptor
ring are queued on the completion ring.

The completion ring has a back-pressure mechanism that will guarantee
that there is sufficient space on the ring.

Signed-off-by: Björn Töpel <bjorn.to...@intel.com>
Tested-by: Andrew Bowers <andrewx.bow...@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirs...@intel.com>
---
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 17 +++++++----
 .../ethernet/intel/i40e/i40e_txrx_common.h  |  2 ++
 drivers/net/ethernet/intel/i40e/i40e_xsk.c  | 30 +++++++++++++++++++
 3 files changed, 43 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 37bd4e50ccde..7f85d4ba8b54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 	unsigned long bi_size;
 	u16 i;
 
-	/* ring already cleared, nothing to do */
-	if (!tx_ring->tx_bi)
-		return;
+	if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+		i40e_xsk_clean_tx_ring(tx_ring);
+	} else {
+		/* ring already cleared, nothing to do */
+		if (!tx_ring->tx_bi)
+			return;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++)
-		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+		/* Free all the Tx ring sk_buffs */
+		for (i = 0; i < tx_ring->count; i++)
+			i40e_unmap_and_free_tx_resource(tx_ring,
+							&tx_ring->tx_bi[i]);
+	}
 
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_bi, 0, bi_size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index b5afd479a9c5..29c68b29d36f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -87,4 +87,6 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
 	}
 }
 
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+
 #endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 2ebfc78bbd09..d5a9f5b7cfa9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -830,3 +830,33 @@ int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
 
 	return 0;
 }
+
+/**
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
+ * @tx_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	struct i40e_tx_buffer *tx_bi;
+	u32 xsk_frames = 0;
+
+	while (ntc != ntu) {
+		tx_bi = &tx_ring->tx_bi[ntc];
+
+		if (tx_bi->xdpf)
+			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+
+		ntc++;
+		if (ntc >= tx_ring->count)
+			ntc = 0;
+	}
+
+	if (xsk_frames)
+		xsk_umem_complete_tx(umem, xsk_frames);
+}
-- 
2.17.1
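For context, the new i40e_xsk_clean_tx_ring() above walks every descriptor
still outstanding between next_to_clean and next_to_use: buffers that carry an
XDP frame (e.g. packets sent via XDP_REDIRECT) are released through the driver,
while buffers backed by AF_XDP umem Tx descriptors are only counted and then
handed back to user space in one xsk_umem_complete_tx() batch. Since space on
the completion ring was reserved when those descriptors were accepted for
transmission (the back-pressure mentioned above), the single batched completion
cannot overflow the ring. The stand-alone C sketch below mimics that walk;
struct mock_ring, mock_complete_tx() and the other mock_* names are
illustrative stand-ins, not i40e or kernel APIs.

/* Stand-alone illustration of the cleanup walk this patch adds.
 * All types and helpers are mock-ups for demonstration only; the real
 * code lives in i40e_xsk.c and uses the kernel's AF_XDP interfaces.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define RING_SIZE 8

struct mock_tx_buffer {
	bool has_xdpf;		/* frame owned by the driver (XDP_REDIRECT) */
};

struct mock_ring {
	struct mock_tx_buffer bi[RING_SIZE];
	uint16_t next_to_clean;
	uint16_t next_to_use;
	uint32_t completed;	/* stands in for the AF_XDP completion ring */
};

/* Mimics xsk_umem_complete_tx(): hand 'frames' buffers back to user space. */
static void mock_complete_tx(struct mock_ring *r, uint32_t frames)
{
	r->completed += frames;
}

/* Mimics i40e_clean_xdp_tx_buffer(): drop a driver-owned XDP frame. */
static void mock_clean_xdp_tx_buffer(struct mock_tx_buffer *bi)
{
	bi->has_xdpf = false;
}

/* Walk outstanding descriptors the same way i40e_xsk_clean_tx_ring() does. */
static void mock_clean_tx_ring(struct mock_ring *r)
{
	uint16_t ntc = r->next_to_clean, ntu = r->next_to_use;
	uint32_t xsk_frames = 0;

	while (ntc != ntu) {
		struct mock_tx_buffer *bi = &r->bi[ntc];

		if (bi->has_xdpf)
			mock_clean_xdp_tx_buffer(bi);	/* driver-owned frame */
		else
			xsk_frames++;			/* umem-backed frame */

		if (++ntc >= RING_SIZE)
			ntc = 0;
	}

	if (xsk_frames)
		mock_complete_tx(r, xsk_frames);	/* one batched completion */
}

int main(void)
{
	struct mock_ring r = {
		.next_to_clean = 6,	/* the walk wraps around the ring end */
		.next_to_use = 2,
	};

	r.bi[7].has_xdpf = true;	/* one redirected XDP frame ... */
	/* ... the other three outstanding slots are umem Tx descriptors */

	mock_clean_tx_ring(&r);
	printf("frames returned to completion ring: %u\n", r.completed);
	return 0;
}

Compiled with e.g. "gcc -Wall -o xsk_clean_sketch xsk_clean_sketch.c" (the file
name is arbitrary), the sketch reports the three umem-backed frames that end up
on the completion ring.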