On 7/4/2025 9:18 AM, Michal Kubiak wrote:
> @@ -1075,16 +780,17 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
> static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> {
> unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
> - unsigned int offset = rx_ring->rx_offset;
> - struct xdp_buff *xdp = &rx_ring->xdp;
> struct ice_tx_ring *xdp_ring = NULL;
> struct bpf_prog *xdp_prog = NULL;
> u32 ntc = rx_ring->next_to_clean;
> + LIBETH_XDP_ONSTACK_BUFF(xdp);
> u32 cached_ntu, xdp_verdict;
> u32 cnt = rx_ring->count;
> u32 xdp_xmit = 0;
> bool failure;
>
> + libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
> +
> xdp_prog = READ_ONCE(rx_ring->xdp_prog);
> if (xdp_prog) {
> xdp_ring = rx_ring->xdp_ring;
> @@ -1094,7 +800,7 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> /* start the loop to process Rx packets bounded by 'budget' */
> while (likely(total_rx_pkts < (unsigned int)budget)) {
> union ice_32b_rx_flex_desc *rx_desc;
> - struct ice_rx_buf *rx_buf;
> + struct libeth_fqe *rx_buf;
> struct sk_buff *skb;
> unsigned int size;
> u16 stat_err_bits;
> @@ -1124,19 +830,10 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> ICE_RX_FLX_DESC_PKT_LEN_M;
>
> /* retrieve a buffer from the ring */
> - rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
> -
> - if (!xdp->data) {
> - void *hard_start;
> -
> -		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
> -			     offset;
> -		xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
> - xdp_buff_clear_frags_flag(xdp);
> - } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
> -		ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
> + rx_buf = &rx_ring->rx_fqes[ntc];
> + if (!libeth_xdp_process_buff(xdp, rx_buf, size))
> break;
> - }
> +
> if (++ntc == cnt)
> ntc = 0;
>
> @@ -1144,27 +841,35 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> if (ice_is_non_eop(rx_ring, rx_desc))
> continue;
>
> - ice_get_pgcnts(rx_ring);
> 	xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
> if (xdp_verdict == ICE_XDP_PASS)
> goto construct_skb;
> - total_rx_bytes += xdp_get_buff_len(xdp);
> - total_rx_pkts++;
>
> - ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
> + if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
> + xdp_xmit |= xdp_verdict;
> + total_rx_bytes += xdp_get_buff_len(&xdp->base);
> + total_rx_pkts++;
>
> + xdp->data = NULL;
> + rx_ring->first_desc = ntc;
> + rx_ring->nr_frags = 0;
> continue;
> construct_skb:
> - skb = ice_build_skb(rx_ring, xdp);
> + skb = xdp_build_skb_from_buff(&xdp->base);
> +
> /* exit if we failed to retrieve a buffer */
> if (!skb) {
> 			rx_ring->ring_stats->rx_stats.alloc_page_failed++;

This is not your fault, but we've been incorrectly incrementing
alloc_page_failed here instead of alloc_buf_failed.

> 			xdp_verdict = ICE_XDP_CONSUMED;

xdp_verdict is no longer used, so I don't think we need to modify it
further here. It was previously being used as part of the call to
ice_put_rx_mbuf.

> +			xdp->data = NULL;
> +			rx_ring->first_desc = ntc;
> +			rx_ring->nr_frags = 0;
> +			break;
> 		}
> -		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
>
> -		if (!skb)
> -			break;
> +		xdp->data = NULL;
> +		rx_ring->first_desc = ntc;
> +		rx_ring->nr_frags = 0;
>

The failure case for !skb does the same as this, so would it make sense
to move this block up to before the !skb check and just test the skb
pointer afterwards?

> 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
> 		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0
OpenPGP_signature.asc
Description: OpenPGP digital signature
