The only user of the frag_size field in the XDP RxQ info is
bpf_xdp_frags_increase_tail(), which clearly expects the whole buffer size
there, not the DMA write size. The idpf driver configures the field under
a different assumption, which leads to negative tailroom.
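For illustration, here is a simplified sketch of the check the helper
performs (paraphrased, with hypothetical splitq numbers; see the actual
bpf_xdp_frags_increase_tail() for the real code):

  skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
  int tailroom;

  if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
          return -EOPNOTSUPP;

  /* Assume truesize == 4096, headroom == 1024 and rx_buf_size
   * (the DMA write size) == 3072. For a fully written frag,
   * skb_frag_off() == 1024 and skb_frag_size() == 3072, so
   * registering rx_buf_size as frag_size gives
   * 3072 - 3072 - 1024 == -1024.
   */
  tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
  if (offset > tailroom)
          return -EINVAL;

With the whole buffer size (4096 in this example) registered instead, the
same frag leaves tailroom == 0: no room to grow the tail, but no underflow
either.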
To make things worse, buffer sizes are not actually uniform in idpf when
splitq is enabled: there are several buffer queues, so rxq->rx_buf_size
is meaningless in that case.
Use the truesize of the first bufq in AF_XDP ZC, as there is only one.
Disable growing the tail for regular splitq.
Fixes: ac8a861f632e ("idpf: prepare structures to support XDP")
Signed-off-by: Larysa Zaremba <[email protected]>
---
drivers/net/ethernet/intel/idpf/xdp.c | 6 +++++-
drivers/net/ethernet/intel/idpf/xsk.c | 1 +
drivers/net/ethernet/intel/libeth/xsk.c | 1 +
include/net/libeth/xsk.h | 3 +++
4 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..7d91f21174de 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -46,11 +46,15 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
const struct idpf_vport *vport = rxq->q_vector->vport;
bool split = idpf_is_queue_model_split(vport->rxq_model);
+ u32 frag_size = 0;
int err;

+ if (idpf_queue_has(XSK, rxq))
+ frag_size = rxq->bufq_sets[0].bufq.truesize;
+
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
rxq->q_vector->napi.napi_id,
- rxq->rx_buf_size);
+ frag_size);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index fd2cc43ab43c..95a665cb2f33 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -401,6 +401,7 @@ int idpf_xskfq_init(struct idpf_buf_queue *bufq)
bufq->pending = fq.pending;
bufq->thresh = fq.thresh;
bufq->rx_buf_size = fq.buf_len;
+ bufq->truesize = fq.truesize;

if (!idpf_xskfq_refill(bufq))
netdev_err(bufq->pool->netdev,
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
index 846e902e31b6..4882951d5c9c 100644
--- a/drivers/net/ethernet/intel/libeth/xsk.c
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -167,6 +167,7 @@ int libeth_xskfq_create(struct libeth_xskfq *fq)
fq->pending = fq->count;
fq->thresh = libeth_xdp_queue_threshold(fq->count);
fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+ fq->truesize = xsk_pool_get_rx_frag_step(fq->pool);

return 0;
}
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
index 481a7b28e6f2..82b5d21aae87 100644
--- a/include/net/libeth/xsk.h
+++ b/include/net/libeth/xsk.h
@@ -597,6 +597,7 @@ __libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
* @pending: current number of XSkFQEs to refill
* @thresh: threshold below which the queue is refilled
* @buf_len: HW-writeable length per each buffer
+ * @truesize: step between consecutive buffers, 0 if none exists
* @nid: ID of the closest NUMA node with memory
*/
struct libeth_xskfq {
@@ -614,6 +615,8 @@ struct libeth_xskfq {
u32 thresh;

u32 buf_len;
+ u32 truesize;
+
int nid;
};
--
2.52.0