The driver's tx napi is central to XSK: it is responsible for pulling data off the XSK tx queue and sending it out. To start transmission, we first need to trigger tx napi.
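For illustration only (not part of the patch), here is a toy userspace model of the polling contract the new xsk_busy path relies on: a poll handler that consumes its whole budget is called again by the napi core, while one that returns less may complete napi. All names below (poll_tx, xsk_xmit, the pending counter) are made-up stand-ins, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Pretend backlog of descriptors queued on the XSK tx ring. */
static int pending = 100;

/* Stand-in for virtnet_xsk_xmit(): send at most budget packets and
 * report "busy" when the whole budget was used. */
static bool xsk_xmit(int budget, int *sent)
{
	*sent = pending < budget ? pending : budget;
	pending -= *sent;
	return *sent == budget;
}

/* Stand-in for virtnet_poll_tx(): returning the full budget keeps the
 * poller scheduled, mirroring the xsk_busy early return. */
static int poll_tx(int budget)
{
	int sent;

	if (xsk_xmit(budget, &sent))
		return budget;	/* busy: napi will call us again */

	return 0;		/* done: napi can complete */
}

int main(void)
{
	int budget = 64;

	/* The napi core keeps polling while the budget is exhausted. */
	while (poll_tx(budget) == budget)
		printf("budget used, pending now %d\n", pending);

	printf("drained, pending %d\n", pending);
	return 0;
}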
Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 121 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 119 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2767338dc060..7e811f392768 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -535,10 +535,13 @@ enum virtnet_xmit_type {
 	VIRTNET_XMIT_TYPE_SKB,
 	VIRTNET_XMIT_TYPE_XDP,
 	VIRTNET_XMIT_TYPE_DMA,
+	VIRTNET_XMIT_TYPE_XSK,
 };
 
 #define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
-				| VIRTNET_XMIT_TYPE_DMA)
+				| VIRTNET_XMIT_TYPE_DMA | VIRTNET_XMIT_TYPE_XSK)
+
+#define VIRTIO_XSK_FLAG_OFFSET 4
 
 static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
 {
@@ -768,6 +771,10 @@ static void __free_old_xmit(struct send_queue *sq, bool in_napi,
 			 * func again.
 			 */
 			goto retry;
+
+		case VIRTNET_XMIT_TYPE_XSK:
+			/* Make gcc happy. DONE in subsequent commit */
+			break;
 		}
 	}
 }
@@ -1265,6 +1272,102 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
+static void *virtnet_xsk_to_ptr(u32 len)
+{
+	unsigned long p;
+
+	p = len << VIRTIO_XSK_FLAG_OFFSET;
+
+	return virtnet_xmit_ptr_mix((void *)p, VIRTNET_XMIT_TYPE_XSK);
+}
+
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+	sg->dma_address = addr;
+	sg->length = len;
+}
+
+static int virtnet_xsk_xmit_one(struct send_queue *sq,
+				struct xsk_buff_pool *pool,
+				struct xdp_desc *desc)
+{
+	struct virtnet_info *vi;
+	dma_addr_t addr;
+
+	vi = sq->vq->vdev->priv;
+
+	addr = xsk_buff_raw_get_dma(pool, desc->addr);
+	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
+
+	sg_init_table(sq->sg, 2);
+
+	sg_fill_dma(sq->sg, sq->xsk.hdr_dma_address, vi->hdr_len);
+	sg_fill_dma(sq->sg + 1, addr, desc->len);
+
+	return virtqueue_add_outbuf(sq->vq, sq->sg, 2,
+				    virtnet_xsk_to_ptr(desc->len), GFP_ATOMIC);
+}
+
+static int virtnet_xsk_xmit_batch(struct send_queue *sq,
+				  struct xsk_buff_pool *pool,
+				  unsigned int budget,
+				  u64 *kicks)
+{
+	struct xdp_desc *descs = pool->tx_descs;
+	bool kick = false;
+	u32 nb_pkts, i;
+	int err;
+
+	budget = min_t(u32, budget, sq->vq->num_free);
+
+	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
+	if (!nb_pkts)
+		return 0;
+
+	for (i = 0; i < nb_pkts; i++) {
+		err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
+		if (unlikely(err)) {
+			xsk_tx_completed(sq->xsk.pool, nb_pkts - i);
+			break;
+		}
+
+		kick = true;
+	}
+
+	if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+		(*kicks)++;
+
+	return i;
+}
+
+static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
+			     int budget)
+{
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	struct virtnet_sq_free_stats stats = {};
+	u64 kicks = 0;
+	int sent;
+
+	__free_old_xmit(sq, true, &stats);
+
+	sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
+
+	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+		check_sq_full_and_disable(vi, vi->dev, sq);
+
+	u64_stats_update_begin(&sq->stats.syncp);
+	u64_stats_add(&sq->stats.packets, stats.packets);
+	u64_stats_add(&sq->stats.bytes, stats.bytes);
+	u64_stats_add(&sq->stats.kicks, kicks);
+	u64_stats_add(&sq->stats.xdp_tx, sent);
+	u64_stats_update_end(&sq->stats.syncp);
+
+	if (xsk_uses_need_wakeup(pool))
+		xsk_set_tx_need_wakeup(pool);
+
+	return sent == budget;
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				  struct send_queue *sq,
 				  struct xdp_frame *xdpf)
@@ -2707,6 +2810,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	bool xsk_busy = false;
 	int opaque;
 	bool done;
 
@@ -2719,7 +2823,11 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	virtqueue_disable_cb(sq->vq);
-	free_old_xmit(sq, true);
+
+	if (sq->xsk.pool)
+		xsk_busy = virtnet_xsk_xmit(sq, sq->xsk.pool, budget);
+	else
+		free_old_xmit(sq, true);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
 		if (netif_tx_queue_stopped(txq)) {
@@ -2730,6 +2838,11 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 			netif_tx_wake_queue(txq);
 	}
 
+	if (xsk_busy) {
+		__netif_tx_unlock(txq);
+		return budget;
+	}
+
 	opaque = virtqueue_enable_cb_prepare(sq->vq);
 
 	done = napi_complete_done(napi, 0);
@@ -5715,6 +5828,10 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
 	case VIRTNET_XMIT_TYPE_DMA:
 		virtnet_sq_unmap(sq, &buf);
 		goto retry;
+
+	case VIRTNET_XMIT_TYPE_XSK:
+		/* Make gcc happy. DONE in subsequent commit */
+		break;
 	}
 }
 
-- 
2.32.0.3.g01195cf9f
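A side note on the completion token used above: virtnet_xsk_to_ptr() packs the frame length into the opaque pointer handed to virtqueue_add_outbuf(), shifted above the low type bits (hence the VIRTIO_XSK_FLAG_OFFSET of 4), so the completion path (filled in by a subsequent commit) can recover the length without any per-packet allocation. A minimal standalone sketch of that round trip follows; the concrete bit value for the XSK type and the helper names are assumptions for illustration, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define XMIT_TYPE_XSK	0x8UL	/* assumed bit value of VIRTNET_XMIT_TYPE_XSK */
#define XMIT_TYPE_MASK	0xfUL	/* low bits carry the xmit type */
#define XSK_FLAG_OFFSET	4	/* mirrors VIRTIO_XSK_FLAG_OFFSET */

/* Encode: the length goes above the type bits and the XSK type bit is
 * OR'ed into the same word, like virtnet_xmit_ptr_mix() over
 * virtnet_xsk_to_ptr()'s shifted length. */
static void *xsk_len_to_token(uint32_t len)
{
	return (void *)(((unsigned long)len << XSK_FLAG_OFFSET) | XMIT_TYPE_XSK);
}

/* Decode: strip the type bits (as virtnet_xmit_ptr_strip() would) and
 * shift the length back down. */
static uint32_t xsk_token_to_len(void *token)
{
	return (uint32_t)(((unsigned long)token & ~XMIT_TYPE_MASK)
			  >> XSK_FLAG_OFFSET);
}

int main(void)
{
	void *token = xsk_len_to_token(1500);

	/* Prints: type bits 0x8, len 1500 */
	printf("type bits 0x%lx, len %u\n",
	       (unsigned long)token & XMIT_TYPE_MASK, xsk_token_to_len(token));
	return 0;
}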