Introduce tun_ring_consume() that wraps ptr_ring_consume() and calls
__tun_wake_queue(). The latter wakes the stopped netdev subqueue once
half of the ring capacity has been consumed, tracked via the new
cons_cnt field in tun_file. When the ring is empty the queue is also
woken to handle potential races.

Without the corresponding queue stopping (introduced in a subsequent
commit), this patch alone causes no regression for a tap setup sending
to a qemu VM: 1.151 Mpps to 1.153 Mpps.

Details: AMD Ryzen 5 5600X at 4.3 GHz, 3200 MHz RAM, isolated QEMU
threads, pktgen sender; Avg over 20 runs @ 100,000,000 packets;
SRSO and spectre v2 mitigations disabled.

Co-developed-by: Tim Gebauer <[email protected]>
Signed-off-by: Tim Gebauer <[email protected]>
Signed-off-by: Simon Schippers <[email protected]>
---
 drivers/net/tun.c | 40 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 4 deletions(-)

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c492fda6fc15..a82d665dab5f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -145,6 +145,7 @@ struct tun_file {
        struct list_head next;
        struct tun_struct *detached;
        struct ptr_ring tx_ring;
+       int cons_cnt;
        struct xdp_rxq_info xdp_rxq;
 };
 
@@ -564,6 +565,7 @@ static void tun_queue_purge(struct tun_file *tfile)
        while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
                tun_ptr_free(ptr);
 
+       tfile->cons_cnt = 0;
        skb_queue_purge(&tfile->sk.sk_write_queue);
        skb_queue_purge(&tfile->sk.sk_error_queue);
 }
@@ -730,6 +732,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
                goto out;
        }
 
+       tfile->cons_cnt = 0;
        tfile->queue_index = tun->numqueues;
        tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
 
@@ -2113,13 +2116,39 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        return total;
 }
 
-static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
+static void __tun_wake_queue(struct tun_struct *tun, struct tun_file *tfile)
+{
+       if (ptr_ring_empty(&tfile->tx_ring))
+               goto wake;
+
+       if (!__netif_subqueue_stopped(tun->dev, tfile->queue_index) ||
+           ++tfile->cons_cnt < tfile->tx_ring.size / 2)
+               return;
+
+wake:
+       netif_wake_subqueue(tun->dev, tfile->queue_index);
+       tfile->cons_cnt = 0;
+}
+
+static void *tun_ring_consume(struct tun_struct *tun, struct tun_file *tfile)
+{
+       void *ptr;
+
+       ptr = ptr_ring_consume(&tfile->tx_ring);
+       if (ptr)
+               __tun_wake_queue(tun, tfile);
+
+       return ptr;
+}
+
+static void *tun_ring_recv(struct tun_struct *tun, struct tun_file *tfile,
+                          int noblock, int *err)
 {
        DECLARE_WAITQUEUE(wait, current);
        void *ptr = NULL;
        int error = 0;
 
-       ptr = ptr_ring_consume(&tfile->tx_ring);
+       ptr = tun_ring_consume(tun, tfile);
        if (ptr)
                goto out;
        if (noblock) {
@@ -2131,7 +2160,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
 
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
-               ptr = ptr_ring_consume(&tfile->tx_ring);
+               ptr = tun_ring_consume(tun, tfile);
                if (ptr)
                        break;
                if (signal_pending(current)) {
@@ -2168,7 +2197,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        if (!ptr) {
                /* Read frames from ring */
-               ptr = tun_ring_recv(tfile, noblock, &err);
+               ptr = tun_ring_recv(tun, tfile, noblock, &err);
                if (!ptr)
                        return err;
        }
@@ -3404,6 +3433,8 @@ static int tun_chr_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        }
 
+       tfile->cons_cnt = 0;
+
        mutex_init(&tfile->napi_mutex);
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
@@ -3612,6 +3643,7 @@ static int tun_queue_resize(struct tun_struct *tun)
        for (i = 0; i < tun->numqueues; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                rings[i] = &tfile->tx_ring;
+               tfile->cons_cnt = 0;
        }
        list_for_each_entry(tfile, &tun->disabled, next)
                rings[i++] = &tfile->tx_ring;
-- 
2.43.0


Reply via email to