A completion queue address reservation could not be undone.
In case of a bad 'queue_id' or an skb allocation failure, the reserved
entry will be leaked, reducing the total capacity of the completion queue.

Fix that by moving the reservation to a point where failure is not
possible. Additionally, the 'queue_id' check was moved out of the loop,
since there is no point in re-checking it on every iteration.

Fixes: 35fcde7f8deb ("xsk: support for Tx")
Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
---
 net/xdp/xsk.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f53a6ef7c155..703cf5ea448b 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -226,6 +226,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr 
*m,
 
        mutex_lock(&xs->mutex);
 
+       if (xs->queue_id >= xs->dev->real_num_tx_queues)
+               goto out;
+
        while (xskq_peek_desc(xs->tx, &desc)) {
                char *buffer;
                u64 addr;
@@ -236,12 +239,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr 
*m,
                        goto out;
                }
 
-               if (xskq_reserve_addr(xs->umem->cq))
-                       goto out;
-
-               if (xs->queue_id >= xs->dev->real_num_tx_queues)
-                       goto out;
-
                len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
@@ -253,7 +250,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr 
*m,
                addr = desc.addr;
                buffer = xdp_umem_get_data(xs->umem, addr);
                err = skb_store_bits(skb, 0, buffer, len);
-               if (unlikely(err)) {
+               if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
                        kfree_skb(skb);
                        goto out;
                }
-- 
2.17.1

Reply via email to