From: Marco Pinna <marco.pin...@gmail.com>

This is a preliminary patch to introduce an optimization to the
enqueue system.

All the code used to enqueue a packet into the virtqueue
is removed from virtio_transport_send_pkt_work()
and moved to the new virtio_transport_send_skb() function.
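
For reference, after this change the body of the TX worker loop reduces
to roughly the following (a simplified sketch based on the hunk below;
the reply handling and the rest of the loop are unchanged and elided
here):

    for (;;) {
            struct sk_buff *skb;
            bool reply;
            int ret;

            skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
            if (!skb)
                    break;

            reply = virtio_vsock_skb_reply(skb);

            /* tx_lock is held by the worker; the helper maps the header
             * (and any frags) into vsock->out_sgs and adds them to the
             * TX virtqueue.
             */
            ret = virtio_transport_send_skb(skb, vq, vsock);
            if (ret < 0) {
                    virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                    break;
            }

            /* ... reply/credit handling as before ... */
    }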

Co-developed-by: Luigi Leonardi <luigi.leona...@outlook.com>
Signed-off-by: Luigi Leonardi <luigi.leona...@outlook.com>
Signed-off-by: Marco Pinna <marco.pin...@gmail.com>
---
 net/vmw_vsock/virtio_transport.c | 105 ++++++++++++++++++++++-----------------
 1 file changed, 59 insertions(+), 46 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 43d405298857..c4205c22f40b 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -94,6 +94,63 @@ static u32 virtio_transport_get_local_cid(void)
        return ret;
 }
 
+/* Caller needs to hold vsock->tx_lock on vq */
+static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
+                                    struct virtio_vsock *vsock)
+{
+       int ret, in_sg = 0, out_sg = 0;
+       struct scatterlist **sgs;
+
+       sgs = vsock->out_sgs;
+       sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+                   sizeof(*virtio_vsock_hdr(skb)));
+       out_sg++;
+
+       if (!skb_is_nonlinear(skb)) {
+               if (skb->len > 0) {
+                       sg_init_one(sgs[out_sg], skb->data, skb->len);
+                       out_sg++;
+               }
+       } else {
+               struct skb_shared_info *si;
+               int i;
+
+               /* If skb is nonlinear, then its buffer must contain
+                * only header and nothing more. Data is stored in
+                * the fragged part.
+                */
+               WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+               si = skb_shinfo(skb);
+
+               for (i = 0; i < si->nr_frags; i++) {
+                       skb_frag_t *skb_frag = &si->frags[i];
+                       void *va;
+
+                       /* We will use 'page_to_virt()' for the userspace page
+                        * here, because virtio or dma-mapping layers will call
+                        * 'virt_to_phys()' later to fill the buffer descriptor.
+                        * We don't touch memory at "virtual" address of this page.
+                        */
+                       va = page_to_virt(skb_frag_page(skb_frag));
+                       sg_init_one(sgs[out_sg],
+                                   va + skb_frag_off(skb_frag),
+                                   skb_frag_size(skb_frag));
+                       out_sg++;
+               }
+       }
+
+       ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+       /* Usually this means that there is no more space available in
+        * the vq
+        */
+       if (ret < 0)
+               return ret;
+
+       virtio_transport_deliver_tap_pkt(skb);
+       return 0;
+}
+
 static void
 virtio_transport_send_pkt_work(struct work_struct *work)
 {
@@ -111,66 +168,22 @@ virtio_transport_send_pkt_work(struct work_struct *work)
        vq = vsock->vqs[VSOCK_VQ_TX];
 
        for (;;) {
-               int ret, in_sg = 0, out_sg = 0;
-               struct scatterlist **sgs;
                struct sk_buff *skb;
                bool reply;
+               int ret;
 
                skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
                if (!skb)
                        break;
 
                reply = virtio_vsock_skb_reply(skb);
-               sgs = vsock->out_sgs;
-               sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
-                           sizeof(*virtio_vsock_hdr(skb)));
-               out_sg++;
-
-               if (!skb_is_nonlinear(skb)) {
-                       if (skb->len > 0) {
-                               sg_init_one(sgs[out_sg], skb->data, skb->len);
-                               out_sg++;
-                       }
-               } else {
-                       struct skb_shared_info *si;
-                       int i;
-
-                       /* If skb is nonlinear, then its buffer must contain
-                        * only header and nothing more. Data is stored in
-                        * the fragged part.
-                        */
-                       WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
-
-                       si = skb_shinfo(skb);
 
-                       for (i = 0; i < si->nr_frags; i++) {
-                               skb_frag_t *skb_frag = &si->frags[i];
-                               void *va;
-
-                               /* We will use 'page_to_virt()' for the userspace page
-                                * here, because virtio or dma-mapping layers will call
-                                * 'virt_to_phys()' later to fill the buffer descriptor.
-                                * We don't touch memory at "virtual" address of this page.
-                                */
-                               va = page_to_virt(skb_frag_page(skb_frag));
-                               sg_init_one(sgs[out_sg],
-                                           va + skb_frag_off(skb_frag),
-                                           skb_frag_size(skb_frag));
-                               out_sg++;
-                       }
-               }
-
-               ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
-               /* Usually this means that there is no more space available in
-                * the vq
-                */
+               ret = virtio_transport_send_skb(skb, vq, vsock);
                if (ret < 0) {
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                        break;
                }
 
-               virtio_transport_deliver_tap_pkt(skb);
-
                if (reply) {
                        struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
                        int val;

-- 
2.45.2
