On Thu, Dec 10, 2015 at 09:23:25PM +0000, Alex Bennée wrote:
> Stefan Hajnoczi <stefa...@redhat.com> writes:
> 
> > From: Asias He <as...@redhat.com>
> >
> > VM sockets virtio transport implementation. This module runs in guest
> > kernel.
> 
> checkpatch warns on a bunch of whitespace/tab issues.

Will fix in the next version.

> > +struct virtio_vsock {
> > +   /* Virtio device */
> > +   struct virtio_device *vdev;
> > +   /* Virtio virtqueue */
> > +   struct virtqueue *vqs[VSOCK_VQ_MAX];
> > +   /* Wait queue for send pkt */
> > +   wait_queue_head_t queue_wait;
> > +   /* Work item to send pkt */
> > +   struct work_struct tx_work;
> > +   /* Work item to recv pkt */
> > +   struct work_struct rx_work;
> > +   /* Mutex to protect send pkt*/
> > +   struct mutex tx_lock;
> > +   /* Mutex to protect recv pkt*/
> > +   struct mutex rx_lock;
> 
> Further down I got confused by what lock was what and exactly what was
> being protected. If the receive and transmit paths touch separate things
> it might be worth re-arranging the structure to make it clearer, eg:
> 
>    /* The transmit path is protected by tx_lock */
>    struct mutex tx_lock;
>    struct work_struct tx_work;
>    ..
>    ..
> 
>    /* The receive path is protected by rx_lock */
>    wait_queue_head_t queue_wait;
>    ..
>    ..
> 
>  Which might make things a little clearer. Then all the redundant
>  information in the comments can be removed. I don't need to be told
>  what a virtio device, virtqueue or wait_queue is, as that's implicit
>  in the member's type.

Thanks, that is a nice idea.
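Something along these lines for the respin, perhaps (just a sketch using
the fields from this patch; the rx_buf counters' types are guessed since
the excerpt doesn't show them):

    struct virtio_vsock {
            struct virtio_device *vdev;
            struct virtqueue *vqs[VSOCK_VQ_MAX];

            /* The transmit path is protected by tx_lock */
            struct mutex tx_lock;
            struct work_struct tx_work;
            wait_queue_head_t queue_wait;

            /* The receive path is protected by rx_lock */
            struct mutex rx_lock;
            struct work_struct rx_work;
            int rx_buf_nr;
            int rx_buf_max_nr;
    };

Note that queue_wait belongs with the tx side: senders sleep on it when
the TX virtqueue is full.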

> > +   mutex_lock(&vsock->tx_lock);
> > +   while ((ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt,
> > +                                   GFP_KERNEL)) < 0) {
> > +           prepare_to_wait_exclusive(&vsock->queue_wait, &wait,
> > +                                     TASK_UNINTERRUPTIBLE);
> > +           mutex_unlock(&vsock->tx_lock);
> > +           schedule();
> > +           mutex_lock(&vsock->tx_lock);
> > +           finish_wait(&vsock->queue_wait, &wait);
> > +   }
> > +   virtqueue_kick(vq);
> > +   mutex_unlock(&vsock->tx_lock);
> 
> What are we protecting with tx_lock here? See comments above about
> making the lock usage semantics clearer.

vq (vsock->vqs[VSOCK_VQ_TX]) is being protected.  Concurrent calls to
virtqueue_add_sgs() are not allowed.
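In the respin the rule could also be made self-documenting with a lockdep
annotation, e.g. (hypothetical helper, not in this patch):

    /* Must be called with vsock->tx_lock held. */
    static void virtio_vsock_tx_kick(struct virtio_vsock *vsock)
    {
            lockdep_assert_held(&vsock->tx_lock);
            virtqueue_kick(vsock->vqs[VSOCK_VQ_TX]);
    }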

> > +
> > +   return pkt_len;
> > +}
> > +
> > +static struct virtio_transport_pkt_ops virtio_ops = {
> > +   .send_pkt = virtio_transport_send_pkt,
> > +};
> > +
> > +static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
> > +{
> > +   int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
> > +   struct virtio_vsock_pkt *pkt;
> > +   struct scatterlist hdr, buf, *sgs[2];
> > +   struct virtqueue *vq;
> > +   int ret;
> > +
> > +   vq = vsock->vqs[VSOCK_VQ_RX];
> > +
> > +   do {
> > +           pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
> > +           if (!pkt) {
> > +                   pr_debug("%s: fail to allocate pkt\n", __func__);
> > +                   goto out;
> > +           }
> > +
> > +           /* TODO: use mergeable rx buffer */
> 
> TODOs shouldn't end up in merged code.

Will fix in next revision.

> > +           pkt->buf = kmalloc(buf_len, GFP_KERNEL);
> > +           if (!pkt->buf) {
> > +                   pr_debug("%s: fail to allocate pkt->buf\n", __func__);
> > +                   goto err;
> > +           }
> > +
> > +           sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
> > +           sgs[0] = &hdr;
> > +
> > +           sg_init_one(&buf, pkt->buf, buf_len);
> > +           sgs[1] = &buf;
> > +           ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
> > +           if (ret)
> > +                   goto err;
> > +           vsock->rx_buf_nr++;
> > +   } while (vq->num_free);
> > +   if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
> > +           vsock->rx_buf_max_nr = vsock->rx_buf_nr;
> > +out:
> > +   virtqueue_kick(vq);
> > +   return;
> > +err:
> > +   virtqueue_kick(vq);
> > +   virtio_transport_free_pkt(pkt);
> 
> You could free the pkt memory at the fail site and just have one exit path.

Okay, I agree the err label is of marginal use.  Let's get rid of it.
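For the next version the fill loop would then read roughly like this
(sketch):

    do {
            pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
            if (!pkt)
                    goto out; /* nothing allocated yet */

            pkt->buf = kmalloc(buf_len, GFP_KERNEL);
            if (!pkt->buf) {
                    virtio_transport_free_pkt(pkt);
                    goto out;
            }

            ...

            ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
            if (ret) {
                    virtio_transport_free_pkt(pkt);
                    goto out;
            }
            vsock->rx_buf_nr++;
    } while (vq->num_free);
    ...
    out:
            virtqueue_kick(vq);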

> 
> > +   return;
> > +}
> > +
> > +static void virtio_transport_send_pkt_work(struct work_struct *work)
> > +{
> > +   struct virtio_vsock *vsock =
> > +           container_of(work, struct virtio_vsock, tx_work);
> > +   struct virtio_vsock_pkt *pkt;
> > +   bool added = false;
> > +   struct virtqueue *vq;
> > +   unsigned int len;
> > +   struct sock *sk;
> > +
> > +   vq = vsock->vqs[VSOCK_VQ_TX];
> > +   mutex_lock(&vsock->tx_lock);
> > +   do {
> 
> You can move the declarations of pkt/len into the do block.

Okay.

> 
> > +           virtqueue_disable_cb(vq);
> > +           while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
> 
> And the sk declaration here

Okay.
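Combined with the previous point, something like this (sketch; I'm
assuming the loop uses the usual virtqueue_disable_cb()/
virtqueue_enable_cb() re-check pattern):

    mutex_lock(&vsock->tx_lock);
    do {
            struct virtio_vsock_pkt *pkt;
            unsigned int len;

            virtqueue_disable_cb(vq);
            while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
                    struct sock *sk;
                    ...
            }
    } while (!virtqueue_enable_cb(vq));
    mutex_unlock(&vsock->tx_lock);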

> > +static void virtio_transport_recv_pkt_work(struct work_struct *work)
> > +{
> > +   struct virtio_vsock *vsock =
> > +           container_of(work, struct virtio_vsock, rx_work);
> > +   struct virtio_vsock_pkt *pkt;
> > +   struct virtqueue *vq;
> > +   unsigned int len;
> 
> Same as above for pkt, len.

Okay.

> > +   vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
> > +   if (!vsock) {
> > +           ret = -ENOMEM;
> > +           goto out;
> 
> Won't this attempt to kfree a NULL vsock?

kfree(NULL) is a no-op, so this is safe.
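i.e. the error path can funnel everything through the one label (sketch):

    out:
            /* kfree(NULL) does nothing, so this covers the !vsock case */
            kfree(vsock);
            return ret;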
