This patch fixes the warning triggered when a request times out and
the socket is shut down from a non-sleeping (atomic) context, by:

1. Moving the socket shutdown on a timeout into nbd_thread_send
   (see the sketch below the fold).

2. Making sock_lock a mutex instead of a spinlock, since
   nbd_xmit_timeout no longer needs to hold it.

3. Moving sock_shutdown() outside the tx_lock in NBD_DO_IT.
---
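A minimal sketch of the deferral pattern this patch adopts, for
reference. It borrows the nbd_device field names from the patch, the
*_sketch function names are hypothetical, and the snippet is
illustrative rather than the complete driver code. The point: timer
callbacks run in softirq (atomic) context, where mutex_lock() and
kernel_sock_shutdown() may sleep and therefore must not be called, so
the callback only records the timeout and wakes the sender thread,
which does the real work from process context.

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/wait.h>

/* Timer callback: softirq context, no sleeping calls allowed. */
static void xmit_timeout_sketch(unsigned long arg)
{
        struct nbd_device *nbd = (struct nbd_device *)arg;

        atomic_inc(&nbd->timedout);     /* record the timeout... */
        wake_up(&nbd->waiting_wq);      /* ...and kick nbd_thread_send */
}

/* Sender thread: process context, so sleeping is fine and both the
 * sock_lock mutex and kernel_sock_shutdown() are safe to use. */
static void handle_timeout_sketch(struct nbd_device *nbd)
{
        mutex_lock(&nbd->sock_lock);
        if (nbd->sock)
                kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
        mutex_unlock(&nbd->sock_lock);
}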
 drivers/block/nbd.c | 85 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 50 insertions(+), 35 deletions(-)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7..a52cc16 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -3,7 +3,7 @@
  *
  * Note that you can not swap over this thing, yet. Seems to work but
  * deadlocks sometimes - you can not swap over TCP in general.
- * 
+ *
  * Copyright 1997-2000, 2008 Pavel Machek <pa...@ucw.cz>
  * Parts copyright 2001 Steven Whitehouse <st...@chygwyn.com>
  *
@@ -35,14 +35,14 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
 
 struct nbd_device {
        u32 flags;
-       struct socket * sock;   /* If == NULL, device is not ready, yet */
+       struct socket *sock;    /* If == NULL, device is not ready, yet */
        int magic;
 
        spinlock_t queue_lock;
@@ -57,12 +57,12 @@ struct nbd_device {
        int blksize;
        loff_t bytesize;
        int xmit_timeout;
-       bool timedout;
+       atomic_t timedout;
        bool disconnect; /* a disconnect has been requested by user */
 
        struct timer_list timeout_timer;
        /* protects initialization and shutdown of the socket */
-       spinlock_t sock_lock;
+       struct mutex sock_lock;
        struct task_struct *task_recv;
        struct task_struct *task_send;
 
@@ -172,10 +172,9 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
  */
 static void sock_shutdown(struct nbd_device *nbd)
 {
-       spin_lock_irq(&nbd->sock_lock);
-
+       mutex_lock(&nbd->sock_lock);
        if (!nbd->sock) {
-               spin_unlock_irq(&nbd->sock_lock);
+               mutex_unlock(&nbd->sock_lock);
                return;
        }
 
@@ -183,27 +182,19 @@ static void sock_shutdown(struct nbd_device *nbd)
        kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
        sockfd_put(nbd->sock);
        nbd->sock = NULL;
-       spin_unlock_irq(&nbd->sock_lock);
-
+       mutex_unlock(&nbd->sock_lock);
        del_timer(&nbd->timeout_timer);
 }
 
 static void nbd_xmit_timeout(unsigned long arg)
 {
        struct nbd_device *nbd = (struct nbd_device *)arg;
-       unsigned long flags;
 
        if (list_empty(&nbd->queue_head))
                return;
 
-       spin_lock_irqsave(&nbd->sock_lock, flags);
-
-       nbd->timedout = true;
-
-       if (nbd->sock)
-               kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
-
-       spin_unlock_irqrestore(&nbd->sock_lock, flags);
+       atomic_inc(&nbd->timedout);
+       wake_up(&nbd->waiting_wq);
 
        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
 }
@@ -266,6 +257,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
 {
        int result;
        void *kaddr = kmap(bvec->bv_page);
+
        result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
@@ -278,6 +270,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
+
        u32 type;
 
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
@@ -363,6 +356,7 @@ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
 {
        int result;
        void *kaddr = kmap(bvec->bv_page);
+
        result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
@@ -579,7 +573,27 @@ static int nbd_thread_send(void *data)
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
-                                        !list_empty(&nbd->waiting_queue));
+                                        !list_empty(&nbd->waiting_queue) ||
+                                        atomic_read(&nbd->timedout));
+
+               if (atomic_read(&nbd->timedout)) {
+                       mutex_lock(&nbd->sock_lock);
+                       if (nbd->sock) {
+                               struct request sreq;
+
+                               blk_rq_init(NULL, &sreq);
+                               sreq.cmd_type = REQ_TYPE_DRV_PRIV;
+                               mutex_lock(&nbd->tx_lock);
+                               nbd->disconnect = true;
+                               nbd_send_req(nbd, &sreq);
+                               mutex_unlock(&nbd->tx_lock);
+                       dev_err(disk_to_dev(nbd->disk),
+                               "Device timeout occurred, shutting down"
+                               " socket\n");
+                       }
+                       mutex_unlock(&nbd->sock_lock);
+                       sock_shutdown(nbd);
+               }
 
                /* extract request */
                if (list_empty(&nbd->waiting_queue))
@@ -592,7 +606,11 @@ static int nbd_thread_send(void *data)
                spin_unlock_irq(&nbd->queue_lock);
 
                /* handle request */
-               nbd_handle_req(nbd, req);
+               if (atomic_read(&nbd->timedout)) {
+                       req->errors++;
+                       nbd_end_request(nbd, req);
+               } else
+                       nbd_handle_req(nbd, req);
        }
 
        nbd->task_send = NULL;
@@ -601,8 +619,8 @@ static int nbd_thread_send(void *data)
 }
 
 /*
- * We always wait for result of write, for now. It would be nice to make it optional
- * in future
+ * We always wait for result of write, for now.
+ * It would be nice to make it optional in future
  * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
  *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
@@ -611,7 +629,7 @@ static void nbd_request_handler(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
 {
        struct request *req;
-       
+
        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *nbd;
 
@@ -647,7 +665,7 @@ static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
 {
        int ret = 0;
 
-       spin_lock_irq(&nbd->sock_lock);
+       mutex_lock(&nbd->sock_lock);
 
        if (nbd->sock) {
                ret = -EBUSY;
@@ -657,7 +675,7 @@ static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
        nbd->sock = sock;
 
 out:
-       spin_unlock_irq(&nbd->sock_lock);
+       mutex_unlock(&nbd->sock_lock);
 
        return ret;
 }
@@ -666,7 +684,7 @@ out:
 static void nbd_reset(struct nbd_device *nbd)
 {
        nbd->disconnect = false;
-       nbd->timedout = false;
+       atomic_set(&nbd->timedout, 0);
        nbd->blksize = 1024;
        nbd->bytesize = 0;
        set_capacity(nbd->disk, 0);
@@ -729,7 +747,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                nbd_send_req(nbd, &sreq);
                return 0;
        }
- 
+
        case NBD_CLEAR_SOCK:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
@@ -803,17 +821,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                error = nbd_thread_recv(nbd, bdev);
                nbd_dev_dbg_close(nbd);
                kthread_stop(thread);
-
-               mutex_lock(&nbd->tx_lock);
-
                sock_shutdown(nbd);
+               mutex_lock(&nbd->tx_lock);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);
 
                if (nbd->disconnect) /* user requested, ignore socket errors */
                        error = 0;
-               if (nbd->timedout)
+               if (atomic_read(&nbd->timedout))
                        error = -ETIMEDOUT;
 
                nbd_reset(nbd);
@@ -856,8 +872,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
        return error;
 }
 
-static const struct block_device_operations nbd_fops =
-{
+static const struct block_device_operations nbd_fops = {
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
@@ -1075,7 +1090,7 @@ static int __init nbd_init(void)
                nbd_dev[i].magic = NBD_MAGIC;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
-               spin_lock_init(&nbd_dev[i].sock_lock);
+               mutex_init(&nbd_dev[i].sock_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_timer(&nbd_dev[i].timeout_timer);
-- 
2.6.2
