Re: [PATCH v8 2/3] tcm_vhost: Add hotplug/hotunplug support

2013-04-24 Thread Michael S. Tsirkin
On Wed, Apr 24, 2013 at 11:32:23AM +0800, Asias He wrote:
 In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
 virtio-scsi), hotplug support is added to virtio-scsi.
 
 This patch adds hotplug and hotunplug support to tcm_vhost.
 
 You can create or delete a LUN in targetcli to hotplug or hotunplug a
 LUN in guest.
 
 Changes in v8:
 - Use vq->mutex for event
 - Drop tcm_vhost: Add helper to check if endpoint is setup
 - Rename vs_events_dropped to vs_events_missed
 - Init lun[] explicitly
 
 Changes in v7:
 - Add vhost_work_flush for vs-vs_event_work to this series
 
 Changes in v6:
 - Pass tcm_vhost_evt to tcm_vhost_do_evt_work
 
 Changes in v5:
 - Switch to int from u64 to vs_events_nr
 - Set s-vs_events_dropped flag in tcm_vhost_allocate_evt
 - Do not nest dev mutex within vq mutex
 - Use vs_events_lock to protect vs_events_dropped and vs_events_nr
 - Rebase to target/master
 
 Changes in v4:
 - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
 - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
 
 Changes in v3:
 - Separate the bug fix to another thread
 
 Changes in v2:
 - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
 - Fix racing of vs_events_nr
 - Add flush fix patch to this series
 
 Signed-off-by: Asias He as...@redhat.com
 Reviewed-by: Stefan Hajnoczi stefa...@redhat.com
 ---
  drivers/vhost/tcm_vhost.c | 203 
 +-
  drivers/vhost/tcm_vhost.h |  10 +++
  2 files changed, 209 insertions(+), 4 deletions(-)
 
 diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
 index 88ebb79..def0c57 100644
 --- a/drivers/vhost/tcm_vhost.c
 +++ b/drivers/vhost/tcm_vhost.c
 @@ -66,11 +66,13 @@ enum {
   * TODO: debug and remove the workaround.
   */
  enum {
 - VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
 + VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
 +   (1ULL << VIRTIO_SCSI_F_HOTPLUG)
  };
  
  #define VHOST_SCSI_MAX_TARGET256
  #define VHOST_SCSI_MAX_VQ128
 +#define VHOST_SCSI_MAX_EVENT 128
  
  struct vhost_scsi {
   /* Protected by vhost_scsi-dev.mutex */
 @@ -82,6 +84,12 @@ struct vhost_scsi {
  
   struct vhost_work vs_completion_work; /* cmd completion work item */
   struct llist_head vs_completion_list; /* cmd completion queue */
 +
 + struct vhost_work vs_event_work; /* evt injection work item */
 + struct llist_head vs_event_list; /* evt injection queue */
 +
 + bool vs_events_missed; /* any missed events, protected by vq-mutex */
 + int vs_events_nr; /* num of pending events, protected by vq-mutex */
  };
  
  /* Local pointer to allocated TCM configfs fabric module */
 @@ -361,6 +369,32 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
   return 0;
  }
  
 +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt 
 *evt)
 +{
 + vs->vs_events_nr--;
 + kfree(evt);
 +}
 +
 +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 + u32 event, u32 reason)
 +{
 + struct tcm_vhost_evt *evt;
 +
 + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 + vs->vs_events_missed = true;
 + return NULL;
 + }
 +
 + evt = kmalloc(sizeof(*evt), GFP_KERNEL);
 + if (evt) {

!evt should trigger vq_err I think.
Please do
if (!evt) {
vq_err
return NULL
}

this way code below has less nesting.

 + evt-event.event = event;
 + evt-event.reason = reason;
 + vs-vs_events_nr++;
 + }
 +
 + return evt;
 +}
 +
  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
  {
   struct se_cmd *se_cmd = tv_cmd-tvc_se_cmd;
 @@ -379,6 +413,74 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
 *tv_cmd)
   kfree(tv_cmd);
  }
  
 +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
 + struct tcm_vhost_evt *evt)
 +{
 + struct vhost_virtqueue *vq = vs-vqs[VHOST_SCSI_VQ_EVT];
 + struct virtio_scsi_event *event = evt-event;
 + struct virtio_scsi_event __user *eventp;
 + unsigned out, in;
 + int head, ret;
 +
 +again:
 + vhost_disable_notify(&vs->dev, vq);
 + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
 + ARRAY_SIZE(vq->iov), &out, &in,
 + NULL, NULL);
 + if (head < 0) {
 + vs->vs_events_missed = true;
 + return;
 + }
 + if (head == vq->num) {
 + if (vhost_enable_notify(&vs->dev, vq))
 + goto again;
 + vs->vs_events_missed = true;
 + return;
 + }
 +
 + if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
 + vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
 + vq->iov[out].iov_len);
 + vs->vs_events_missed = true;
 + return;
 + }
 +
 + if (vs-vs_events_missed) {
 + 

Re: [PATCH v8 2/3] tcm_vhost: Add hotplug/hotunplug support

2013-04-24 Thread Asias He
On Wed, Apr 24, 2013 at 12:18:33PM +0300, Michael S. Tsirkin wrote:
 On Wed, Apr 24, 2013 at 11:32:23AM +0800, Asias He wrote:
  In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
  virtio-scsi), hotplug support is added to virtio-scsi.
  
  This patch adds hotplug and hotunplug support to tcm_vhost.
  
  You can create or delete a LUN in targetcli to hotplug or hotunplug a
  LUN in guest.
  
  Changes in v8:
  - Use vq-mutex for event
  - Drop tcm_vhost: Add helper to check if endpoint is setup
  - Rename vs_events_dropped to vs_events_missed
  - Init lun[] explicitly
  
  Changes in v7:
  - Add vhost_work_flush for vs-vs_event_work to this series
  
  Changes in v6:
  - Pass tcm_vhost_evt to tcm_vhost_do_evt_work
  
  Changes in v5:
  - Switch to int from u64 to vs_events_nr
  - Set s-vs_events_dropped flag in tcm_vhost_allocate_evt
  - Do not nest dev mutex within vq mutex
  - Use vs_events_lock to protect vs_events_dropped and vs_events_nr
  - Rebase to target/master
  
  Changes in v4:
  - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
  - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
  
  Changes in v3:
  - Separate the bug fix to another thread
  
  Changes in v2:
  - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
  - Fix racing of vs_events_nr
  - Add flush fix patch to this series
  
  Signed-off-by: Asias He as...@redhat.com
  Reviewed-by: Stefan Hajnoczi stefa...@redhat.com
  ---
   drivers/vhost/tcm_vhost.c | 203 
  +-
   drivers/vhost/tcm_vhost.h |  10 +++
   2 files changed, 209 insertions(+), 4 deletions(-)
  
  diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
  index 88ebb79..def0c57 100644
  --- a/drivers/vhost/tcm_vhost.c
  +++ b/drivers/vhost/tcm_vhost.c
  @@ -66,11 +66,13 @@ enum {
* TODO: debug and remove the workaround.
*/
   enum {
  -   VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
  +   VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
  + (1ULL << VIRTIO_SCSI_F_HOTPLUG)
   };
   
   #define VHOST_SCSI_MAX_TARGET  256
   #define VHOST_SCSI_MAX_VQ  128
  +#define VHOST_SCSI_MAX_EVENT   128
   
   struct vhost_scsi {
  /* Protected by vhost_scsi-dev.mutex */
  @@ -82,6 +84,12 @@ struct vhost_scsi {
   
  struct vhost_work vs_completion_work; /* cmd completion work item */
  struct llist_head vs_completion_list; /* cmd completion queue */
  +
  +   struct vhost_work vs_event_work; /* evt injection work item */
  +   struct llist_head vs_event_list; /* evt injection queue */
  +
  +   bool vs_events_missed; /* any missed events, protected by vq-mutex */
  +   int vs_events_nr; /* num of pending events, protected by vq-mutex */
   };
   
   /* Local pointer to allocated TCM configfs fabric module */
  @@ -361,6 +369,32 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
  *se_cmd)
  return 0;
   }
   
  +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt 
  *evt)
  +{
  +   vs-vs_events_nr--;
  +   kfree(evt);
  +}
  +
  +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
  +   u32 event, u32 reason)
  +{
  +   struct tcm_vhost_evt *evt;
  +
  +   if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
  +   vs-vs_events_missed = true;
  +   return NULL;
  +   }
  +
  +   evt = kmalloc(sizeof(*evt), GFP_KERNEL);
  +   if (evt) {
 
 !evt should trigger vq_err I think.
 Please do
   if (!evt) {
   vq_err
   return NULL
   }
 
 this way code below has less nesting.

okay.

  +   evt-event.event = event;
  +   evt-event.reason = reason;
  +   vs-vs_events_nr++;
  +   }
  +
  +   return evt;
  +}
  +
   static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
   {
  struct se_cmd *se_cmd = tv_cmd-tvc_se_cmd;
  @@ -379,6 +413,74 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
  *tv_cmd)
  kfree(tv_cmd);
   }
   
  +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
  +   struct tcm_vhost_evt *evt)
  +{
  +   struct vhost_virtqueue *vq = vs-vqs[VHOST_SCSI_VQ_EVT];
  +   struct virtio_scsi_event *event = evt-event;
  +   struct virtio_scsi_event __user *eventp;
  +   unsigned out, in;
  +   int head, ret;
  +
  +again:
  +   vhost_disable_notify(vs-dev, vq);
  +   head = vhost_get_vq_desc(vs-dev, vq, vq-iov,
  +   ARRAY_SIZE(vq-iov), out, in,
  +   NULL, NULL);
  +   if (head < 0) {
  +   vs->vs_events_missed = true;
  +   return;
  +   }
  +   if (head == vq-num) {
  +   if (vhost_enable_notify(vs-dev, vq))
  +   goto again;
  +   vs-vs_events_missed = true;
  +   return;
  +   }
  +
  +   if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
  +   vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
  +   vq->iov[out].iov_len);
  +   

Re: [PATCH v8 2/3] tcm_vhost: Add hotplug/hotunplug support

2013-04-24 Thread Michael S. Tsirkin
On Wed, Apr 24, 2013 at 06:06:25PM +0800, Asias He wrote:
 On Wed, Apr 24, 2013 at 12:18:33PM +0300, Michael S. Tsirkin wrote:
  On Wed, Apr 24, 2013 at 11:32:23AM +0800, Asias He wrote:
   In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
   virtio-scsi), hotplug support is added to virtio-scsi.
   
   This patch adds hotplug and hotunplug support to tcm_vhost.
   
   You can create or delete a LUN in targetcli to hotplug or hotunplug a
   LUN in guest.
   
   Changes in v8:
   - Use vq-mutex for event
   - Drop tcm_vhost: Add helper to check if endpoint is setup
   - Rename vs_events_dropped to vs_events_missed
   - Init lun[] explicitly
   
   Changes in v7:
   - Add vhost_work_flush for vs-vs_event_work to this series
   
   Changes in v6:
   - Pass tcm_vhost_evt to tcm_vhost_do_evt_work
   
   Changes in v5:
   - Switch to int from u64 to vs_events_nr
   - Set s-vs_events_dropped flag in tcm_vhost_allocate_evt
   - Do not nest dev mutex within vq mutex
   - Use vs_events_lock to protect vs_events_dropped and vs_events_nr
   - Rebase to target/master
   
   Changes in v4:
   - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
   - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
   
   Changes in v3:
   - Separate the bug fix to another thread
   
   Changes in v2:
   - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
   - Fix racing of vs_events_nr
   - Add flush fix patch to this series
   
   Signed-off-by: Asias He as...@redhat.com
   Reviewed-by: Stefan Hajnoczi stefa...@redhat.com
   ---
drivers/vhost/tcm_vhost.c | 203 
   +-
drivers/vhost/tcm_vhost.h |  10 +++
2 files changed, 209 insertions(+), 4 deletions(-)
   
   diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
   index 88ebb79..def0c57 100644
   --- a/drivers/vhost/tcm_vhost.c
   +++ b/drivers/vhost/tcm_vhost.c
   @@ -66,11 +66,13 @@ enum {
 * TODO: debug and remove the workaround.
 */
enum {
   - VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
   + VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
   +   (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET256
#define VHOST_SCSI_MAX_VQ128
   +#define VHOST_SCSI_MAX_EVENT 128

struct vhost_scsi {
 /* Protected by vhost_scsi-dev.mutex */
   @@ -82,6 +84,12 @@ struct vhost_scsi {

 struct vhost_work vs_completion_work; /* cmd completion work item */
 struct llist_head vs_completion_list; /* cmd completion queue */
   +
   + struct vhost_work vs_event_work; /* evt injection work item */
   + struct llist_head vs_event_list; /* evt injection queue */
   +
   + bool vs_events_missed; /* any missed events, protected by vq-mutex */
   + int vs_events_nr; /* num of pending events, protected by vq-mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
   @@ -361,6 +369,32 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
   *se_cmd)
 return 0;
}

   +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct 
   tcm_vhost_evt *evt)
   +{
   + vs-vs_events_nr--;
   + kfree(evt);
   +}
   +
   +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi 
   *vs,
   + u32 event, u32 reason)
   +{
   + struct tcm_vhost_evt *evt;
   +
   + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
   + vs-vs_events_missed = true;
   + return NULL;
   + }
   +
   + evt = kmalloc(sizeof(*evt), GFP_KERNEL);
   + if (evt) {
  
  !evt should trigger vq_err I think.
  Please do
  if (!evt) {
  vq_err
  return NULL
  }
  
  this way code below has less nesting.
 
 okay.
 
   + evt-event.event = event;
   + evt-event.reason = reason;
   + vs-vs_events_nr++;
   + }
   +
   + return evt;
   +}
   +
static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
 struct se_cmd *se_cmd = tv_cmd-tvc_se_cmd;
   @@ -379,6 +413,74 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
   *tv_cmd)
 kfree(tv_cmd);
}

   +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
   + struct tcm_vhost_evt *evt)
   +{
   + struct vhost_virtqueue *vq = vs-vqs[VHOST_SCSI_VQ_EVT];
   + struct virtio_scsi_event *event = evt-event;
   + struct virtio_scsi_event __user *eventp;
   + unsigned out, in;
   + int head, ret;
   +
   +again:
   + vhost_disable_notify(vs-dev, vq);
   + head = vhost_get_vq_desc(vs-dev, vq, vq-iov,
   + ARRAY_SIZE(vq-iov), out, in,
   + NULL, NULL);
   + if (head < 0) {
   + vs->vs_events_missed = true;
   + return;
   + }
   + if (head == vq-num) {
   + if (vhost_enable_notify(vs-dev, vq))
   + goto again;
   + vs-vs_events_missed = true;
   + return;
   + }
   +
   + if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
   + 

Re: [PATCH v8 2/3] tcm_vhost: Add hotplug/hotunplug support

2013-04-24 Thread Michael S. Tsirkin
On Wed, Apr 24, 2013 at 06:06:25PM +0800, Asias He wrote:
   +{
   +
   + struct vhost_scsi *vs = tpg-vhost_scsi;
   + struct vhost_virtqueue *vq;
   + u32 reason;
   + int ret;
   +
   + mutex_lock(tpg-tv_tpg_mutex);
   + vs = tpg-vhost_scsi;
   + mutex_unlock(tpg-tv_tpg_mutex);
   + if (!vs)
   + return -ENODEV;
   +
  
  What if clear_endpoint and close happen at this
  point? Can vhost_scsi go away so tcm_vhost_check_feature
  references freed memory?
 
 There is no easy way to handle this.

Take a reference somewhere so this can't happen?

--
To unsubscribe from this list: send the line unsubscribe kvm in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html