Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-26 Thread Asias He
On Tue, Mar 26, 2013 at 10:46:28PM +0200, Michael S. Tsirkin wrote:
> On Tue, Mar 26, 2013 at 10:56:33AM +0800, Asias He wrote:
> > On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> > > On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > > > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > > > 
> > > > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > > > 
> > > > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > > > LUN in guest.
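
(Sketch of how that is wired up, for readers of this archive: the helper names tcm_vhost_{hotplug,hotunplug} and tcm_vhost_send_evt come from the changelog below and the event codes from the virtio-scsi spec, but the diff itself is truncated in this archive, so the signatures, the tpg->vhost_scsi back-pointer and the shared helper here are assumptions, not the posted code.)

/* Sketch, not the posted diff: one common helper for plug and unplug that
 * queues a transport-reset event, which the guest handles by rescanning
 * the target or removing the LUN. */
static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
		struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;	/* assumed back-pointer */
	u32 reason = plug ? VIRTIO_SCSI_EVT_RESET_RESCAN :
			    VIRTIO_SCSI_EVT_RESET_REMOVED;

	if (!vs)
		return -EOPNOTSUPP;

	return tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
}

static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	return tcm_vhost_do_plug(tpg, lun, true);
}

static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	return tcm_vhost_do_plug(tpg, lun, false);
}

In short, a LUN create or delete on the host turns into a VIRTIO_SCSI_T_TRANSPORT_RESET event on the event virtqueue, which the guest's virtio-scsi driver handles by rescanning or removing the LUN.
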
> > > > > > 
> > > > > > Changes in v4:
> > > > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > > > 
> > > > > > Changes in v3:
> > > > > > - Separate the bug fix to another thread
> > > > > > 
> > > > > > Changes in v2:
> > > > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > > > - Fix racing of vs_events_nr
> > > > > > - Add flush fix patch to this series
> > > > > > 
> > > > > > Signed-off-by: Asias He 
> > > > > > Reviewed-by: Stefan Hajnoczi 
> > > > > > ---
> > > > > >  drivers/vhost/tcm_vhost.c | 212 
> > > > > > --
> > > > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > > > 
> > > > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > > > index d81e3a9..e734ead 100644
> > > > > > --- a/drivers/vhost/tcm_vhost.c
> > > > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > > > @@ -62,6 +62,9 @@ enum {
> > > > > >  
> > > > > >  #define VHOST_SCSI_MAX_TARGET  256
> > > > > >  #define VHOST_SCSI_MAX_VQ  128
> > > > > > +#define VHOST_SCSI_MAX_EVENT   128
> > > > > > +
> > > > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> > > > > > VIRTIO_SCSI_F_HOTPLUG))
> > > > > >  
> > > > > >  struct vhost_scsi {
> > > > > > /* Protected by vhost_scsi->dev.mutex */
> > > > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > > > >  
> > > > > > struct vhost_work vs_completion_work; /* cmd completion work 
> > > > > > item */
> > > > > > struct llist_head vs_completion_list; /* cmd completion queue */
> > > > > > +
> > > > > > +   struct vhost_work vs_event_work; /* evt injection work item */
> > > > > > +   struct llist_head vs_event_list; /* evt injection queue */
> > > > > > +
> > > > > > +   bool vs_events_dropped; /* any missed events, protected by 
> > > > > > dev.mutex */
> > > > > > +   u64 vs_events_nr; /* num of pending events, protected by 
> > > > > > dev.mutex */
> > > > > 
> > > > > Whoa, u64. We allow up to 128 of these; an int will do.
> > > > 
> > > > u64 was used before we limited the total number of events; changed to int.
> > > > 
> > > > > >  };
> > > > > >  
> > > > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> > > > > > vhost_virtqueue *vq)
> > > > > > return ret;
> > > > > >  }
> > > > > >  
> > > > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > > > +{
> > > > > > +   bool ret;
> > > > > > +
> > > > > > +   mutex_lock(&vs->dev.mutex);
> > > > > > +   ret = vs->vs_events_dropped;
> > > > > > +   mutex_unlock(&vs->dev.mutex);
> > > > > > +
> > > > > > +   return ret;
> > > > > > +}
> > > > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > > > >  {
> > > > > > return 1;
> > > > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct 
> > > > > > se_cmd *se_cmd)
> > > > > > return 0;
> > > > > >  }
> > > > > >  
> > > > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct 
> > > > > > tcm_vhost_evt *evt)
> > > > > > +{
> > > > > > +   mutex_lock(&vs->dev.mutex);
> > > > > > +   vs->vs_events_nr--;
> > > > > > +   kfree(evt);
> > > > > > +   mutex_unlock(&vs->dev.mutex);
> > > > > > +}
> > > > > > +
> > > > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct 
> > > > > > vhost_scsi *vs,
> > > > > > +   u32 event, u32 reason)
> > > > > > +{
> > > > > > +   struct tcm_vhost_evt *evt;
> > > > > > +
> > > > > > +   mutex_lock(&vs->dev.mutex);
> > > > > > +   if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > > > 
> > > > > 
> > > > > Set events dropped flag here and stop worrying about it in callers?
> > > > 
> > > > Set the flag here now and do not bother the callers.
> > > > 
> > > > > > +   mutex_unlock(&vs->dev.mutex);
> > > > > > +   return NULL;
> > > > > > +   }
> > > > > > +
> > > > > > +   evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > > > +   if (evt) {
> > > > > > +   evt->event.event = event;
> > > > > > +   evt->event.reason = reason;
> > > 

Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-26 Thread Michael S. Tsirkin
On Tue, Mar 26, 2013 at 10:56:33AM +0800, Asias He wrote:
> On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> > On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > > 
> > > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > > 
> > > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > > LUN in guest.
> > > > > 
> > > > > Changes in v4:
> > > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > > 
> > > > > Changes in v3:
> > > > > - Separate the bug fix to another thread
> > > > > 
> > > > > Changes in v2:
> > > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > > - Fix racing of vs_events_nr
> > > > > - Add flush fix patch to this series
> > > > > 
> > > > > Signed-off-by: Asias He 
> > > > > Reviewed-by: Stefan Hajnoczi 
> > > > > ---
> > > > >  drivers/vhost/tcm_vhost.c | 212 
> > > > > --
> > > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > > 
> > > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > > index d81e3a9..e734ead 100644
> > > > > --- a/drivers/vhost/tcm_vhost.c
> > > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > > @@ -62,6 +62,9 @@ enum {
> > > > >  
> > > > >  #define VHOST_SCSI_MAX_TARGET 256
> > > > >  #define VHOST_SCSI_MAX_VQ 128
> > > > > +#define VHOST_SCSI_MAX_EVENT 128
> > > > > +
> > > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> > > > > VIRTIO_SCSI_F_HOTPLUG))
> > > > >  
> > > > >  struct vhost_scsi {
> > > > >   /* Protected by vhost_scsi->dev.mutex */
> > > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > > >  
> > > > >   struct vhost_work vs_completion_work; /* cmd completion work 
> > > > > item */
> > > > >   struct llist_head vs_completion_list; /* cmd completion queue */
> > > > > +
> > > > > + struct vhost_work vs_event_work; /* evt injection work item */
> > > > > + struct llist_head vs_event_list; /* evt injection queue */
> > > > > +
> > > > > + bool vs_events_dropped; /* any missed events, protected by 
> > > > > dev.mutex */
> > > > > + u64 vs_events_nr; /* num of pending events, protected by 
> > > > > dev.mutex */
> > > > 
> > > > Whoa, u64. We allow up to 128 of these; an int will do.
> > > 
> > > u64 was used before we limited the total number of events; changed to int.
> > > 
> > > > >  };
> > > > >  
> > > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> > > > > vhost_virtqueue *vq)
> > > > >   return ret;
> > > > >  }
> > > > >  
> > > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > > +{
> > > > > + bool ret;
> > > > > +
> > > > > + mutex_lock(&vs->dev.mutex);
> > > > > + ret = vs->vs_events_dropped;
> > > > > + mutex_unlock(&vs->dev.mutex);
> > > > > +
> > > > > + return ret;
> > > > > +}
> > > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > > >  {
> > > > >   return 1;
> > > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
> > > > > *se_cmd)
> > > > >   return 0;
> > > > >  }
> > > > >  
> > > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct 
> > > > > tcm_vhost_evt *evt)
> > > > > +{
> > > > > + mutex_lock(&vs->dev.mutex);
> > > > > + vs->vs_events_nr--;
> > > > > + kfree(evt);
> > > > > + mutex_unlock(&vs->dev.mutex);
> > > > > +}
> > > > > +
> > > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct 
> > > > > vhost_scsi *vs,
> > > > > + u32 event, u32 reason)
> > > > > +{
> > > > > + struct tcm_vhost_evt *evt;
> > > > > +
> > > > > + mutex_lock(&vs->dev.mutex);
> > > > > + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > > 
> > > > 
> > > > Set events dropped flag here and stop worrying about it in callers?
> > > 
> > > Set the flag here now and do not bother the callers.
> > > 
> > > > > + mutex_unlock(&vs->dev.mutex);
> > > > > + return NULL;
> > > > > + }
> > > > > +
> > > > > + evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > > + if (evt) {
> > > > > + evt->event.event = event;
> > > > > + evt->event.reason = reason;
> > > > > + vs->vs_events_nr++;
> > > > > + }
> > > > > + mutex_unlock(&vs->dev.mutex);
> > > > > +
> > > > > + return evt;
> > > > > +}
> > > > > +
> > > > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > > >  {
> > > > >   

Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-25 Thread Asias He
On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > 
> > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > 
> > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > LUN in guest.
> > > > 
> > > > Changes in v4:
> > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > 
> > > > Changes in v3:
> > > > - Separate the bug fix to another thread
> > > > 
> > > > Changes in v2:
> > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > - Fix racing of vs_events_nr
> > > > - Add flush fix patch to this series
> > > > 
> > > > Signed-off-by: Asias He 
> > > > Reviewed-by: Stefan Hajnoczi 
> > > > ---
> > > >  drivers/vhost/tcm_vhost.c | 212 
> > > > --
> > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > 
> > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > index d81e3a9..e734ead 100644
> > > > --- a/drivers/vhost/tcm_vhost.c
> > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > @@ -62,6 +62,9 @@ enum {
> > > >  
> > > >  #define VHOST_SCSI_MAX_TARGET  256
> > > >  #define VHOST_SCSI_MAX_VQ  128
> > > > +#define VHOST_SCSI_MAX_EVENT   128
> > > > +
> > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> > > > VIRTIO_SCSI_F_HOTPLUG))
> > > >  
> > > >  struct vhost_scsi {
> > > > /* Protected by vhost_scsi->dev.mutex */
> > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > >  
> > > > struct vhost_work vs_completion_work; /* cmd completion work 
> > > > item */
> > > > struct llist_head vs_completion_list; /* cmd completion queue */
> > > > +
> > > > +   struct vhost_work vs_event_work; /* evt injection work item */
> > > > +   struct llist_head vs_event_list; /* evt injection queue */
> > > > +
> > > > +   bool vs_events_dropped; /* any missed events, protected by 
> > > > dev.mutex */
> > > > +   u64 vs_events_nr; /* num of pending events, protected by 
> > > > dev.mutex */
> > > 
> > > Whoa, u64. We allow up to 128 of these; an int will do.
> > 
> > u64 was used before we limited the total number of events; changed to int.
> > 
> > > >  };
> > > >  
> > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> > > > vhost_virtqueue *vq)
> > > > return ret;
> > > >  }
> > > >  
> > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > +{
> > > > +   bool ret;
> > > > +
> > > > +   mutex_lock(&vs->dev.mutex);
> > > > +   ret = vs->vs_events_dropped;
> > > > +   mutex_unlock(&vs->dev.mutex);
> > > > +
> > > > +   return ret;
> > > > +}
> > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > >  {
> > > > return 1;
> > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
> > > > *se_cmd)
> > > > return 0;
> > > >  }
> > > >  
> > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct 
> > > > tcm_vhost_evt *evt)
> > > > +{
> > > > +   mutex_lock(&vs->dev.mutex);
> > > > +   vs->vs_events_nr--;
> > > > +   kfree(evt);
> > > > +   mutex_unlock(&vs->dev.mutex);
> > > > +}
> > > > +
> > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi 
> > > > *vs,
> > > > +   u32 event, u32 reason)
> > > > +{
> > > > +   struct tcm_vhost_evt *evt;
> > > > +
> > > > +   mutex_lock(&vs->dev.mutex);
> > > > +   if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > 
> > > 
> > > Set events dropped flag here and stop worrying about it in callers?
> > 
> > Set the flag here now and do not bother the callers.
> > 
> > > > +   mutex_unlock(&vs->dev.mutex);
> > > > +   return NULL;
> > > > +   }
> > > > +
> > > > +   evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > +   if (evt) {
> > > > +   evt->event.event = event;
> > > > +   evt->event.reason = reason;
> > > > +   vs->vs_events_nr++;
> > > > +   }
> > > > +   mutex_unlock(&vs->dev.mutex);
> > > > +
> > > > +   return evt;
> > > > +}
> > > > +
> > > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > >  {
> > > > struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct 
> > > > tcm_vhost_cmd *tv_cmd)
> > > > kfree(tv_cmd);
> > > >  }
> > > >  
> > > > +static void tcm_vhost_do_evt_work(struct vhost_s

Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-25 Thread Michael S. Tsirkin
On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > 
> > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > 
> > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > LUN in guest.
> > > 
> > > Changes in v4:
> > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > 
> > > Changes in v3:
> > > - Separate the bug fix to another thread
> > > 
> > > Changes in v2:
> > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > - Fix racing of vs_events_nr
> > > - Add flush fix patch to this series
> > > 
> > > Signed-off-by: Asias He 
> > > Reviewed-by: Stefan Hajnoczi 
> > > ---
> > >  drivers/vhost/tcm_vhost.c | 212 
> > > --
> > >  drivers/vhost/tcm_vhost.h |  10 +++
> > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > index d81e3a9..e734ead 100644
> > > --- a/drivers/vhost/tcm_vhost.c
> > > +++ b/drivers/vhost/tcm_vhost.c
> > > @@ -62,6 +62,9 @@ enum {
> > >  
> > >  #define VHOST_SCSI_MAX_TARGET 256
> > >  #define VHOST_SCSI_MAX_VQ 128
> > > +#define VHOST_SCSI_MAX_EVENT 128
> > > +
> > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> > > VIRTIO_SCSI_F_HOTPLUG))
> > >  
> > >  struct vhost_scsi {
> > >   /* Protected by vhost_scsi->dev.mutex */
> > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > >  
> > >   struct vhost_work vs_completion_work; /* cmd completion work item */
> > >   struct llist_head vs_completion_list; /* cmd completion queue */
> > > +
> > > + struct vhost_work vs_event_work; /* evt injection work item */
> > > + struct llist_head vs_event_list; /* evt injection queue */
> > > +
> > > + bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > > + u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> > 
> > Whoa, u64. We allow up to 128 of these; an int will do.
> 
> u64 was used before we limited the total number of events; changed to int.
> 
> > >  };
> > >  
> > >  /* Local pointer to allocated TCM configfs fabric module */
> > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> > > vhost_virtqueue *vq)
> > >   return ret;
> > >  }
> > >  
> > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > +{
> > > + bool ret;
> > > +
> > > + mutex_lock(&vs->dev.mutex);
> > > + ret = vs->vs_events_dropped;
> > > + mutex_unlock(&vs->dev.mutex);
> > > +
> > > + return ret;
> > > +}
> > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > >  {
> > >   return 1;
> > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
> > > *se_cmd)
> > >   return 0;
> > >  }
> > >  
> > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct 
> > > tcm_vhost_evt *evt)
> > > +{
> > > + mutex_lock(&vs->dev.mutex);
> > > + vs->vs_events_nr--;
> > > + kfree(evt);
> > > + mutex_unlock(&vs->dev.mutex);
> > > +}
> > > +
> > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi 
> > > *vs,
> > > + u32 event, u32 reason)
> > > +{
> > > + struct tcm_vhost_evt *evt;
> > > +
> > > + mutex_lock(&vs->dev.mutex);
> > > + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > 
> > 
> > Set events dropped flag here and stop worrying about it in callers?
> 
> Set the flag here now and do not bother the callers.
> 
> > > + mutex_unlock(&vs->dev.mutex);
> > > + return NULL;
> > > + }
> > > +
> > > + evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > + if (evt) {
> > > + evt->event.event = event;
> > > + evt->event.reason = reason;
> > > + vs->vs_events_nr++;
> > > + }
> > > + mutex_unlock(&vs->dev.mutex);
> > > +
> > > + return evt;
> > > +}
> > > +
> > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > >  {
> > >   struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
> > > *tv_cmd)
> > >   kfree(tv_cmd);
> > >  }
> > >  
> > > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > > + struct virtio_scsi_event *event)
> > > +{
> > > + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > > + struct virtio_scsi_event __user *eventp;
> > > + unsigned out, in;
> > > + int head, ret;
> > > +
> > > + if (!tcm_vhost_check_endpoint(vq))
> > > + return;
> > > +
> > > + mutex_lock(&vq->mutex);
> > > +again:
> > > + vhost_disable_notify(&vs->dev, vq);
> > > + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > > + ARRAY_SIZE(vq->iov), &out, &in,
> > > + NULL, NULL);
> > > + if (head < 0) {
> > 

Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-25 Thread Asias He
On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > virtio-scsi), hotplug support is added to virtio-scsi.
> > 
> > This patch adds hotplug and hotunplug support to tcm_vhost.
> > 
> > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > LUN in guest.
> > 
> > Changes in v4:
> > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > 
> > Changes in v3:
> > - Separate the bug fix to another thread
> > 
> > Changes in v2:
> > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > - Fix racing of vs_events_nr
> > - Add flush fix patch to this series
> > 
> > Signed-off-by: Asias He 
> > Reviewed-by: Stefan Hajnoczi 
> > ---
> >  drivers/vhost/tcm_vhost.c | 212 
> > --
> >  drivers/vhost/tcm_vhost.h |  10 +++
> >  2 files changed, 217 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > index d81e3a9..e734ead 100644
> > --- a/drivers/vhost/tcm_vhost.c
> > +++ b/drivers/vhost/tcm_vhost.c
> > @@ -62,6 +62,9 @@ enum {
> >  
> >  #define VHOST_SCSI_MAX_TARGET  256
> >  #define VHOST_SCSI_MAX_VQ  128
> > +#define VHOST_SCSI_MAX_EVENT   128
> > +
> > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> > VIRTIO_SCSI_F_HOTPLUG))
> >  
> >  struct vhost_scsi {
> > /* Protected by vhost_scsi->dev.mutex */
> > @@ -73,6 +76,12 @@ struct vhost_scsi {
> >  
> > struct vhost_work vs_completion_work; /* cmd completion work item */
> > struct llist_head vs_completion_list; /* cmd completion queue */
> > +
> > +   struct vhost_work vs_event_work; /* evt injection work item */
> > +   struct llist_head vs_event_list; /* evt injection queue */
> > +
> > +   bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > +   u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> 
> Whoa, u64. We allow up to 128 of these; an int will do.

u64 was used before we limited the total number of events; changed to int.

> >  };
> >  
> >  /* Local pointer to allocated TCM configfs fabric module */
> > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> > vhost_virtqueue *vq)
> > return ret;
> >  }
> >  
> > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > +{
> > +   bool ret;
> > +
> > +   mutex_lock(&vs->dev.mutex);
> > +   ret = vs->vs_events_dropped;
> > +   mutex_unlock(&vs->dev.mutex);
> > +
> > +   return ret;
> > +}
> >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> >  {
> > return 1;
> > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd 
> > *se_cmd)
> > return 0;
> >  }
> >  
> > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt 
> > *evt)
> > +{
> > +   mutex_lock(&vs->dev.mutex);
> > +   vs->vs_events_nr--;
> > +   kfree(evt);
> > +   mutex_unlock(&vs->dev.mutex);
> > +}
> > +
> > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > +   u32 event, u32 reason)
> > +{
> > +   struct tcm_vhost_evt *evt;
> > +
> > +   mutex_lock(&vs->dev.mutex);
> > +   if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> 
> 
> Set events dropped flag here and stop worrying about it in callers?

Set the flag here now and do not bother the callers.
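
Roughly, the allocation path then becomes something like this (untested sketch of the v5 change, not the posted diff; the field and macro names are from the v4 code quoted above, and treating a failed kzalloc as a dropped event is my assumption):

static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
	u32 event, u32 reason)
{
	struct tcm_vhost_evt *evt;

	mutex_lock(&vs->dev.mutex);
	/* Sketch: record the overflow here so callers need no extra handling */
	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_dropped = true;
		mutex_unlock(&vs->dev.mutex);
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		/* Assumption: an allocation failure also counts as a dropped event */
		vs->vs_events_dropped = true;
		mutex_unlock(&vs->dev.mutex);
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;
	mutex_unlock(&vs->dev.mutex);

	return evt;
}
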

> > +   mutex_unlock(&vs->dev.mutex);
> > +   return NULL;
> > +   }
> > +
> > +   evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > +   if (evt) {
> > +   evt->event.event = event;
> > +   evt->event.reason = reason;
> > +   vs->vs_events_nr++;
> > +   }
> > +   mutex_unlock(&vs->dev.mutex);
> > +
> > +   return evt;
> > +}
> > +
> >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> >  {
> > struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
> > *tv_cmd)
> > kfree(tv_cmd);
> >  }
> >  
> > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > +   struct virtio_scsi_event *event)
> > +{
> > +   struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > +   struct virtio_scsi_event __user *eventp;
> > +   unsigned out, in;
> > +   int head, ret;
> > +
> > +   if (!tcm_vhost_check_endpoint(vq))
> > +   return;
> > +
> > +   mutex_lock(&vq->mutex);
> > +again:
> > +   vhost_disable_notify(&vs->dev, vq);
> > +   head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > +   ARRAY_SIZE(vq->iov), &out, &in,
> > +   NULL, NULL);
> > +   if (head < 0) {
> > +   mutex_lock(&vs->dev.mutex);
> 
> This nests dev mutex within vq mutex, can cause deadlock.
> Should be the other way around.

Changed the order.
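
For reference, the reordered version would look roughly like this (simplified sketch, not the posted code: the vhost_enable_notify retry loop and most error handling are omitted), with dev.mutex always taken outside vq->mutex so the dropped flag can be set without nesting:

static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
	struct virtio_scsi_event *event)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head;

	/* dev.mutex first, then vq->mutex: the reverse of the v4 nesting */
	mutex_lock(&vs->dev.mutex);
	mutex_lock(&vq->mutex);

	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL);
	if (head < 0 || head == vq->num) {
		/* No usable descriptor; dev.mutex is already held */
		vs->vs_events_dropped = true;
		goto out;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))
		goto out;	/* malformed buffer; error handling trimmed here */

	eventp = vq->iov[out].iov_base;
	if (!copy_to_user(eventp, event, sizeof(*event)))
		vhost_add_used_and_signal(&vs->dev, vq, head, sizeof(*event));
out:
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}
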

> > +   vs->vs_events_dropped = true;
> > +   mutex_unlock(&vs->dev.mutex);
> > +   g

Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support

2013-03-24 Thread Michael S. Tsirkin
On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> virtio-scsi), hotplug support is added to virtio-scsi.
> 
> This patch adds hotplug and hotunplug support to tcm_vhost.
> 
> You can create or delete a LUN in targetcli to hotplug or hotunplug a
> LUN in guest.
> 
> Changes in v4:
> - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> 
> Changes in v3:
> - Separate the bug fix to another thread
> 
> Changes in v2:
> - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> - Fix racing of vs_events_nr
> - Add flush fix patch to this series
> 
> Signed-off-by: Asias He 
> Reviewed-by: Stefan Hajnoczi 
> ---
>  drivers/vhost/tcm_vhost.c | 212 
> --
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 217 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index d81e3a9..e734ead 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -62,6 +62,9 @@ enum {
>  
>  #define VHOST_SCSI_MAX_TARGET 256
>  #define VHOST_SCSI_MAX_VQ 128
> +#define VHOST_SCSI_MAX_EVENT 128
> +
> +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << 
> VIRTIO_SCSI_F_HOTPLUG))
>  
>  struct vhost_scsi {
>   /* Protected by vhost_scsi->dev.mutex */
> @@ -73,6 +76,12 @@ struct vhost_scsi {
>  
>   struct vhost_work vs_completion_work; /* cmd completion work item */
>   struct llist_head vs_completion_list; /* cmd completion queue */
> +
> + struct vhost_work vs_event_work; /* evt injection work item */
> + struct llist_head vs_event_list; /* evt injection queue */
> +
> + bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> + u64 vs_events_nr; /* num of pending events, protected by dev.mutex */

Whoa, u64. We allow up to 128 of these; an int will do.

>  };
>  
>  /* Local pointer to allocated TCM configfs fabric module */
> @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct 
> vhost_virtqueue *vq)
>   return ret;
>  }
>  
> +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> +{
> + bool ret;
> +
> + mutex_lock(&vs->dev.mutex);
> + ret = vs->vs_events_dropped;
> + mutex_unlock(&vs->dev.mutex);
> +
> + return ret;
> +}
>  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
>  {
>   return 1;
> @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
>   return 0;
>  }
>  
> +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt 
> *evt)
> +{
> + mutex_lock(&vs->dev.mutex);
> + vs->vs_events_nr--;
> + kfree(evt);
> + mutex_unlock(&vs->dev.mutex);
> +}
> +
> +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> + u32 event, u32 reason)
> +{
> + struct tcm_vhost_evt *evt;
> +
> + mutex_lock(&vs->dev.mutex);
> + if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {


Set events dropped flag here and stop worrying about it in callers?

> + mutex_unlock(&vs->dev.mutex);
> + return NULL;
> + }
> +
> + evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> + if (evt) {
> + evt->event.event = event;
> + evt->event.reason = reason;
> + vs->vs_events_nr++;
> + }
> + mutex_unlock(&vs->dev.mutex);
> +
> + return evt;
> +}
> +
>  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
>  {
>   struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd 
> *tv_cmd)
>   kfree(tv_cmd);
>  }
>  
> +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> + struct virtio_scsi_event *event)
> +{
> + struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> + struct virtio_scsi_event __user *eventp;
> + unsigned out, in;
> + int head, ret;
> +
> + if (!tcm_vhost_check_endpoint(vq))
> + return;
> +
> + mutex_lock(&vq->mutex);
> +again:
> + vhost_disable_notify(&vs->dev, vq);
> + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> + ARRAY_SIZE(vq->iov), &out, &in,
> + NULL, NULL);
> + if (head < 0) {
> + mutex_lock(&vs->dev.mutex);

This nests dev mutex within vq mutex, can cause deadlock.
Should be the other way around.

> + vs->vs_events_dropped = true;
> + mutex_unlock(&vs->dev.mutex);
> + goto out;
> + }
> + if (head == vq->num) {
> + if (vhost_enable_notify(&vs->dev, vq))
> + goto again;
> + mutex_lock(&vs->dev.mutex);
> + vs->vs_events_dropped = true;
> + mutex_unlock(&vs->dev.mutex);
> + goto out;
> + }
> +
> + if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> + vq_er