Re: [PATCH V2 6/8] scsi: virtio_scsi: fix IO hang by irq vector automatic affinity

2018-02-05 Thread Paolo Bonzini
On 05/02/2018 16:20, Ming Lei wrote:
> Now 84676c1f21e8ff5(genirq/affinity: assign vectors to all possible CPUs)
> has been merged to v4.16-rc, and it is easy for all offline CPUs to be
> allocated to some irq vectors; this can't be avoided even though the
> allocation is improved.
> 
> For example, on an 8-core VM where CPUs 4~7 are not-present/offline, with
> 4 queues of virtio-scsi, the irq affinity assigned can become the following shape:
> 
>   irq 36, cpu list 0-7
>   irq 37, cpu list 0-7
>   irq 38, cpu list 0-7
>   irq 39, cpu list 0-1
>   irq 40, cpu list 4,6
>   irq 41, cpu list 2-3
>   irq 42, cpu list 5,7
> 
> Then IO hang is triggered in case of non-SCSI_MQ.
> 
> Given storage IO is always C/S model, there isn't such issue with 
> SCSI_MQ(blk-mq),
> because no IO can be submitted to a hw queue if the hw queue has no online
> CPUs.
> 
> Fix this issue by forcing the use of blk_mq.
> 
> BTW, I have been using virtio-scsi(scsi_mq) for several years, and it has
> been quite stable, so it shouldn't cause extra risk.

I think that's ok now that we have I/O schedulers for blk-mq.

Acked-by: Paolo Bonzini 

Paolo

> Cc: Arun Easi 
> Cc: Omar Sandoval ,
> Cc: "Martin K. Petersen" ,
> Cc: James Bottomley ,
> Cc: Christoph Hellwig ,
> Cc: Don Brace 
> Cc: Kashyap Desai 
> Cc: Peter Rivera 
> Cc: Paolo Bonzini 
> Cc: Mike Snitzer 
> Reviewed-by: Hannes Reinecke 
> Tested-by: Laurence Oberman 
> Signed-off-by: Ming Lei 
> ---
>  drivers/scsi/virtio_scsi.c | 59 
> +++---
>  1 file changed, 3 insertions(+), 56 deletions(-)
> 
> diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
> index 7c28e8d4955a..54e3a0f6844c 100644
> --- a/drivers/scsi/virtio_scsi.c
> +++ b/drivers/scsi/virtio_scsi.c
> @@ -91,9 +91,6 @@ struct virtio_scsi_vq {
>  struct virtio_scsi_target_state {
>   seqcount_t tgt_seq;
>  
> - /* Count of outstanding requests. */
> - atomic_t reqs;
> -
>   /* Currently active virtqueue for requests sent to this target. */
>   struct virtio_scsi_vq *req_vq;
>  };
> @@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi 
> *vscsi, void *buf)
>   struct virtio_scsi_cmd *cmd = buf;
>   struct scsi_cmnd *sc = cmd->sc;
>   struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
> - struct virtio_scsi_target_state *tgt =
> - scsi_target(sc->device)->hostdata;
>  
>   dev_dbg(&sc->device->sdev_gendev,
>   "cmd %p response %u status %#02x sense_len %u\n",
> @@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi 
> *vscsi, void *buf)
>   }
>  
>   sc->scsi_done(sc);
> -
> - atomic_dec(&tgt->reqs);
>  }
>  
>  static void virtscsi_vq_done(struct virtio_scsi *vscsi,
> @@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host 
> *sh,
>   struct scsi_cmnd *sc)
>  {
>   struct virtio_scsi *vscsi = shost_priv(sh);
> - struct virtio_scsi_target_state *tgt =
> - scsi_target(sc->device)->hostdata;
>  
> - atomic_inc(&tgt->reqs);
>   return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
>  }
>  
> @@ -596,55 +586,11 @@ static struct virtio_scsi_vq 
> *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
>   return &vscsi->req_vqs[hwq];
>  }
>  
> -static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
> -struct virtio_scsi_target_state 
> *tgt)
> -{
> - struct virtio_scsi_vq *vq;
> - unsigned long flags;
> - u32 queue_num;
> -
> - local_irq_save(flags);
> - if (atomic_inc_return(&tgt->reqs) > 1) {
> - unsigned long seq;
> -
> - do {
> - seq = read_seqcount_begin(&tgt->tgt_seq);
> - vq = tgt->req_vq;
> - } while (read_seqcount_retry(&tgt->tgt_seq, seq));
> - } else {
> - /* no writes can be concurrent because of atomic_t */
> - write_seqcount_begin(&tgt->tgt_seq);
> -
> - /* keep previous req_vq if a reader just arrived */
> - if (unlikely(atomic_read(&tgt->reqs) > 1)) {
> - vq = tgt->req_vq;
> - goto unlock;
> - }
> -
> - queue_num = smp_processor_id();
> - while (unlikely(queue_num >= vscsi->num_queues))
> - queue_num -= vscsi->num_queues;
> - tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
> - unlock:
> - write_seqcount_end(&tgt->tgt_seq);
> - }
> - local_irq_restore(flags);
> -
> - return vq;
> -}
> -
>  static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
>  struct scsi_cmnd *sc)
>  {
>   struct virtio_scsi *vscsi = shost_priv(sh);
> - struct virtio_scsi_target_state *tgt =
> - scsi_target(sc->device)->hostdata;
> - s

[PATCH V2 6/8] scsi: virtio_scsi: fix IO hang by irq vector automatic affinity

2018-02-05 Thread Ming Lei
Now 84676c1f21e8ff5(genirq/affinity: assign vectors to all possible CPUs)
has been merged to v4.16-rc, and it is easy for all offline CPUs to be
allocated to some irq vectors; this can't be avoided even though the
allocation is improved.

For example, on an 8-core VM where CPUs 4~7 are not-present/offline, with
4 queues of virtio-scsi, the irq affinity assigned can become the following shape:

irq 36, cpu list 0-7
irq 37, cpu list 0-7
irq 38, cpu list 0-7
irq 39, cpu list 0-1
irq 40, cpu list 4,6
irq 41, cpu list 2-3
irq 42, cpu list 5,7

Then IO hang is triggered in case of non-SCSI_MQ.

Given storage IO is always C/S model, there isn't such issue with 
SCSI_MQ(blk-mq),
because no IO can be submitted to a hw queue if the hw queue has no online
CPUs.

Fix this issue by forcing the use of blk_mq.

BTW, I have been using virtio-scsi(scsi_mq) for several years, and it has
been quite stable, so it shouldn't cause extra risk.

Cc: Arun Easi 
Cc: Omar Sandoval ,
Cc: "Martin K. Petersen" ,
Cc: James Bottomley ,
Cc: Christoph Hellwig ,
Cc: Don Brace 
Cc: Kashyap Desai 
Cc: Peter Rivera 
Cc: Paolo Bonzini 
Cc: Mike Snitzer 
Reviewed-by: Hannes Reinecke 
Tested-by: Laurence Oberman 
Signed-off-by: Ming Lei 
---
 drivers/scsi/virtio_scsi.c | 59 +++---
 1 file changed, 3 insertions(+), 56 deletions(-)

diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a..54e3a0f6844c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
 struct virtio_scsi_target_state {
seqcount_t tgt_seq;
 
-   /* Count of outstanding requests. */
-   atomic_t reqs;
-
/* Currently active virtqueue for requests sent to this target. */
struct virtio_scsi_vq *req_vq;
 };
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi 
*vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
-   struct virtio_scsi_target_state *tgt =
-   scsi_target(sc->device)->hostdata;
 
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi 
*vscsi, void *buf)
}
 
sc->scsi_done(sc);
-
-   atomic_dec(&tgt->reqs);
 }
 
 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host 
*sh,
struct scsi_cmnd *sc)
 {
struct virtio_scsi *vscsi = shost_priv(sh);
-   struct virtio_scsi_target_state *tgt =
-   scsi_target(sc->device)->hostdata;
 
-   atomic_inc(&tgt->reqs);
return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
 }
 
@@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct 
virtio_scsi *vscsi,
return &vscsi->req_vqs[hwq];
 }
 
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
-  struct virtio_scsi_target_state 
*tgt)
-{
-   struct virtio_scsi_vq *vq;
-   unsigned long flags;
-   u32 queue_num;
-
-   local_irq_save(flags);
-   if (atomic_inc_return(&tgt->reqs) > 1) {
-   unsigned long seq;
-
-   do {
-   seq = read_seqcount_begin(&tgt->tgt_seq);
-   vq = tgt->req_vq;
-   } while (read_seqcount_retry(&tgt->tgt_seq, seq));
-   } else {
-   /* no writes can be concurrent because of atomic_t */
-   write_seqcount_begin(&tgt->tgt_seq);
-
-   /* keep previous req_vq if a reader just arrived */
-   if (unlikely(atomic_read(&tgt->reqs) > 1)) {
-   vq = tgt->req_vq;
-   goto unlock;
-   }
-
-   queue_num = smp_processor_id();
-   while (unlikely(queue_num >= vscsi->num_queues))
-   queue_num -= vscsi->num_queues;
-   tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
-   write_seqcount_end(&tgt->tgt_seq);
-   }
-   local_irq_restore(flags);
-
-   return vq;
-}
-
 static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
   struct scsi_cmnd *sc)
 {
struct virtio_scsi *vscsi = shost_priv(sh);
-   struct virtio_scsi_target_state *tgt =
-   scsi_target(sc->device)->hostdata;
-   struct virtio_scsi_vq *req_vq;
-
-   if (shost_use_blk_mq(sh))
-   req_vq = virtscsi_pick_vq_mq(vscsi, sc);
-   else
-   req_vq = virtscsi_pick_vq(vscsi, tgt);
+   struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 
return virtscsi_queue