On Wed, 09/24 13:18, Paolo Bonzini wrote:
> On 24/09/2014 10:27, Fam Zheng wrote:
> > For VIRTIO_SCSI_T_TMF_ABORT_TASK and VIRTIO_SCSI_T_TMF_ABORT_TASK_SET,
> > use scsi_req_cancel_async to start the cancellation.
> >
> > Because each tmf command may cancel multiple requests, we need to use a
> > counter to track the number of remaining requests we still need to wait
> > for.
> >
> > Signed-off-by: Fam Zheng <f...@redhat.com>
> > ---
> >  hw/scsi/virtio-scsi.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++-----
> >  1 file changed, 77 insertions(+), 7 deletions(-)
> >
> > diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
> > index fa36e23..9bd7d8a 100644
> > --- a/hw/scsi/virtio-scsi.c
> > +++ b/hw/scsi/virtio-scsi.c
> > @@ -208,12 +208,40 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
> >      return req;
> >  }
> >
> > -static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> > +typedef struct {
> > +    VirtIOSCSIReq *tmf_req;
> > +    int remaining;
> > +} VirtIOSCSICancelTracker;
>
> What about putting "remaining" directly in VirtIOSCSIReq?

It's rarely used, so I preferred managing it here.
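To make sure we mean the same thing: as I read it, your version would look
roughly like this (hypothetical sketch only; the "remaining" field below is
made up and not in this patch):

typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    /* Hypothetical: the counter lives in the TMF request itself, so the
     * separate tracker allocation goes away.  The last cancelled request
     * completes the TMF. */
    if (--n->tmf_req->remaining == 0) {
        virtio_scsi_complete_req(n->tmf_req);
    }
    g_free(n);
}

That would save one allocation, but it also grows every VirtIOSCSIReq for
a field that only TMF requests use.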
> > +typedef struct {
> > +    Notifier notifier;
> > +    VirtIOSCSICancelTracker *tracker;
> > +} VirtIOSCSICancelNotifier;
> > +
> > +static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
> > +{
> > +    VirtIOSCSICancelNotifier *n = container_of(notifier,
> > +                                               VirtIOSCSICancelNotifier,
> > +                                               notifier);
> > +
> > +    if (--n->tracker->remaining == 0) {
> > +        virtio_scsi_complete_req(n->tracker->tmf_req);
> > +        g_free(n->tracker);
> > +    }
> > +    g_free(n);
> > +}
> > +
> > +/* Return true if the request is ready to be completed and return to guest;
> > + * false if the request will be completed (by some other events) later, for
> > + * example in the case of async cancellation. */
> > +static bool virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
>
> Perhaps return 0/-EINPROGRESS so that it's easier to remember the
> calling convention?

OK!

> >  {
> >      SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
> >      SCSIRequest *r, *next;
> >      BusChild *kid;
> >      int target;
> > +    bool ret = true;
> > +    int cancel_count;
> >
> >      if (s->dataplane_started && bdrv_get_aio_context(d->conf.bs) != s->ctx) {
> >          aio_context_acquire(s->ctx);
> > @@ -251,7 +279,18 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >                   */
> >                  req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
> >              } else {
> > -                scsi_req_cancel(r);
> > +                VirtIOSCSICancelNotifier *notifier;
> > +                VirtIOSCSICancelTracker *tracker;
> > +
> > +                notifier = g_new(VirtIOSCSICancelNotifier, 1);
>
> Slice allocator?

Cancellation is not in the fast path, but I can do it.

> > +                notifier->notifier.notify = virtio_scsi_cancel_notify;
> > +                tracker = g_new(VirtIOSCSICancelTracker, 1);
>
> Same here if you keep VirtIOSCSICancelTracker.
>
> > +                tracker->tmf_req = req;
> > +                tracker->remaining = 1;
> > +                notifier->tracker = tracker;
> > +                scsi_req_cancel_async(r, &notifier->notifier);
> > +                ret = false;
> >              }
> >          }
> >          break;
> >
> > @@ -277,6 +316,7 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >          if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
> >              goto incorrect_lun;
> >          }
> > +        cancel_count = 0;
> >          QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
> >              if (r->hba_private) {
> >                  if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
> > @@ -286,10 +326,36 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >                      req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
> >                      break;
> >                  } else {
> > -                    scsi_req_cancel(r);
> > +                    /* Before we actually cancel any requests in the next for
> > +                     * loop, let's count them. This way, if the bus starts
> > +                     * calling back to the notifier even before we finish the
> > +                     * loop, the counter, whose value is already seen in
> > +                     * virtio_scsi_cancel_notify, will prevent us from
> > +                     * completing the tmf too quickly. */
> > +                    cancel_count++;
> >                  }
> >              }
> >          }
> > +        if (cancel_count) {
> > +            VirtIOSCSICancelNotifier *notifier;
> > +            VirtIOSCSICancelTracker *tracker;
> > +
> > +            tracker = g_new(VirtIOSCSICancelTracker, 1);
>
> Same as above.
>
> > +            tracker->tmf_req = req;
> > +            tracker->remaining = cancel_count;
> > +
> > +            QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
> > +                if (r->hba_private) {
> > +                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
>
> Same as above.
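OK. For all three allocations, the g_slice variant would be roughly the
following (untested sketch):

/* allocation sites in virtio_scsi_do_tmf() */
notifier = g_slice_new(VirtIOSCSICancelNotifier);
tracker = g_slice_new(VirtIOSCSICancelTracker);

and the matching frees in virtio_scsi_cancel_notify():

if (--n->tracker->remaining == 0) {
    virtio_scsi_complete_req(n->tracker->tmf_req);
    g_slice_free(VirtIOSCSICancelTracker, n->tracker);
}
g_slice_free(VirtIOSCSICancelNotifier, n);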
> > +                    notifier->notifier.notify = virtio_scsi_cancel_notify;
> > +                    notifier->tracker = tracker;
> > +                    scsi_req_cancel_async(r, &notifier->notifier);
> > +                }
> > +            }
> > +            ret = false;
> > +        }
> > +
> >          break;
> >
> >      case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
> > @@ -310,20 +376,22 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >          break;
> >      }
> >
> > -    return;
> > +    return ret;
> >
> >  incorrect_lun:
> >      req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
> > -    return;
> > +    return ret;
> >
> >  fail:
> >      req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
> > +    return ret;
> >  }
> >
> >  void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >  {
> >      VirtIODevice *vdev = (VirtIODevice *)s;
> >      int type;
> > +    bool should_complete = true;
> >
> >      if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
> >                     &type, sizeof(type)) < sizeof(type)) {
> > @@ -337,7 +405,7 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >                         sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
> >              virtio_scsi_bad_req();
> >          } else {
> > -            virtio_scsi_do_tmf(s, req);
> > +            should_complete = virtio_scsi_do_tmf(s, req);
> >          }
> >
> >      } else if (req->req.tmf.type == VIRTIO_SCSI_T_AN_QUERY ||
> > @@ -350,7 +418,9 @@ void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
> >              req->resp.an.response = VIRTIO_SCSI_S_OK;
> >          }
> >      }
> > -    virtio_scsi_complete_req(req);
> > +    if (should_complete) {
> > +        virtio_scsi_complete_req(req);
> > +    }
> >  }
> >
> >  static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> >
>
> Very nice apart from these comments.

Thanks for reviewing!

Fam
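P.S. For the 0/-EINPROGRESS convention, the call site would then read
something like this (sketch, untested; assumes virtio_scsi_do_tmf() is
changed to return int):

int r = virtio_scsi_do_tmf(s, req);

/* 0 means the response is ready now; -EINPROGRESS means the
 * cancellation notifier completes the request later. */
if (r == 0) {
    virtio_scsi_complete_req(req);
} else {
    assert(r == -EINPROGRESS);
}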