This patch enables sending CVQ state load commands in parallel at
device startup by the following steps:
* Refactor vhost_vdpa_net_load_cmd() to iterate through the control
  commands shadow buffers. This allows different CVQ state load
  commands to use their own unique buffers.
* Delay the polling and checking of buffers until either the SVQ is
  full or control commands shadow buffers are full.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578
Signed-off-by: Hawkins Jiawei <yin31...@gmail.com>
---
v4:
 - refactor argument `cmds_in_flight` to `len` for
   vhost_vdpa_net_svq_full()
 - check the return value of vhost_vdpa_net_svq_poll() in
   vhost_vdpa_net_svq_flush() suggested by Eugenio
 - use iov_size(), vhost_vdpa_net_load_cursor_reset() and
   iov_discard_front() to update the cursors instead of accessing
   them directly according to Eugenio

v3: https://lore.kernel.org/all/3a002790e6c880af928c6470ecbf03e7c65a68bb.1689748694.git.yin31...@gmail.com/

 net/vhost-vdpa.c | 155 +++++++++++++++++++++++++++++------------------
 1 file changed, 97 insertions(+), 58 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index a71e8c9090..818464b702 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -646,6 +646,31 @@ static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
     in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
 }
 
+/*
+ * Poll SVQ for multiple pending control commands and check the device's ack.
+ *
+ * Caller should hold the BQL when invoking this function.
+ *
+ * @s: The VhostVDPAState
+ * @len: The length of the pending status shadow buffer
+ */
+static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
+{
+    /* Device uses a one-byte length ack for each control command */
+    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
+    if (unlikely(dev_written != len)) {
+        return -EIO;
+    }
+
+    /* check the device's ack */
+    for (int i = 0; i < len; ++i) {
+        if (s->status[i] != VIRTIO_NET_OK) {
+            return -EIO;
+        }
+    }
+    return 0;
+}
+
 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor, uint8_t class,
@@ -660,10 +685,30 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
            cmd_size = sizeof(ctrl) + data_size;
     struct iovec out, in;
     ssize_t r;
+    unsigned dummy_cursor_iov_cnt;
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+    if (vhost_vdpa_net_svq_available_slots(s) < 2 ||
+        iov_size(out_cursor, 1) < cmd_size) {
+        /*
+         * It is time to flush all pending control commands if SVQ is full
+         * or control commands shadow buffers are full.
+         *
+         * We can poll here since we've had BQL from the time
+         * we sent the descriptor.
+         */
+        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
+                                     (void *)s->status);
+        if (unlikely(r < 0)) {
+            return r;
+        }
+
+        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
+    }
+
     /* Each CVQ command has one out descriptor and one in descriptor */
     assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
+    assert(iov_size(out_cursor, 1) >= cmd_size);
 
     /* Prepare the buffer for out descriptor for the device */
     iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
@@ -681,11 +726,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
         return r;
     }
 
-    /*
-     * We can poll here since we've had BQL from the time
-     * we sent the descriptor.
-     */
-    return vhost_vdpa_net_svq_poll(s, 1);
+    /* iterate the cursors */
+    dummy_cursor_iov_cnt = 1;
+    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
+    dummy_cursor_iov_cnt = 1;
+    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
+
+    return 0;
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
@@ -697,15 +744,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
             .iov_base = (void *)n->mac,
             .iov_len = sizeof(n->mac),
         };
-        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                                      VIRTIO_NET_CTRL_MAC,
-                                                      VIRTIO_NET_CTRL_MAC_ADDR_SET,
-                                                      &data, 1);
-        if (unlikely(dev_written < 0)) {
-            return dev_written;
-        }
-        if (*s->status != VIRTIO_NET_OK) {
-            return -EIO;
+        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                            VIRTIO_NET_CTRL_MAC,
+                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
+                                            &data, 1);
+        if (unlikely(r < 0)) {
+            return r;
         }
     }
 
@@ -750,15 +794,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
             .iov_len = mul_macs_size,
         },
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                         VIRTIO_NET_CTRL_MAC,
                                         VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                         data, ARRAY_SIZE(data));
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -770,7 +811,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                   struct iovec *in_cursor)
 {
     struct virtio_net_ctrl_mq mq;
-    ssize_t dev_written;
+    ssize_t r;
 
     if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
         return 0;
@@ -781,15 +822,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
         .iov_base = &mq,
         .iov_len = sizeof(mq),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_MQ,
-                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
-                                          &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_MQ,
+                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+                                &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -801,7 +839,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                         struct iovec *in_cursor)
 {
     uint64_t offloads;
-    ssize_t dev_written;
+    ssize_t r;
 
     if (!virtio_vdev_has_feature(&n->parent_obj,
                                  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -829,15 +867,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
         .iov_base = &offloads,
         .iov_len = sizeof(offloads),
     };
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS,
-                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
-                                          &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+                                &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -853,16 +888,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
         .iov_base = &on,
         .iov_len = sizeof(on),
     };
-    ssize_t dev_written;
+    ssize_t r;
 
-    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                          VIRTIO_NET_CTRL_RX,
-                                          cmd, &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (*s->status != VIRTIO_NET_OK) {
-        return -EIO;
+    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -1019,15 +1050,12 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
         .iov_base = &vid,
         .iov_len = sizeof(vid),
     };
-    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
-                                                  VIRTIO_NET_CTRL_VLAN,
-                                                  VIRTIO_NET_CTRL_VLAN_ADD,
-                                                  &data, 1);
-    if (unlikely(dev_written < 0)) {
-        return dev_written;
-    }
-    if (unlikely(*s->status != VIRTIO_NET_OK)) {
-        return -EIO;
+    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+                                        VIRTIO_NET_CTRL_VLAN,
+                                        VIRTIO_NET_CTRL_VLAN_ADD,
+                                        &data, 1);
+    if (unlikely(r < 0)) {
+        return r;
     }
 
     return 0;
@@ -1096,6 +1124,17 @@ static int vhost_vdpa_net_load(NetClientState *nc)
         return r;
     }
 
+    /*
+     * We need to poll and check all pending device's used buffers.
+     *
+     * We can poll here since we've had BQL from the time
+     * we sent the descriptor.
+     */
+    r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
+    if (unlikely(r)) {
+        return r;
+    }
+
     return 0;
 }
 
-- 
2.25.1
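
As an aside for reviewers, here is a minimal sketch of the calling
pattern this patch establishes. It is illustrative only and not part
of the patch: example_load_all() is a hypothetical caller that queues
just two of the load helpers, while the other functions are the real
ones touched by the diff above. Each vhost_vdpa_net_load_*() call now
only queues its command (flushing internally first if the SVQ or the
shadow buffers run out of room), and a single final
vhost_vdpa_net_svq_flush() polls for and validates every outstanding
ack.

static int example_load_all(VhostVDPAState *s, const VirtIONet *n)
{
    struct iovec out_cursor, in_cursor;
    ssize_t r;

    /* Point both cursors at the start of the shadow buffers */
    vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);

    /* Queue commands; each call advances the cursors on success */
    r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * Poll once for everything still in flight: the number of pending
     * acks equals how far in_cursor has advanced into s->status.
     */
    return vhost_vdpa_net_svq_flush(s,
                                    in_cursor.iov_base - (void *)s->status);
}

Since the device writes a one-byte ack per command into s->status, the
distance between in_cursor.iov_base and s->status doubles as the count
of commands whose acks have not yet been checked, which is why both
flush call sites in the diff compute the length this way.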