Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-19 Thread Dr. David Alan Gilbert
> > > > #30 0x75548d18 in g_main_context_iterate.isra () at 
> > > > /lib64/libglib-2.0.so.0
> > > > #31 0x75549042 in g_main_loop_run () at /lib64/libglib-2.0.so.0
> > > > #32 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> > > > msg=0x7fffc420) at ../hw/virtio/vhost-user.c:413
> > > > #33 0x55b754b1 in vhost_user_get_u64 (dev=0x584dd600, 
> > > > request=40, u64=0x7fffc6e0) at ../hw/virtio/vhost-user.c:1349
> > > > #34 0x55b758ff in vhost_user_get_status (dev=0x584dd600, 
> > > > status=0x7fffc713 "W\020") at ../hw/virtio/vhost-user.c:1474
> > > > #35 0x55b75967 in vhost_user_add_status (dev=0x584dd600, 
> > > > status=7 '\a') at ../hw/virtio/vhost-user.c:1488
> > > > #36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, 
> > > > started=true) at ../hw/virtio/vhost-user.c:2758
> > > > #37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
> > > > vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988
> > > > #38 0x5584291c in vhost_net_start_one (net=0x584dd600, 
> > > > dev=0x57b965d0) at ../hw/net/vhost_net.c:271
> > > > #39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
> > > > ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at 
> > > > ../hw/net/vhost_net.c:412
> > > > #40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, 
> > > > status=15 '\017') at ../hw/net/virtio-net.c:311
> > > > #41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, 
> > > > status=15 '\017') at ../hw/net/virtio-net.c:392
> > > > #42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, cmd=0 
> > > > '\000', iov=0x56c7ef50, iov_cnt=1) at ../hw/net/virtio-net.c:1497
> > > > #43 0x55b1eef0 in virtio_net_handle_ctrl_iov 
> > > > (vdev=0x57b965d0, in_sg=0x56a09880, in_num=1, 
> > > > out_sg=0x56a09890, out_num=1) at ../hw/net/virtio-net.c:1534
> > > > #44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
> > > > vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557
> > > > #45 0x55b63776 in virtio_queue_notify_vq (vq=0x7fffc04ac140) at 
> > > > ../hw/virtio/virtio.c:2249
> > > > #46 0x55b669dc in virtio_queue_host_notifier_read 
> > > > (n=0x7fffc04ac1b4) at ../hw/virtio/virtio.c:3529
> > > > #47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
> > > > node=0x7ffd8800e430) at ../util/aio-posix.c:369
> > > > #48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
> > > > ../util/aio-posix.c:412
> > > > #49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
> > > > ../util/aio-posix.c:422
> > > > #50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
> > > > callback=0x0, user_data=0x0) at ../util/async.c:321
> > > > #51 0x7554895d in g_main_context_dispatch () at 
> > > > /lib64/libglib-2.0.so.0
> > > > #52 0x55e5abea in glib_pollfds_poll () at 
> > > > ../util/main-loop.c:295
> > > > #53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
> > > > ../util/main-loop.c:318
> > > > #54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
> > > > ../util/main-loop.c:604
> > > > #55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
> > > > #56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
> > > > #57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
> > > > ../softmmu/main.c:48
> > > > 
> > > > 
> > > > 
> > > > -----Original Message-----
> > > > From: Maxime Coquelin 
> > > > Sent: Thursday, January 12, 2023 5:26 PM
> > > > To: Laurent Vivier 
> > > > Cc: qemu-devel@nongnu.org; Peter Maydell ; 
> > > > Yajun Wu ; Parav Pandit ; Michael 
> > > > S. Tsirkin 
> > > > Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start
> > > > 
> > > > External email: Use caution opening links or attachments
> > > > 
> > > > 
> > > > Hi Laurent,
> > > > 
> > > > On 1/11/23 10:50, Laurent Vivier wrote:
> > > >> On 1/9/23 11:55, Michael S. Tsirkin wrote:
> > > >>> On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-18 Thread Michael S. Tsirkin
> > > >>>> #13 0x55b7179e in vhost_backend_update_device_iotlb 
> > > >>>> (dev=0x584dd600, iova=10468392960, uaddr=140736145588224, 
> > > >>>> len=4096, perm=IOMMU_RW) at ../hw/virtio/vhost-backend.c:361
> > > >>>> #14 0x55b6e34c in vhost_device_iotlb_miss 
> > > >>>> (dev=0x584dd600, iova=10468392960, write=1) at 
> > > >>>> ../hw/virtio/vhost.c:1113
> > > >>>> #15 0x55b718d9 in vhost_backend_handle_iotlb_msg 
> > > >>>> (dev=0x584dd600, imsg=0x7fffb390) at 
> > > >>>> ../hw/virtio/vhost-backend.c:393
> > > >>>> #16 0x55b76144 in slave_read (ioc=0x57a38680, 
> > > >>>> condition=G_IO_IN, opaque=0x584dd600) at 
> > > >>>> ../hw/virtio/vhost-user.c:1726
> > > >>>> #17 0x55c797a5 in qio_channel_fd_source_dispatch 
> > > >>>> (source=0x56c70250, callback=0x55b75f86 , 
> > > >>>> user_data=0x584dd600) at ../io/channel-watch.c:84
> > > >>>> #18 0x7554895d in g_main_context_dispatch () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #19 0x75548d18 in g_main_context_iterate.isra () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #20 0x75549042 in g_main_loop_run () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #21 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> > > >>>> msg=0x7fffb830) at ../hw/virtio/vhost-user.c:413
> > > >>>> #22 0x55b72e9b in process_message_reply (dev=0x584dd600, 
> > > >>>> msg=0x7fffbaf0) at ../hw/virtio/vhost-user.c:439
> > > >>>> #23 0x55b77c26 in vhost_user_send_device_iotlb_msg 
> > > >>>> (dev=0x584dd600, imsg=0x7fffbdc0) at 
> > > >>>> ../hw/virtio/vhost-user.c:2341
> > > >>>> #24 0x55b7179e in vhost_backend_update_device_iotlb 
> > > >>>> (dev=0x584dd600, iova=10442702848, uaddr=140736119898112, 
> > > >>>> len=4096, perm=IOMMU_RW) at ../hw/virtio/vhost-backend.c:361
> > > >>>> #25 0x55b6e34c in vhost_device_iotlb_miss 
> > > >>>> (dev=0x584dd600, iova=10442702848, write=1) at 
> > > >>>> ../hw/virtio/vhost.c:1113
> > > >>>> #26 0x55b718d9 in vhost_backend_handle_iotlb_msg 
> > > >>>> (dev=0x584dd600, imsg=0x7fffbf70) at 
> > > >>>> ../hw/virtio/vhost-backend.c:393
> > > >>>> #27 0x55b76144 in slave_read (ioc=0x57a38680, 
> > > >>>> condition=G_IO_IN, opaque=0x584dd600) at 
> > > >>>> ../hw/virtio/vhost-user.c:1726
> > > >>>> #28 0x55c797a5 in qio_channel_fd_source_dispatch 
> > > >>>> (source=0x56f1a530, callback=0x55b75f86 , 
> > > >>>> user_data=0x584dd600) at ../io/channel-watch.c:84
> > > >>>> #29 0x7554895d in g_main_context_dispatch () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #30 0x75548d18 in g_main_context_iterate.isra () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #31 0x75549042 in g_main_loop_run () at 
> > > >>>> /lib64/libglib-2.0.so.0
> > > >>>> #32 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> > > >>>> msg=0x7fffc420) at ../hw/virtio/vhost-user.c:413
> > > >>>> #33 0x55b754b1 in vhost_user_get_u64 (dev=0x584dd600, 
> > > >>>> request=40, u64=0x7fffc6e0) at ../hw/virtio/vhost-user.c:1349
> > > >>>> #34 0x55b758ff in vhost_user_get_status (dev=0x584dd600, 
> > > >>>> status=0x7fffc713 "W\020") at ../hw/virtio/vhost-user.c:1474
> > > >>>> #35 0x55b75967 in vhost_user_add_status (dev=0x584dd600, 
> > > >>>> status=7 '\a') at ../hw/virtio/vhost-user.c:1488
> > > >>>> #36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, 
> > > >>>> started=true) at ../hw/virtio/vhost-user.c:2758
> > > >>>> #37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
> > > >>>> vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Greg Kurz
> > >>>> user_data=0x584dd600) at ../io/channel-watch.c:84
> > >>>> #18 0x7554895d in g_main_context_dispatch () at 
> > >>>> /lib64/libglib-2.0.so.0
> > >>>> #19 0x75548d18 in g_main_context_iterate.isra () at 
> > >>>> /lib64/libglib-2.0.so.0
> > >>>> #20 0x75549042 in g_main_loop_run () at /lib64/libglib-2.0.so.0
> > >>>> #21 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> > >>>> msg=0x7fffb830) at ../hw/virtio/vhost-user.c:413
> > >>>> #22 0x55b72e9b in process_message_reply (dev=0x584dd600, 
> > >>>> msg=0x7fffbaf0) at ../hw/virtio/vhost-user.c:439
> > >>>> #23 0x55b77c26 in vhost_user_send_device_iotlb_msg 
> > >>>> (dev=0x584dd600, imsg=0x7fffbdc0) at 
> > >>>> ../hw/virtio/vhost-user.c:2341
> > >>>> #24 0x55b7179e in vhost_backend_update_device_iotlb 
> > >>>> (dev=0x584dd600, iova=10442702848, uaddr=140736119898112, 
> > >>>> len=4096, perm=IOMMU_RW) at ../hw/virtio/vhost-backend.c:361
> > >>>> #25 0x55b6e34c in vhost_device_iotlb_miss (dev=0x584dd600, 
> > >>>> iova=10442702848, write=1) at ../hw/virtio/vhost.c:1113
> > >>>> #26 0x55b718d9 in vhost_backend_handle_iotlb_msg 
> > >>>> (dev=0x584dd600, imsg=0x7fffbf70) at 
> > >>>> ../hw/virtio/vhost-backend.c:393
> > >>>> #27 0x55b76144 in slave_read (ioc=0x57a38680, 
> > >>>> condition=G_IO_IN, opaque=0x584dd600) at 
> > >>>> ../hw/virtio/vhost-user.c:1726
> > >>>> #28 0x55c797a5 in qio_channel_fd_source_dispatch 
> > >>>> (source=0x56f1a530, callback=0x55b75f86 , 
> > >>>> user_data=0x584dd600) at ../io/channel-watch.c:84
> > >>>> #29 0x7554895d in g_main_context_dispatch () at 
> > >>>> /lib64/libglib-2.0.so.0
> > >>>> #30 0x75548d18 in g_main_context_iterate.isra () at 
> > >>>> /lib64/libglib-2.0.so.0
> > >>>> #31 0x75549042 in g_main_loop_run () at /lib64/libglib-2.0.so.0
> > >>>> #32 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> > >>>> msg=0x7fffc420) at ../hw/virtio/vhost-user.c:413
> > >>>> #33 0x55b754b1 in vhost_user_get_u64 (dev=0x584dd600, 
> > >>>> request=40, u64=0x7fffc6e0) at ../hw/virtio/vhost-user.c:1349
> > >>>> #34 0x55b758ff in vhost_user_get_status (dev=0x584dd600, 
> > >>>> status=0x7fffc713 "W\020") at ../hw/virtio/vhost-user.c:1474
> > >>>> #35 0x55b75967 in vhost_user_add_status (dev=0x584dd600, 
> > >>>> status=7 '\a') at ../hw/virtio/vhost-user.c:1488
> > >>>> #36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, 
> > >>>> started=true) at ../hw/virtio/vhost-user.c:2758
> > >>>> #37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
> > >>>> vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988
> > >>>> #38 0x5584291c in vhost_net_start_one (net=0x584dd600, 
> > >>>> dev=0x57b965d0) at ../hw/net/vhost_net.c:271
> > >>>> #39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
> > >>>> ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at 
> > >>>> ../hw/net/vhost_net.c:412
> > >>>> #40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, 
> > >>>> status=15 '\017') at ../hw/net/virtio-net.c:311
> > >>>> #41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, 
> > >>>> status=15 '\017') at ../hw/net/virtio-net.c:392
> > >>>> #42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, 
> > >>>> cmd=0 '\000', iov=0x56c7ef50, iov_cnt=1) at 
> > >>>> ../hw/net/virtio-net.c:1497
> > >>>> #43 0x55b1eef0 in virtio_net_handle_ctrl_iov 
> > >>>> (vdev=0x57b965d0, in_sg=0x56a09880, in_num=1, 
> > >>>> out_sg=0x56a09890, out_num=1) at ../hw/net/virtio-net.c:1534
> > >>>> #44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
> > >>>> vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Greg Kurz
> >>>> (dev=0x584dd600, iova=10442702848, uaddr=140736119898112, len=4096, 
> >>>> perm=IOMMU_RW) at ../hw/virtio/vhost-backend.c:361
> >>>> #25 0x55b6e34c in vhost_device_iotlb_miss (dev=0x584dd600, 
> >>>> iova=10442702848, write=1) at ../hw/virtio/vhost.c:1113
> >>>> #26 0x55b718d9 in vhost_backend_handle_iotlb_msg 
> >>>> (dev=0x584dd600, imsg=0x7fffbf70) at 
> >>>> ../hw/virtio/vhost-backend.c:393
> >>>> #27 0x55b76144 in slave_read (ioc=0x57a38680, 
> >>>> condition=G_IO_IN, opaque=0x584dd600) at 
> >>>> ../hw/virtio/vhost-user.c:1726
> >>>> #28 0x55c797a5 in qio_channel_fd_source_dispatch 
> >>>> (source=0x56f1a530, callback=0x55b75f86 , 
> >>>> user_data=0x584dd600) at ../io/channel-watch.c:84
> >>>> #29 0x7554895d in g_main_context_dispatch () at 
> >>>> /lib64/libglib-2.0.so.0
> >>>> #30 0x75548d18 in g_main_context_iterate.isra () at 
> >>>> /lib64/libglib-2.0.so.0
> >>>> #31 0x75549042 in g_main_loop_run () at /lib64/libglib-2.0.so.0
> >>>> #32 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
> >>>> msg=0x7fffc420) at ../hw/virtio/vhost-user.c:413
> >>>> #33 0x55b754b1 in vhost_user_get_u64 (dev=0x584dd600, 
> >>>> request=40, u64=0x7fffc6e0) at ../hw/virtio/vhost-user.c:1349
> >>>> #34 0x55b758ff in vhost_user_get_status (dev=0x584dd600, 
> >>>> status=0x7fffc713 "W\020") at ../hw/virtio/vhost-user.c:1474
> >>>> #35 0x55b75967 in vhost_user_add_status (dev=0x584dd600, 
> >>>> status=7 '\a') at ../hw/virtio/vhost-user.c:1488
> >>>> #36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, 
> >>>> started=true) at ../hw/virtio/vhost-user.c:2758
> >>>> #37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
> >>>> vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988
> >>>> #38 0x5584291c in vhost_net_start_one (net=0x584dd600, 
> >>>> dev=0x57b965d0) at ../hw/net/vhost_net.c:271
> >>>> #39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
> >>>> ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at 
> >>>> ../hw/net/vhost_net.c:412
> >>>> #40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, 
> >>>> status=15 '\017') at ../hw/net/virtio-net.c:311
> >>>> #41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, 
> >>>> status=15 '\017') at ../hw/net/virtio-net.c:392
> >>>> #42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, cmd=0 
> >>>> '\000', iov=0x56c7ef50, iov_cnt=1) at ../hw/net/virtio-net.c:1497
> >>>> #43 0x55b1eef0 in virtio_net_handle_ctrl_iov 
> >>>> (vdev=0x57b965d0, in_sg=0x56a09880, in_num=1, 
> >>>> out_sg=0x56a09890, out_num=1) at ../hw/net/virtio-net.c:1534
> >>>> #44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
> >>>> vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557
> >>>> #45 0x55b63776 in virtio_queue_notify_vq (vq=0x7fffc04ac140) at 
> >>>> ../hw/virtio/virtio.c:2249
> >>>> #46 0x55b669dc in virtio_queue_host_notifier_read 
> >>>> (n=0x7fffc04ac1b4) at ../hw/virtio/virtio.c:3529
> >>>> #47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
> >>>> node=0x7ffd8800e430) at ../util/aio-posix.c:369
> >>>> #48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
> >>>> ../util/aio-posix.c:412
> >>>> #49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
> >>>> ../util/aio-posix.c:422
> >>>> #50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
> >>>> callback=0x0, user_data=0x0) at ../util/async.c:321
> >>>> #51 0x7554895d in g_main_context_dispatch () at 
> >>>> /lib64/libglib-2.0.so.0
> >>>> #52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
> >>>> #53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
> >>>> ../util/main-loop.c:318
> >>>> #54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
> >>>> ../util/main-loop.c:604

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Maxime Coquelin
#49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
../util/aio-posix.c:422
#50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
callback=0x0, user_data=0x0) at ../util/async.c:321
#51 0x7554895d in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
#53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
../util/main-loop.c:318
#54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
../util/main-loop.c:604
#55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
#56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
#57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
../softmmu/main.c:48



-----Original Message-----
From: Maxime Coquelin 
Sent: Thursday, January 12, 2023 5:26 PM
To: Laurent Vivier 
Cc: qemu-devel@nongnu.org; Peter Maydell ; Yajun Wu 
; Parav Pandit ; Michael S. Tsirkin 

Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

External email: Use caution opening links or attachments


Hi Laurent,

On 1/11/23 10:50, Laurent Vivier wrote:

On 1/9/23 11:55, Michael S. Tsirkin wrote:

On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:

Hi,

it seems this patch breaks vhost-user with DPDK.

See
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbu
gzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajun
w%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083d15727340c1
b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%7CTWFpbGZsb3
d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D
%7C3000%7C%7C%7C&sdata=1pjChYTKHVmBoempNitiZHBdrlPIMFjKoD6FeOVSay0%3
D&reserved=0

it seems QEMU doesn't receive the expected commands sequence:

Received unexpected msg type. Expected 22 received 40 Fail to update
device iotlb Received unexpected msg type. Expected 40 received 22
Received unexpected msg type. Expected 22 received 11 Fail to update
device iotlb Received unexpected msg type. Expected 11 received 22
vhost VQ 1 ring restore failed: -71: Protocol error (71) Received
unexpected msg type. Expected 22 received 11 Fail to update device
iotlb Received unexpected msg type. Expected 11 received 22 vhost VQ
0 ring restore failed: -71: Protocol error (71) unable to start
vhost net: 71: falling back on userspace virtio

It receives VHOST_USER_GET_STATUS (40) when it expects
VHOST_USER_IOTLB_MSG (22) and VHOST_USER_IOTLB_MSG when it expects
VHOST_USER_GET_STATUS.
and VHOST_USER_GET_VRING_BASE (11) when it expect
VHOST_USER_GET_STATUS and so on.

Any idea?


We only have a single thread on DPDK side to handle Vhost-user requests, it 
will read a request, handle it and reply to it. Then it reads the next one, 
etc... So I don't think it is possible to mix request replies order on DPDK 
side.

Maybe there are two threads concurrently sending requests on QEMU side?

Regards,
Maxime


Thanks,
Laurent



So I am guessing it's coming from:

   if (msg.hdr.request != request) {
   error_report("Received unexpected msg type. Expected %d
received %d",
request, msg.hdr.request);
   return -EPROTO;
   }

in process_message_reply and/or in vhost_user_get_u64.



On 11/7/22 23:53, Michael S. Tsirkin wrote:

From: Yajun Wu 

The motivation of adding vhost-user vhost_dev_start support is to
improve backend configuration speed and reduce live migration VM
downtime.

Today VQ configuration is issued one by one. For virtio net with
multi-queue support, backend needs to update RSS (Receive side
scaling) on every rx queue enable. Updating RSS is time-consuming
(typical time like 7ms).

Implement already defined vhost status and message in the vhost
specification [1].
(a) VHOST_USER_PROTOCOL_F_STATUS
(b) VHOST_USER_SET_STATUS
(c) VHOST_USER_GET_STATUS

Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK
for device start and reset(0) for device stop.

On reception of the DRIVER_OK message, backend can apply the needed
setting only once (instead of incremental) and also utilize
parallelism on enabling queues.

This improves QEMU's live migration downtime with vhost user
backend implementation by great margin, specially for the large
number of VQs of 64 from 800 msec to 250 msec.

[1]
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fq
emu-project.gitlab.io%2Fqemu%2Finterop%2Fvhost-user.html&data=05%7C
01%7Cyajunw%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083
d15727340c1b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%
7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiL
CJXVCI6Mn0%3D%7C3000%7C%7C%7C&sdata=YPbrFRJA92KLLwADMUDvwBt%2Fme2Ef
GZuVANOmXH5pic%3D&reserved=0

Signed-off-by: Yajun Wu 
Acked-by: Parav Pandit 
Message-Id: <20221017064452.1226514-3-yaj...@nvidia.com>
Reviewed-by: Michael S. Tsirkin 
Signed-off-by: Michael S. Tsirkin 


Probably easiest to debug from dpdk side.
Does the problem go away if you disable the feature
VHOST_USER_PROTOCOL_F_STATUS in dpdk?


Maxime could you help to debug this?

Thanks,
Laurent
















Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Greg Kurz
> > ../hw/virtio/virtio.c:2249
> > #46 0x55b669dc in virtio_queue_host_notifier_read 
> > (n=0x7fffc04ac1b4) at ../hw/virtio/virtio.c:3529
> > #47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
> > node=0x7ffd8800e430) at ../util/aio-posix.c:369
> > #48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
> > ../util/aio-posix.c:412
> > #49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
> > ../util/aio-posix.c:422
> > #50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
> > callback=0x0, user_data=0x0) at ../util/async.c:321
> > #51 0x7554895d in g_main_context_dispatch () at 
> > /lib64/libglib-2.0.so.0
> > #52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
> > #53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
> > ../util/main-loop.c:318
> > #54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
> > ../util/main-loop.c:604
> > #55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
> > #56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
> > #57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
> > ../softmmu/main.c:48
> > 
> > 
> > 
> > -----Original Message-----
> > From: Maxime Coquelin 
> > Sent: Thursday, January 12, 2023 5:26 PM
> > To: Laurent Vivier 
> > Cc: qemu-devel@nongnu.org; Peter Maydell ; Yajun 
> > Wu ; Parav Pandit ; Michael S. Tsirkin 
> > 
> > Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start
> > 
> > External email: Use caution opening links or attachments
> > 
> > 
> > Hi Laurent,
> > 
> > On 1/11/23 10:50, Laurent Vivier wrote:
> >> On 1/9/23 11:55, Michael S. Tsirkin wrote:
> >>> On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:
> >>>> Hi,
> >>>>
> >>>> it seems this patch breaks vhost-user with DPDK.
> >>>>
> >>>> See
> >>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbu
> >>>> gzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajun
> >>>> w%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083d15727340c1
> >>>> b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%7CTWFpbGZsb3
> >>>> d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D
> >>>> %7C3000%7C%7C%7C&sdata=1pjChYTKHVmBoempNitiZHBdrlPIMFjKoD6FeOVSay0%3
> >>>> D&reserved=0
> >>>>
> >>>> it seems QEMU doesn't receive the expected commands sequence:
> >>>>
> >>>> Received unexpected msg type. Expected 22 received 40 Fail to update
> >>>> device iotlb Received unexpected msg type. Expected 40 received 22
> >>>> Received unexpected msg type. Expected 22 received 11 Fail to update
> >>>> device iotlb Received unexpected msg type. Expected 11 received 22
> >>>> vhost VQ 1 ring restore failed: -71: Protocol error (71) Received
> >>>> unexpected msg type. Expected 22 received 11 Fail to update device
> >>>> iotlb Received unexpected msg type. Expected 11 received 22 vhost VQ
> >>>> 0 ring restore failed: -71: Protocol error (71) unable to start
> >>>> vhost net: 71: falling back on userspace virtio
> >>>>
> >>>> It receives VHOST_USER_GET_STATUS (40) when it expects
> >>>> VHOST_USER_IOTLB_MSG (22) and VHOST_USER_IOTLB_MSG when it expects
> >>>> VHOST_USER_GET_STATUS.
> >>>> and VHOST_USER_GET_VRING_BASE (11) when it expect
> >>>> VHOST_USER_GET_STATUS and so on.
> >>>>
> >>>> Any idea?
> > 
> > We only have a single thread on DPDK side to handle Vhost-user requests, it 
> > will read a request, handle it and reply to it. Then it reads the next one, 
> > etc... So I don't think it is possible to mix request replies order on DPDK 
> > side.
> > 
> > Maybe there are two threads concurrently sending requests on QEMU side?
> > 
> > Regards,
> > Maxime
> > 
> >>>> Thanks,
> >>>> Laurent
> >>>
> >>>
> >>> So I am guessing it's coming from:
> >>>
> >>>   if (msg.hdr.request != request) {
> >>>   error_report("Received unexpected msg type. Expected %d
> >>> received %d",
> >>>request, msg.hdr.request);
> >>> 

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Greg Kurz
> > > #39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
> > > ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at 
> > > ../hw/net/vhost_net.c:412
> > > #40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, 
> > > status=15 '\017') at ../hw/net/virtio-net.c:311
> > > #41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, 
> > > status=15 '\017') at ../hw/net/virtio-net.c:392
> > > #42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, cmd=0 
> > > '\000', iov=0x56c7ef50, iov_cnt=1) at ../hw/net/virtio-net.c:1497
> > > #43 0x55b1eef0 in virtio_net_handle_ctrl_iov 
> > > (vdev=0x57b965d0, in_sg=0x56a09880, in_num=1, 
> > > out_sg=0x56a09890, out_num=1) at ../hw/net/virtio-net.c:1534
> > > #44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
> > > vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557
> > > #45 0x55b63776 in virtio_queue_notify_vq (vq=0x7fffc04ac140) at 
> > > ../hw/virtio/virtio.c:2249
> > > #46 0x0000555555b669dc in virtio_queue_host_notifier_read 
> > > (n=0x7fffc04ac1b4) at ../hw/virtio/virtio.c:3529
> > > #47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
> > > node=0x7ffd8800e430) at ../util/aio-posix.c:369
> > > #48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
> > > ../util/aio-posix.c:412
> > > #49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
> > > ../util/aio-posix.c:422
> > > #50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
> > > callback=0x0, user_data=0x0) at ../util/async.c:321
> > > #51 0x7554895d in g_main_context_dispatch () at 
> > > /lib64/libglib-2.0.so.0
> > > #52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
> > > #53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
> > > ../util/main-loop.c:318
> > > #54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
> > > ../util/main-loop.c:604
> > > #55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
> > > #56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
> > > #57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
> > > ../softmmu/main.c:48
> > > 
> > > 
> > > 
> > > -----Original Message-----
> > > From: Maxime Coquelin 
> > > Sent: Thursday, January 12, 2023 5:26 PM
> > > To: Laurent Vivier 
> > > Cc: qemu-devel@nongnu.org; Peter Maydell ; 
> > > Yajun Wu ; Parav Pandit ; Michael S. 
> > > Tsirkin 
> > > Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start
> > > 
> > > External email: Use caution opening links or attachments
> > > 
> > > 
> > > Hi Laurent,
> > > 
> > > On 1/11/23 10:50, Laurent Vivier wrote:
> > >> On 1/9/23 11:55, Michael S. Tsirkin wrote:
> > >>> On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:
> > >>>> Hi,
> > >>>>
> > >>>> it seems this patch breaks vhost-user with DPDK.
> > >>>>
> > >>>> See
> > >>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbu
> > >>>> gzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajun
> > >>>> w%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083d15727340c1
> > >>>> b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%7CTWFpbGZsb3
> > >>>> d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D
> > >>>> %7C3000%7C%7C%7C&sdata=1pjChYTKHVmBoempNitiZHBdrlPIMFjKoD6FeOVSay0%3
> > >>>> D&reserved=0
> > >>>>
> > >>>> it seems QEMU doesn't receive the expected commands sequence:
> > >>>>
> > >>>> Received unexpected msg type. Expected 22 received 40 Fail to update
> > >>>> device iotlb Received unexpected msg type. Expected 40 received 22
> > >>>> Received unexpected msg type. Expected 22 received 11 Fail to update
> > >>>> device iotlb Received unexpected msg type. Expected 11 received 22
> > >>>> vhost VQ 1 ring restore failed: -71: Protocol error (71) Received
> > >>>> unexpected msg type. Expected 22 received 11 Fail to update device
> > >>>> iotlb Received unexpected msg type. Expected 11 received 22 vhost VQ
> > >>

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-17 Thread Maxime Coquelin
#26 0x55b718d9 in vhost_backend_handle_iotlb_msg (dev=0x584dd600, 
imsg=0x7fffbf70) at ../hw/virtio/vhost-backend.c:393
#27 0x55b76144 in slave_read (ioc=0x57a38680, condition=G_IO_IN, 
opaque=0x584dd600) at ../hw/virtio/vhost-user.c:1726
#28 0x55c797a5 in qio_channel_fd_source_dispatch (source=0x56f1a530, 
callback=0x55b75f86 , user_data=0x584dd600) at 
../io/channel-watch.c:84
#29 0x7554895d in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#30 0x75548d18 in g_main_context_iterate.isra () at 
/lib64/libglib-2.0.so.0
#31 0x75549042 in g_main_loop_run () at /lib64/libglib-2.0.so.0
#32 0x55b72de7 in vhost_user_read (dev=0x584dd600, 
msg=0x7fffc420) at ../hw/virtio/vhost-user.c:413
#33 0x55b754b1 in vhost_user_get_u64 (dev=0x584dd600, request=40, 
u64=0x7fffc6e0) at ../hw/virtio/vhost-user.c:1349
#34 0x55b758ff in vhost_user_get_status (dev=0x584dd600, 
status=0x7fffc713 "W\020") at ../hw/virtio/vhost-user.c:1474
#35 0x55b75967 in vhost_user_add_status (dev=0x584dd600, status=7 
'\a') at ../hw/virtio/vhost-user.c:1488
#36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, 
started=true) at ../hw/virtio/vhost-user.c:2758
#37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988
#38 0x5584291c in vhost_net_start_one (net=0x584dd600, 
dev=0x57b965d0) at ../hw/net/vhost_net.c:271
#39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at ../hw/net/vhost_net.c:412
#40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, status=15 
'\017') at ../hw/net/virtio-net.c:311
#41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, status=15 
'\017') at ../hw/net/virtio-net.c:392
#42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, cmd=0 '\000', 
iov=0x56c7ef50, iov_cnt=1) at ../hw/net/virtio-net.c:1497
#43 0x55b1eef0 in virtio_net_handle_ctrl_iov (vdev=0x57b965d0, 
in_sg=0x56a09880, in_num=1, out_sg=0x56a09890, out_num=1) at 
../hw/net/virtio-net.c:1534
#44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557
#45 0x55b63776 in virtio_queue_notify_vq (vq=0x7fffc04ac140) at 
../hw/virtio/virtio.c:2249
#46 0x55b669dc in virtio_queue_host_notifier_read (n=0x7fffc04ac1b4) at 
../hw/virtio/virtio.c:3529
#47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
node=0x7ffd8800e430) at ../util/aio-posix.c:369
#48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
../util/aio-posix.c:412
#49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
../util/aio-posix.c:422
#50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
callback=0x0, user_data=0x0) at ../util/async.c:321
#51 0x7554895d in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
#53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
../util/main-loop.c:318
#54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
../util/main-loop.c:604
#55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
#56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
#57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
../softmmu/main.c:48



-----Original Message-----
From: Maxime Coquelin 
Sent: Thursday, January 12, 2023 5:26 PM
To: Laurent Vivier 
Cc: qemu-devel@nongnu.org; Peter Maydell ; Yajun Wu 
; Parav Pandit ; Michael S. Tsirkin 

Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

External email: Use caution opening links or attachments


Hi Laurent,

On 1/11/23 10:50, Laurent Vivier wrote:

On 1/9/23 11:55, Michael S. Tsirkin wrote:

On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:

Hi,

it seems this patch breaks vhost-user with DPDK.

See
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbu
gzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajun
w%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083d15727340c1
b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%7CTWFpbGZsb3
d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D
%7C3000%7C%7C%7C&sdata=1pjChYTKHVmBoempNitiZHBdrlPIMFjKoD6FeOVSay0%3
D&reserved=0

it seems QEMU doesn't receive the expected commands sequence:

Received unexpected msg type. Expected 22 received 40 Fail to update
device iotlb Received unexpected msg type. Expected 40 received 22
Received unexpected msg type. Expected 22 received 11 Fail to update
device iotlb Received unexpected msg type. Expected 11 received 22
vhost VQ 1 ring restore failed: -71: Protocol error (71) Received
unexpected msg type. Expected 22 received 11 Fail to up

RE: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-15 Thread Yajun Wu
#36 0x55b78bf6 in vhost_user_dev_start (dev=0x584dd600, started=true) at ../hw/virtio/vhost-user.c:2758
#37 0x55b709ad in vhost_dev_start (hdev=0x584dd600, 
vdev=0x57b965d0, vrings=false) at ../hw/virtio/vhost.c:1988
#38 0x5584291c in vhost_net_start_one (net=0x584dd600, 
dev=0x57b965d0) at ../hw/net/vhost_net.c:271
#39 0x55842f1e in vhost_net_start (dev=0x57b965d0, 
ncs=0x57bc09e0, data_queue_pairs=1, cvq=0) at ../hw/net/vhost_net.c:412
#40 0x55b1bf61 in virtio_net_vhost_status (n=0x57b965d0, status=15 
'\017') at ../hw/net/virtio-net.c:311
#41 0x55b1c20c in virtio_net_set_status (vdev=0x57b965d0, status=15 
'\017') at ../hw/net/virtio-net.c:392
#42 0x55b1ed04 in virtio_net_handle_mq (n=0x57b965d0, cmd=0 '\000', 
iov=0x56c7ef50, iov_cnt=1) at ../hw/net/virtio-net.c:1497
#43 0x55b1eef0 in virtio_net_handle_ctrl_iov (vdev=0x57b965d0, 
in_sg=0x56a09880, in_num=1, out_sg=0x56a09890, out_num=1) at 
../hw/net/virtio-net.c:1534
#44 0x55b1efe9 in virtio_net_handle_ctrl (vdev=0x57b965d0, 
vq=0x7fffc04ac140) at ../hw/net/virtio-net.c:1557
#45 0x55b63776 in virtio_queue_notify_vq (vq=0x7fffc04ac140) at 
../hw/virtio/virtio.c:2249
#46 0x55b669dc in virtio_queue_host_notifier_read (n=0x7fffc04ac1b4) at 
../hw/virtio/virtio.c:3529
#47 0x55e3f458 in aio_dispatch_handler (ctx=0x56a016c0, 
node=0x7ffd8800e430) at ../util/aio-posix.c:369
#48 0x55e3f613 in aio_dispatch_handlers (ctx=0x56a016c0) at 
../util/aio-posix.c:412
#49 0x55e3f669 in aio_dispatch (ctx=0x56a016c0) at 
../util/aio-posix.c:422
#50 0x55e585de in aio_ctx_dispatch (source=0x56a016c0, 
callback=0x0, user_data=0x0) at ../util/async.c:321
#51 0x7554895d in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#52 0x55e5abea in glib_pollfds_poll () at ../util/main-loop.c:295
#53 0x55e5ac64 in os_host_main_loop_wait (timeout=0) at 
../util/main-loop.c:318
#54 0x55e5ad69 in main_loop_wait (nonblocking=0) at 
../util/main-loop.c:604
#55 0x559693de in qemu_main_loop () at ../softmmu/runstate.c:731
#56 0x556e7c06 in qemu_default_main () at ../softmmu/main.c:37
#57 0x556e7c3c in main (argc=71, argv=0x7fffcda8) at 
../softmmu/main.c:48



-Original Message-
From: Maxime Coquelin  
Sent: Thursday, January 12, 2023 5:26 PM
To: Laurent Vivier 
Cc: qemu-devel@nongnu.org; Peter Maydell ; Yajun Wu 
; Parav Pandit ; Michael S. Tsirkin 

Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

External email: Use caution opening links or attachments


Hi Laurent,

On 1/11/23 10:50, Laurent Vivier wrote:
> On 1/9/23 11:55, Michael S. Tsirkin wrote:
>> On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:
>>> Hi,
>>>
>>> it seems this patch breaks vhost-user with DPDK.
>>>
>>> See 
>>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbu
>>> gzilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajun
>>> w%40nvidia.com%7C47e6e0fabd044383fd3308daf47f0253%7C43083d15727340c1
>>> b7db39efd9ccc17a%7C0%7C0%7C638091123577559319%7CUnknown%7CTWFpbGZsb3
>>> d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D
>>> %7C3000%7C%7C%7C&sdata=1pjChYTKHVmBoempNitiZHBdrlPIMFjKoD6FeOVSay0%3
>>> D&reserved=0
>>>
>>> it seems QEMU doesn't receive the expected commands sequence:
>>>
>>> Received unexpected msg type. Expected 22 received 40 Fail to update 
>>> device iotlb Received unexpected msg type. Expected 40 received 22 
>>> Received unexpected msg type. Expected 22 received 11 Fail to update 
>>> device iotlb Received unexpected msg type. Expected 11 received 22 
>>> vhost VQ 1 ring restore failed: -71: Protocol error (71) Received 
>>> unexpected msg type. Expected 22 received 11 Fail to update device 
>>> iotlb Received unexpected msg type. Expected 11 received 22 vhost VQ 
>>> 0 ring restore failed: -71: Protocol error (71) unable to start 
>>> vhost net: 71: falling back on userspace virtio
>>>
>>> It receives VHOST_USER_GET_STATUS (40) when it expects 
>>> VHOST_USER_IOTLB_MSG (22) and VHOST_USER_IOTLB_MSG when it expects 
>>> VHOST_USER_GET_STATUS.
>>> and VHOST_USER_GET_VRING_BASE (11) when it expect 
>>> VHOST_USER_GET_STATUS and so on.
>>>
>>> Any idea?

We only have a single thread on DPDK side to handle Vhost-user requests, it 
will read a request, handle it and reply to it. Then it reads the next one, 
etc... So I don't think it is possible to mix request replies order on DPDK 
side.

Maybe there are two threads concurrently sending requests on QEMU side?

Regards,
Maxime

>>> Thanks,
>>> Laurent
>>
>>
>> So I am guessing it's coming from:

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-12 Thread Maxime Coquelin

Hi Laurent,

On 1/11/23 10:50, Laurent Vivier wrote:

On 1/9/23 11:55, Michael S. Tsirkin wrote:

On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:

Hi,

it seems this patch breaks vhost-user with DPDK.

See https://bugzilla.redhat.com/show_bug.cgi?id=2155173

it seems QEMU doesn't receive the expected commands sequence:

Received unexpected msg type. Expected 22 received 40
Fail to update device iotlb
Received unexpected msg type. Expected 40 received 22
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 1 ring restore failed: -71: Protocol error (71)
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 0 ring restore failed: -71: Protocol error (71)
unable to start vhost net: 71: falling back on userspace virtio

It receives VHOST_USER_GET_STATUS (40) when it expects 
VHOST_USER_IOTLB_MSG (22)

and VHOST_USER_IOTLB_MSG when it expects VHOST_USER_GET_STATUS.
and VHOST_USER_GET_VRING_BASE (11) when it expect 
VHOST_USER_GET_STATUS and so on.


Any idea?


We only have a single thread on DPDK side to handle Vhost-user requests,
it will read a request, handle it and reply to it. Then it reads the
next one, etc... So I don't think it is possible to mix request replies
order on DPDK side.

Maybe there are two threads concurrently sending requests on QEMU side?

Regards,
Maxime


Thanks,
Laurent



So I am guessing it's coming from:

 if (msg.hdr.request != request) {
 error_report("Received unexpected msg type. Expected %d received %d",
  request, msg.hdr.request);
 return -EPROTO;
 }

in process_message_reply and/or in vhost_user_get_u64.



On 11/7/22 23:53, Michael S. Tsirkin wrote:

From: Yajun Wu 

The motivation of adding vhost-user vhost_dev_start support is to
improve backend configuration speed and reduce live migration VM
downtime.

Today VQ configuration is issued one by one. For virtio net with
multi-queue support, backend needs to update RSS (Receive side
scaling) on every rx queue enable. Updating RSS is time-consuming
(typical time like 7ms).

Implement already defined vhost status and message in the vhost
specification [1].
(a) VHOST_USER_PROTOCOL_F_STATUS
(b) VHOST_USER_SET_STATUS
(c) VHOST_USER_GET_STATUS

Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK for
device start and reset(0) for device stop.

On reception of the DRIVER_OK message, backend can apply the needed 
setting
only once (instead of incremental) and also utilize parallelism on 
enabling

queues.

This improves QEMU's live migration downtime with vhost user backend
implementation by great margin, specially for the large number of 
VQs of 64

from 800 msec to 250 msec.

[1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html

Signed-off-by: Yajun Wu 
Acked-by: Parav Pandit 
Message-Id: <20221017064452.1226514-3-yaj...@nvidia.com>
Reviewed-by: Michael S. Tsirkin 
Signed-off-by: Michael S. Tsirkin 


Probably easiest to debug from dpdk side.
Does the problem go away if you disable the feature 
VHOST_USER_PROTOCOL_F_STATUS in dpdk?


Maxime could you help to debug this?

Thanks,
Laurent






RE: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-11 Thread Yajun Wu
Hi,

VHOST_USER_PROTOCOL_F_STATUS is enabled by default (dpdk):

lib/vhost/vhost_user.h

17 #define VHOST_USER_PROTOCOL_FEATURES((1ULL << VHOST_USER_PROTOCOL_F_MQ) 
| \
 18  (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
 19  (1ULL << VHOST_USER_PROTOCOL_F_RARP) | \
 20  (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
 21  (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
 22  (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
 23  (1ULL << VHOST_USER_PROTOCOL_F_CRYPTO_SESSION) | \
 24  (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
 25  (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \
 26  (1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT) | \
 27  (1ULL << VHOST_USER_PROTOCOL_F_STATUS))

Removing VHOST_USER_PROTOCOL_F_STATUS disables the VHOST_USER_SET_STATUS/GET_STATUS 
messages.
That should work around this issue.

Thanks,
Yajun

-Original Message-
From: Laurent Vivier  
Sent: Wednesday, January 11, 2023 5:50 PM
To: Maxime Coquelin 
Cc: qemu-devel@nongnu.org; Peter Maydell ; Yajun Wu 
; Parav Pandit ; Michael S. Tsirkin 

Subject: Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

External email: Use caution opening links or attachments


On 1/9/23 11:55, Michael S. Tsirkin wrote:
> On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:
>> Hi,
>>
>> it seems this patch breaks vhost-user with DPDK.
>>
>> See 
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbug
>> zilla.redhat.com%2Fshow_bug.cgi%3Fid%3D2155173&data=05%7C01%7Cyajunw%
>> 40nvidia.com%7Cf4c581251ab548d64ae708daf3b94867%7C43083d15727340c1b7d
>> b39efd9ccc17a%7C0%7C0%7C638090274351645141%7CUnknown%7CTWFpbGZsb3d8ey
>> JWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C30
>> 00%7C%7C%7C&sdata=m582YO4Sd2jJ0S%2F%2FSv9zx6NSuXQIrRwkqBPgYedO%2Fr8%3
>> D&reserved=0
>>
>> it seems QEMU doesn't receive the expected commands sequence:
>>
>> Received unexpected msg type. Expected 22 received 40 Fail to update 
>> device iotlb Received unexpected msg type. Expected 40 received 22 
>> Received unexpected msg type. Expected 22 received 11 Fail to update 
>> device iotlb Received unexpected msg type. Expected 11 received 22 
>> vhost VQ 1 ring restore failed: -71: Protocol error (71) Received 
>> unexpected msg type. Expected 22 received 11 Fail to update device 
>> iotlb Received unexpected msg type. Expected 11 received 22 vhost VQ 
>> 0 ring restore failed: -71: Protocol error (71) unable to start vhost 
>> net: 71: falling back on userspace virtio
>>
>> It receives VHOST_USER_GET_STATUS (40) when it expects 
>> VHOST_USER_IOTLB_MSG (22) and VHOST_USER_IOTLB_MSG when it expects 
>> VHOST_USER_GET_STATUS.
>> and VHOST_USER_GET_VRING_BASE (11) when it expect VHOST_USER_GET_STATUS and 
>> so on.
>>
>> Any idea?
>>
>> Thanks,
>> Laurent
>
>
> So I am guessing it's coming from:
>
>  if (msg.hdr.request != request) {
>  error_report("Received unexpected msg type. Expected %d received %d",
>   request, msg.hdr.request);
>  return -EPROTO;
>  }
>
> in process_message_reply and/or in vhost_user_get_u64.
>
>
>> On 11/7/22 23:53, Michael S. Tsirkin wrote:
>>> From: Yajun Wu 
>>>
>>> The motivation of adding vhost-user vhost_dev_start support is to 
>>> improve backend configuration speed and reduce live migration VM 
>>> downtime.
>>>
>>> Today VQ configuration is issued one by one. For virtio net with 
>>> multi-queue support, backend needs to update RSS (Receive side
>>> scaling) on every rx queue enable. Updating RSS is time-consuming 
>>> (typical time like 7ms).
>>>
>>> Implement already defined vhost status and message in the vhost 
>>> specification [1].
>>> (a) VHOST_USER_PROTOCOL_F_STATUS
>>> (b) VHOST_USER_SET_STATUS
>>> (c) VHOST_USER_GET_STATUS
>>>
>>> Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK 
>>> for device start and reset(0) for device stop.
>>>
>>> On reception of the DRIVER_OK message, backend can apply the needed 
>>> setting only once (instead of incremental) and also utilize 
>>> parallelism on enabling queues.
>>>
>>> This improves QEMU's live migration downtime with vhost user backend 
>>> implementation by great margin, specially for the large number of 

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-11 Thread Laurent Vivier

On 1/9/23 11:55, Michael S. Tsirkin wrote:

On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:

Hi,

it seems this patch breaks vhost-user with DPDK.

See https://bugzilla.redhat.com/show_bug.cgi?id=2155173

it seems QEMU doesn't receive the expected commands sequence:

Received unexpected msg type. Expected 22 received 40
Fail to update device iotlb
Received unexpected msg type. Expected 40 received 22
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 1 ring restore failed: -71: Protocol error (71)
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 0 ring restore failed: -71: Protocol error (71)
unable to start vhost net: 71: falling back on userspace virtio

It receives VHOST_USER_GET_STATUS (40) when it expects VHOST_USER_IOTLB_MSG (22)
and VHOST_USER_IOTLB_MSG when it expects VHOST_USER_GET_STATUS.
and VHOST_USER_GET_VRING_BASE (11) when it expect VHOST_USER_GET_STATUS and so 
on.

Any idea?

Thanks,
Laurent



So I am guessing it's coming from:

 if (msg.hdr.request != request) {
 error_report("Received unexpected msg type. Expected %d received %d",
  request, msg.hdr.request);
 return -EPROTO;
 }

in process_message_reply and/or in vhost_user_get_u64.



On 11/7/22 23:53, Michael S. Tsirkin wrote:

From: Yajun Wu 

The motivation of adding vhost-user vhost_dev_start support is to
improve backend configuration speed and reduce live migration VM
downtime.

Today VQ configuration is issued one by one. For virtio net with
multi-queue support, backend needs to update RSS (Receive side
scaling) on every rx queue enable. Updating RSS is time-consuming
(typical time like 7ms).

Implement already defined vhost status and message in the vhost
specification [1].
(a) VHOST_USER_PROTOCOL_F_STATUS
(b) VHOST_USER_SET_STATUS
(c) VHOST_USER_GET_STATUS

Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK for
device start and reset(0) for device stop.

On reception of the DRIVER_OK message, backend can apply the needed setting
only once (instead of incremental) and also utilize parallelism on enabling
queues.

This improves QEMU's live migration downtime with vhost user backend
implementation by great margin, specially for the large number of VQs of 64
from 800 msec to 250 msec.

[1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html

Signed-off-by: Yajun Wu 
Acked-by: Parav Pandit 
Message-Id: <20221017064452.1226514-3-yaj...@nvidia.com>
Reviewed-by: Michael S. Tsirkin 
Signed-off-by: Michael S. Tsirkin 


Probably easiest to debug from dpdk side.
Does the problem go away if you disable the feature 
VHOST_USER_PROTOCOL_F_STATUS in dpdk?


Maxime could you help to debug this?

Thanks,
Laurent




Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-09 Thread Michael S. Tsirkin
On Fri, Jan 06, 2023 at 03:21:43PM +0100, Laurent Vivier wrote:
> Hi,
> 
> it seems this patch breaks vhost-user with DPDK.
> 
> See https://bugzilla.redhat.com/show_bug.cgi?id=2155173
> 
> it seems QEMU doesn't receive the expected commands sequence:
> 
> Received unexpected msg type. Expected 22 received 40
> Fail to update device iotlb
> Received unexpected msg type. Expected 40 received 22
> Received unexpected msg type. Expected 22 received 11
> Fail to update device iotlb
> Received unexpected msg type. Expected 11 received 22
> vhost VQ 1 ring restore failed: -71: Protocol error (71)
> Received unexpected msg type. Expected 22 received 11
> Fail to update device iotlb
> Received unexpected msg type. Expected 11 received 22
> vhost VQ 0 ring restore failed: -71: Protocol error (71)
> unable to start vhost net: 71: falling back on userspace virtio
> 
> It receives VHOST_USER_GET_STATUS (40) when it expects VHOST_USER_IOTLB_MSG 
> (22)
> and VHOST_USER_IOTLB_MSG when it expects VHOST_USER_GET_STATUS.
> and VHOST_USER_GET_VRING_BASE (11) when it expect VHOST_USER_GET_STATUS and 
> so on.
> 
> Any idea?
> 
> Thanks,
> Laurent


So I am guessing it's coming from:

if (msg.hdr.request != request) {
error_report("Received unexpected msg type. Expected %d received %d",
 request, msg.hdr.request); 
return -EPROTO;  
}   

in process_message_reply and/or in vhost_user_get_u64.


> On 11/7/22 23:53, Michael S. Tsirkin wrote:
> > From: Yajun Wu 
> > 
> > The motivation of adding vhost-user vhost_dev_start support is to
> > improve backend configuration speed and reduce live migration VM
> > downtime.
> > 
> > Today VQ configuration is issued one by one. For virtio net with
> > multi-queue support, backend needs to update RSS (Receive side
> > scaling) on every rx queue enable. Updating RSS is time-consuming
> > (typical time like 7ms).
> > 
> > Implement already defined vhost status and message in the vhost
> > specification [1].
> > (a) VHOST_USER_PROTOCOL_F_STATUS
> > (b) VHOST_USER_SET_STATUS
> > (c) VHOST_USER_GET_STATUS
> > 
> > Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK for
> > device start and reset(0) for device stop.
> > 
> > On reception of the DRIVER_OK message, backend can apply the needed setting
> > only once (instead of incremental) and also utilize parallelism on enabling
> > queues.
> > 
> > This improves QEMU's live migration downtime with vhost user backend
> > implementation by great margin, specially for the large number of VQs of 64
> > from 800 msec to 250 msec.
> > 
> > [1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html
> > 
> > Signed-off-by: Yajun Wu 
> > Acked-by: Parav Pandit 
> > Message-Id: <20221017064452.1226514-3-yaj...@nvidia.com>
> > Reviewed-by: Michael S. Tsirkin 
> > Signed-off-by: Michael S. Tsirkin 

Probably easiest to debug from dpdk side.
Does the problem go away if you disable the feature 
VHOST_USER_PROTOCOL_F_STATUS in dpdk?

> > ---
> >   hw/virtio/vhost-user.c | 74 +-
> >   1 file changed, 73 insertions(+), 1 deletion(-)
> > 
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > index d256ce589b..abe23d4ebe 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -81,6 +81,7 @@ enum VhostUserProtocolFeature {
> >   VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
> >   /* Feature 14 reserved for 
> > VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
> >   VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
> > +VHOST_USER_PROTOCOL_F_STATUS = 16,
> >   VHOST_USER_PROTOCOL_F_MAX
> >   };
> > @@ -126,6 +127,8 @@ typedef enum VhostUserRequest {
> >   VHOST_USER_GET_MAX_MEM_SLOTS = 36,
> >   VHOST_USER_ADD_MEM_REG = 37,
> >   VHOST_USER_REM_MEM_REG = 38,
> > +VHOST_USER_SET_STATUS = 39,
> > +VHOST_USER_GET_STATUS = 40,
> >   VHOST_USER_MAX
> >   } VhostUserRequest;
> > @@ -1452,6 +1455,43 @@ static int vhost_user_set_u64(struct vhost_dev *dev, 
> > int request, uint64_t u64,
> >   return 0;
> >   }
> > +static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
> > +{
> > +return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
> > +}
> > +
> > +static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
> > +{
> > +uint64_t value;
> > +int ret;
> > +
> > +ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
> > +if (ret < 0) {
> > +return ret;
> > +}
> > +*status = value;
> > +
> > +return 0;
> > +}
> > +
> > +static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
> > +{
> > +uint8_t s;
> > +int ret;
> > +
> > +ret = vhost_user_get_status(dev, &s);
> > +if (ret < 0) {
> > +return ret;
> > +}
> > +
> > +if ((s & status) == status) {
> > +return 0;
> > +}
> > +s |= status;
> > +
> > +return vhost_user_set_status(dev, s);
> >

Re: [PULL v4 76/83] vhost-user: Support vhost_dev_start

2023-01-06 Thread Laurent Vivier

Hi,

it seems this patch breaks vhost-user with DPDK.

See https://bugzilla.redhat.com/show_bug.cgi?id=2155173

it seems QEMU doesn't receive the expected commands sequence:

Received unexpected msg type. Expected 22 received 40
Fail to update device iotlb
Received unexpected msg type. Expected 40 received 22
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 1 ring restore failed: -71: Protocol error (71)
Received unexpected msg type. Expected 22 received 11
Fail to update device iotlb
Received unexpected msg type. Expected 11 received 22
vhost VQ 0 ring restore failed: -71: Protocol error (71)
unable to start vhost net: 71: falling back on userspace virtio

It receives VHOST_USER_GET_STATUS (40) when it expects VHOST_USER_IOTLB_MSG (22)
and VHOST_USER_IOTLB_MSG when it expects VHOST_USER_GET_STATUS.
and VHOST_USER_GET_VRING_BASE (11) when it expect VHOST_USER_GET_STATUS and so 
on.

Any idea?

Thanks,
Laurent

On 11/7/22 23:53, Michael S. Tsirkin wrote:

From: Yajun Wu 

The motivation of adding vhost-user vhost_dev_start support is to
improve backend configuration speed and reduce live migration VM
downtime.

Today VQ configuration is issued one by one. For virtio net with
multi-queue support, backend needs to update RSS (Receive side
scaling) on every rx queue enable. Updating RSS is time-consuming
(typical time like 7ms).

Implement already defined vhost status and message in the vhost
specification [1].
(a) VHOST_USER_PROTOCOL_F_STATUS
(b) VHOST_USER_SET_STATUS
(c) VHOST_USER_GET_STATUS

Send message VHOST_USER_SET_STATUS with VIRTIO_CONFIG_S_DRIVER_OK for
device start and reset(0) for device stop.

On reception of the DRIVER_OK message, backend can apply the needed setting
only once (instead of incremental) and also utilize parallelism on enabling
queues.

This improves QEMU's live migration downtime with vhost user backend
implementation by great margin, specially for the large number of VQs of 64
from 800 msec to 250 msec.

[1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html

Signed-off-by: Yajun Wu 
Acked-by: Parav Pandit 
Message-Id: <20221017064452.1226514-3-yaj...@nvidia.com>
Reviewed-by: Michael S. Tsirkin 
Signed-off-by: Michael S. Tsirkin 
---
  hw/virtio/vhost-user.c | 74 +-
  1 file changed, 73 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d256ce589b..abe23d4ebe 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -81,6 +81,7 @@ enum VhostUserProtocolFeature {
  VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
  /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
  VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+VHOST_USER_PROTOCOL_F_STATUS = 16,
  VHOST_USER_PROTOCOL_F_MAX
  };
  
@@ -126,6 +127,8 @@ typedef enum VhostUserRequest {

  VHOST_USER_GET_MAX_MEM_SLOTS = 36,
  VHOST_USER_ADD_MEM_REG = 37,
  VHOST_USER_REM_MEM_REG = 38,
+VHOST_USER_SET_STATUS = 39,
+VHOST_USER_GET_STATUS = 40,
  VHOST_USER_MAX
  } VhostUserRequest;
  
@@ -1452,6 +1455,43 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,

  return 0;
  }
  
+static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)

+{
+return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
+}
+
+static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
+{
+uint64_t value;
+int ret;
+
+ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
+if (ret < 0) {
+return ret;
+}
+*status = value;
+
+return 0;
+}
+
+static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
+{
+uint8_t s;
+int ret;
+
+ret = vhost_user_get_status(dev, &s);
+if (ret < 0) {
+return ret;
+}
+
+if ((s & status) == status) {
+return 0;
+}
+s |= status;
+
+return vhost_user_set_status(dev, s);
+}
+
  static int vhost_user_set_features(struct vhost_dev *dev,
 uint64_t features)
  {
@@ -1460,6 +1500,7 @@ static int vhost_user_set_features(struct vhost_dev *dev,
   * backend is actually logging changes
   */
  bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
+int ret;
  
  /*

   * We need to include any extra backend only feature bits that
@@ -1467,9 +1508,18 @@ static int vhost_user_set_features(struct vhost_dev *dev,
   * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
   * features.
   */
-return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
+ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
features | dev->backend_features,
log_enabled);
+
+if (virtio_has_feature(dev->protocol_features,
+   VHOST_USER_PROTOCOL_F_STATUS)) {
+