Re: [PATCH v4 5/7] tests/qtest/vhost-user-test: add support for the vhost-user-blk device

2020-09-09 Thread Dima Stepanov
On Tue, Sep 08, 2020 at 11:03:56PM -0400, Raphael Norwitz wrote:
> On Fri, Sep 4, 2020 at 5:35 AM Dima Stepanov  wrote:
> >
> > Add vhost_user_ops structure for the vhost-user-blk device class. Add
> > the test_reconnect and test_migrate tests for this device.
> >
> > Signed-off-by: Dima Stepanov 
> 
> Reviewed-by: Raphael Norwitz 
> 
> Just one small suggestion.
> 
> > ---
> >  tests/qtest/vhost-user-test.c | 139 +-
> >  1 file changed, 137 insertions(+), 2 deletions(-)
> 
> > @@ -857,12 +911,21 @@ static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
> >  {
> >      TestServer *s = arg;
> >      GSource *src;
> > +    int nq;
> >
> > +    if (s->vu_ops->driver_init) {
> > +        s->vu_ops->driver_init(obj, alloc);
> > +    }
> >      if (!wait_for_fds(s)) {
> >          return;
> >      }
> >
> 
> Maybe we could break this logic out into a helper? I imagine there may
> be other cases where we might want to get a number of rings for a
> given device type.
Yes, I've also thought about it, and my point is that it isn't clear
right now how it will be used. So I decided to use it in a pretty
straightforward way. As soon as we add some more vhost-user devices for
testing, we can update this code properly.

> 
> 
> > -    wait_for_rings_started(s, 2);
> > +    nq = 1;
> > +    if (s->vu_ops->type == VHOST_USER_NET) {
> > +        /* tx and rx queues */
> > +        nq = 2;
> > +    }
> > +    wait_for_rings_started(s, nq);
> >
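
For illustration, the suggested helper could look roughly like the sketch
below; the name vu_ops_nqueues and its placement are hypothetical and not
part of the patch under review:

    /* Hypothetical helper: map a vhost-user device type to the number
     * of rings the backend is expected to start. */
    static int vu_ops_nqueues(const TestServer *s)
    {
        switch (s->vu_ops->type) {
        case VHOST_USER_NET:
            return 2;   /* rx and tx queues */
        case VHOST_USER_BLK:
        default:
            return 1;   /* single request queue */
        }
    }

test_reconnect() and any future per-device test could then call
wait_for_rings_started(s, vu_ops_nqueues(s)) instead of open-coding the
queue count.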



[PATCH v4 5/7] tests/qtest/vhost-user-test: add support for the vhost-user-blk device

2020-09-04 Thread Dima Stepanov
Add vhost_user_ops structure for the vhost-user-blk device class. Add
the test_reconnect and test_migrate tests for this device.

Signed-off-by: Dima Stepanov 
---
 tests/qtest/vhost-user-test.c | 139 +-
 1 file changed, 137 insertions(+), 2 deletions(-)

diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c
index 3df5322..a8af613 100644
--- a/tests/qtest/vhost-user-test.c
+++ b/tests/qtest/vhost-user-test.c
@@ -24,6 +24,7 @@
 #include "libqos/libqos.h"
 #include "libqos/pci-pc.h"
 #include "libqos/virtio-pci.h"
+#include "libqos/virtio-blk.h"
 
 #include "libqos/malloc-pc.h"
 #include "hw/virtio/virtio-net.h"
@@ -31,6 +32,7 @@
 #include "standard-headers/linux/vhost_types.h"
 #include "standard-headers/linux/virtio_ids.h"
 #include "standard-headers/linux/virtio_net.h"
+#include "standard-headers/linux/virtio_blk.h"
 
 #ifdef CONFIG_LINUX
 #include 
@@ -43,6 +45,7 @@
 " -numa node,memdev=mem"
 #define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
 #define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce"
+#define QEMU_CMD_BLKCHR " -chardev socket,id=chdev0,path=%s%s"
 
 #define HUGETLBFS_MAGIC   0x958458f6
 
@@ -55,6 +58,7 @@
 #define VHOST_USER_PROTOCOL_F_MQ 0
 #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
+#define VHOST_USER_PROTOCOL_F_CONFIG 9
 
 #define VHOST_LOG_PAGE 0x1000
 
@@ -78,6 +82,8 @@ typedef enum VhostUserRequest {
     VHOST_USER_SET_PROTOCOL_FEATURES = 16,
     VHOST_USER_GET_QUEUE_NUM = 17,
     VHOST_USER_SET_VRING_ENABLE = 18,
+    VHOST_USER_GET_CONFIG = 24,
+    VHOST_USER_SET_CONFIG = 25,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -99,6 +105,14 @@ typedef struct VhostUserLog {
     uint64_t mmap_offset;
 } VhostUserLog;
 
+#define VHOST_USER_MAX_CONFIG_SIZE 256
+typedef struct VhostUserConfig {
+    uint32_t offset;
+    uint32_t size;
+    uint32_t flags;
+    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
+} VhostUserConfig;
+
 typedef struct VhostUserMsg {
     VhostUserRequest request;
 
@@ -114,6 +128,7 @@ typedef struct VhostUserMsg {
         struct vhost_vring_addr addr;
         VhostUserMemory memory;
         VhostUserLog log;
+        VhostUserConfig config;
     } payload;
 } QEMU_PACKED VhostUserMsg;
 
@@ -137,6 +152,7 @@ enum {
 
 enum {
     VHOST_USER_NET,
+    VHOST_USER_BLK,
 };
 
 typedef struct TestServer {
@@ -166,12 +182,15 @@ struct vhost_user_ops {
     int type;
     void (*append_opts)(TestServer *s, GString *cmd_line,
                         const char *chr_opts);
+    void (*driver_init)(void *obj, QGuestAllocator *alloc);
 
     /* VHOST-USER commands. */
     void (*set_features)(TestServer *s, CharBackend *chr,
                          VhostUserMsg *msg);
     void (*get_protocol_features)(TestServer *s,
                                   CharBackend *chr, VhostUserMsg *msg);
+    void (*get_config)(TestServer *s, CharBackend *chr,
+                       VhostUserMsg *msg);
 };
 
 static const char *init_hugepagefs(void);
@@ -194,6 +213,14 @@ static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                            chr_opts, s->chr_name);
 }
 
+static void append_vhost_blk_opts(TestServer *s, GString *cmd_line,
+                                  const char *chr_opts)
+{
+    g_string_append_printf(cmd_line, QEMU_CMD_BLKCHR,
+                           s->socket_path,
+                           chr_opts);
+}
+
 static void append_mem_opts(TestServer *server, GString *cmd_line,
                             int size, enum test_memfd memfd)
 {
@@ -425,6 +452,12 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
         break;
 
+    case VHOST_USER_GET_CONFIG:
+        if (s->vu_ops->get_config) {
+            s->vu_ops->get_config(s, chr, &msg);
+        }
+        break;
+
     default:
         break;
     }
@@ -727,6 +760,9 @@ static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
     guint8 *log;
     guint64 size;
 
+    if (s->vu_ops->driver_init) {
+        s->vu_ops->driver_init(obj, alloc);
+    }
     if (!wait_for_fds(s)) {
         return;
     }
@@ -796,6 +832,24 @@ static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
     g_string_free(dest_cmdline, true);
 }
 
+static void vu_blk_driver_init(void *obj, QGuestAllocator *alloc)
+{
+    QVirtioBlk *blk_if;
+    QVirtioDevice *dev;
+    QVirtQueue *vq;
+    uint64_t features;
+
+    blk_if = obj;
+    dev = blk_if->vdev;
+    features = qvirtio_get_features(dev);
+    qvirtio_set_features(dev, features);
+
+    vq = qvirtqueue_setup(dev, alloc, 0);
+    g_assert(vq);
+
+    qvirtio_set_driver_ok(dev);
+}
+
 static void wait_for_rings_started(TestServer *s, size_t count)
 {
     gint64 end_time;
@@ -857,12 +911,21 @@ static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
 {
     TestServer *s = arg;
     GSource *src;
+    int nq;
 
+
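
The quoted patch is truncated above. For context only, here is a minimal
sketch of how a blk-side get_config handler could answer
VHOST_USER_GET_CONFIG through the new vu_ops hook and the VhostUserConfig
payload added by this patch; the function name, config values, and reply
layout are assumptions for illustration, not the patch's actual code:

    /* Hypothetical handler; name and values are illustrative only. */
    static void vu_blk_get_config_sketch(TestServer *s, CharBackend *chr,
                                         VhostUserMsg *msg)
    {
        /* Advertise a tiny disk: 16 sectors of 512 bytes, one queue. */
        struct virtio_blk_config blkcfg = {
            .capacity = 16,
            .num_queues = 1,
        };

        msg->flags |= VHOST_USER_REPLY_MASK;   /* reply flag used by the
                                                  other chr_read() handlers */
        msg->size = sizeof(msg->payload.config);
        msg->payload.config.size = sizeof(blkcfg);
        memcpy(msg->payload.config.region, &blkcfg, sizeof(blkcfg));

        qemu_chr_fe_write_all(chr, (uint8_t *)msg,
                              VHOST_USER_HDR_SIZE + msg->size);
    }

Such a handler would be dispatched from chr_read() via the get_config
callback added to struct vhost_user_ops, mirroring how set_features and
get_protocol_features are handled for the net device.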