[Xen-devel] [PATCH RFC v2 2/8] Modified xl stack to receive mirror QEMU disk option

2017-10-18 Thread Bruno Alvisio
---
 tools/ocaml/libs/xl/xenlight_stubs.c |  4 +--
 tools/xl/xl.h|  1 +
 tools/xl/xl_migrate.c| 47 +---
 tools/xl/xl_saverestore.c|  2 +-
 tools/xl/xl_vmcontrol.c  |  5 ++--
 5 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/tools/ocaml/libs/xl/xenlight_stubs.c b/tools/ocaml/libs/xl/xenlight_stubs.c
index 98b52b9..9e19009 100644
--- a/tools/ocaml/libs/xl/xenlight_stubs.c
+++ b/tools/ocaml/libs/xl/xenlight_stubs.c
@@ -538,7 +538,7 @@ value stub_libxl_domain_create_restore(value ctx, value 
domain_config, value par
 
caml_enter_blocking_section();
ret = libxl_domain_create_restore(CTX, &c_dconfig, &c_domid, restore_fd,
-   -1, &c_params, ao_how, NULL);
+   -1, 0, &c_params, ao_how, NULL);
caml_leave_blocking_section();
 
free(ao_how);
@@ -611,7 +611,7 @@ value stub_libxl_domain_suspend(value ctx, value domid, 
value fd, value async, v
libxl_asyncop_how *ao_how = aohow_val(async);
 
caml_enter_blocking_section();
-   ret = libxl_domain_suspend(CTX, c_domid, c_fd, 0, ao_how);
+   ret = libxl_domain_suspend(CTX, c_domid, c_fd, 0, NULL, ao_how);
caml_leave_blocking_section();
 
free(ao_how);
diff --git a/tools/xl/xl.h b/tools/xl/xl.h
index 01c2af6..070bac1 100644
--- a/tools/xl/xl.h
+++ b/tools/xl/xl.h
@@ -35,6 +35,7 @@ struct domain_create {
 int daemonize;
 int monitor; /* handle guest reboots etc */
 int paused;
+int mirror_qemu_disk;
 int dryrun;
 int quiet;
 int vnc;
diff --git a/tools/xl/xl_migrate.c b/tools/xl/xl_migrate.c
index 1f0e87d..fee726f 100644
--- a/tools/xl/xl_migrate.c
+++ b/tools/xl/xl_migrate.c
@@ -177,7 +177,8 @@ static void migrate_do_preamble(int send_fd, int recv_fd, 
pid_t child,
 }
 
 static void migrate_domain(uint32_t domid, const char *rune, int debug,
-   const char *override_config_file)
+   const char *override_config_file,
+   int mirror_qemu_disks, const char* hostname)
 {
 pid_t child = -1;
 int rc;
@@ -205,7 +206,9 @@ static void migrate_domain(uint32_t domid, const char 
*rune, int debug,
 
 if (debug)
 flags |= LIBXL_SUSPEND_DEBUG;
-rc = libxl_domain_suspend(ctx, domid, send_fd, flags, NULL);
+if(mirror_qemu_disks)
+flags |= LIBXL_SUSPEND_MIRROR_QEMU_DISKS;
+rc = libxl_domain_suspend(ctx, domid, send_fd, flags, hostname, NULL);
 if (rc) {
 fprintf(stderr, "migration sender: libxl_domain_suspend failed"
 " (rc=%d)\n", rc);
@@ -316,7 +319,7 @@ static void migrate_domain(uint32_t domid, const char 
*rune, int debug,
 }
 
 static void migrate_receive(int debug, int daemonize, int monitor,
-int pause_after_migration,
+int pause_after_migration, int mirror_qemu_disks,
 int send_fd, int recv_fd,
 libxl_checkpointed_stream checkpointed,
 char *colo_proxy_script,
@@ -343,6 +346,7 @@ static void migrate_receive(int debug, int daemonize, int 
monitor,
 dom_info.daemonize = daemonize;
 dom_info.monitor = monitor;
 dom_info.paused = 1;
+dom_info.mirror_qemu_disks = mirror_qemu_disks;
 dom_info.migrate_fd = recv_fd;
 dom_info.send_back_fd = send_fd;
 dom_info.migration_domname_r = _domname;
@@ -423,6 +427,17 @@ static void migrate_receive(int debug, int daemonize, int 
monitor,
 
 fprintf(stderr, "migration target: Got permission, starting domain.\n");
 
+if(mirror_qemu_disks){
+fprintf(stderr, "migration target: Stopping NBD server\n");
+rc = libxl__nbd_server_stop(ctx, domid);
+if (rc){
+rc = 0; //For now, mask the error if NBD server fails to stop
+fprintf(stderr, "Failed to stop NBD server\n");
+}else{
+fprintf(stderr, "Stopped NBD server successfully\n");
+}
+}
+
 if (migration_domname) {
 rc = libxl_domain_rename(ctx, domid, migration_domname, 
common_domname);
 if (rc) goto perhaps_destroy_notify_rc;
@@ -478,6 +493,7 @@ static void migrate_receive(int debug, int daemonize, int 
monitor,
 int main_migrate_receive(int argc, char **argv)
 {
 int debug = 0, daemonize = 1, monitor = 1, pause_after_migration = 0;
+int mirror_qemu_disks = 0;
 libxl_checkpointed_stream checkpointed = LIBXL_CHECKPOINTED_STREAM_NONE;
 int opt;
 bool userspace_colo_proxy = false;
@@ -490,7 +506,7 @@ int main_migrate_receive(int argc, char **argv)
 COMMON_LONG_OPTS
 };
 
-SWITCH_FOREACH_OPT(opt, "Fedrp", opts, "migrate-receive", 0) {
+SWITCH_FOREACH_OPT(opt, "Fedrpq", opts, "migrate-receive", 0) {
 case 'F':
 daemonize = 0;
 break;
@@ -516,6 +532,9 @@ int main_migrate_receive(int argc, char **argv)
 case 'p':
  

[Xen-devel] [PATCH RFC v2 3/8] Adapted libxl to handle migration of instances with QEMU-based disks

2017-10-18 Thread Bruno Alvisio
---
 tools/libxl/libxl.h  |  11 ++-
 tools/libxl/libxl_create.c   | 191 ---
 tools/libxl/libxl_dm.c   |  26 --
 tools/libxl/libxl_dom_save.c |  85 -
 tools/libxl/libxl_domain.c   |   4 +-
 tools/libxl/libxl_internal.h |  60 ++--
 tools/libxl/libxl_save_callout.c |  38 +---
 tools/libxl/libxl_save_helper.c  |   4 +-
 tools/libxl/libxl_stream_read.c  |  17 +++-
 tools/libxl/libxl_stream_write.c |  28 +-
 10 files changed, 406 insertions(+), 58 deletions(-)

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 91408b4..4022c37 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -1333,7 +1333,7 @@ int libxl_domain_create_new(libxl_ctx *ctx, 
libxl_domain_config *d_config,
 LIBXL_EXTERNAL_CALLERS_ONLY;
 int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config,
 uint32_t *domid, int restore_fd,
-int send_back_fd,
+int send_back_fd, int mirror_qemu_disks,
 const libxl_domain_restore_params *params,
 const libxl_asyncop_how *ao_how,
 const libxl_asyncprogress_how *aop_console_how)
@@ -1373,8 +1373,9 @@ static inline int libxl_domain_create_restore_0x040400(
 const libxl_asyncprogress_how *aop_console_how)
 LIBXL_EXTERNAL_CALLERS_ONLY
 {
+//TODO: balvisio: Review
 return libxl_domain_create_restore(ctx, d_config, domid, restore_fd,
-   -1, params, ao_how, aop_console_how);
+   -1, 0, params, ao_how, aop_console_how);
 }
 
 #define libxl_domain_create_restore libxl_domain_create_restore_0x040400
@@ -1408,10 +1409,16 @@ int libxl_retrieve_domain_configuration(libxl_ctx *ctx, 
uint32_t domid,
 
 int libxl_domain_suspend(libxl_ctx *ctx, uint32_t domid, int fd,
  int flags, /* LIBXL_SUSPEND_* */
+ const char* hostname,
  const libxl_asyncop_how *ao_how)
  LIBXL_EXTERNAL_CALLERS_ONLY;
 #define LIBXL_SUSPEND_DEBUG 1
 #define LIBXL_SUSPEND_LIVE 2
+#define LIBXL_SUSPEND_MIRROR_QEMU_DISKS 4
+
+//TODO: balvisio: DO NOT HARD-CODE THIS PARAMS
+#define QEMU_DRIVE_MIRROR_PORT "11000"
+#define QEMU_DRIVE_MIRROR_DEVICE "ide0-hd0"
 
 /* @param suspend_cancel [from xenctrl.h:xc_domain_resume( @param fast )]
  *   If this parameter is true, use co-operative resume. The guest
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index 9123585..f10f2ce 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -744,6 +744,10 @@ static int store_libxl_entry(libxl__gc *gc, uint32_t domid,
 static void domcreate_devmodel_started(libxl__egc *egc,
libxl__dm_spawn_state *dmss,
int rc);
+
+static void start_nbd_server(libxl__egc *egc, libxl__dm_spawn_state *dmss,
+ int ret);
+
 static void domcreate_bootloader_console_available(libxl__egc *egc,
libxl__bootloader_state 
*bl);
 static void domcreate_bootloader_done(libxl__egc *egc,
@@ -760,10 +764,17 @@ static void domcreate_stream_done(libxl__egc *egc,
   libxl__stream_read_state *srs,
   int ret);
 
+static void domcreate_pre_build(libxl__egc *egc,
+libxl__domain_create_state *dcs,
+int ret);
+
 static void domcreate_rebuild_done(libxl__egc *egc,
libxl__domain_create_state *dcs,
int ret);
 
+static void domcreate_multidev_begin(libxl__egc *egc,
+ libxl__domain_create_state *dcs);
+
 /* Our own function to clean up and call the user's callback.
  * The final call in the sequence. */
 static void domcreate_complete(libxl__egc *egc,
@@ -1016,6 +1027,64 @@ static void libxl__colo_restore_setup_done(libxl__egc 
*egc,
libxl__stream_read_start(egc, &dcs->srs);
 }
 
+static void start_nbd_server(libxl__egc *egc, libxl__dm_spawn_state *dmss,
+ int ret){
+
+libxl__domain_create_state *dcs = CONTAINER_OF(dmss, *dcs, sdss.dm);
+STATE_AO_GC(dmss->spawn.ao);
+const uint32_t domid = dcs->guest_domid;
+dcs->sdss.dm.guest_domid = domid;
+
+if (ret) {
+LOGD(ERROR, domid, "device model did not start: %d", ret);
+goto error_out;
+}
+
+if(dcs->restore_fd >= 0 && dcs->mirror_qemu_disks) {
+ /*
+  * Start and add the NBD server
+  * Host is set it to "::" for now
+  * Port we hard code a port for now
+
+  * This code just 

[Xen-devel] [PATCH RFC v2 5/8] Improved migration flow syntax in libxl

2017-10-18 Thread Bruno Alvisio
---
 tools/libxl/libxl_create.c   |  5 +++--
 tools/libxl/libxl_dm.c   | 10 +-
 tools/libxl/libxl_internal.h |  4 ++--
 tools/libxl/libxl_save_callout.c | 41 +---
 tools/libxl/libxl_save_helper.c  | 15 ++-
 tools/libxl/libxl_types.idl  |  6 ++
 6 files changed, 48 insertions(+), 33 deletions(-)

diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index f10f2ce..6df2754 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1121,6 +1121,7 @@ static void domcreate_bootloader_done(libxl__egc *egc,
 dcs->sdss.dm.spawn.ao = ao;
 dcs->sdss.dm.guest_config = dcs->guest_config;
dcs->sdss.dm.build_state = &dcs->build_state;
+dcs->sdss.dm.mirror_qemu_disks = dcs->mirror_qemu_disks;
 if(!dcs->mirror_qemu_disks) {
 dcs->sdss.dm.callback = domcreate_devmodel_started;
 dcs->sdss.callback = domcreate_devmodel_started;
@@ -1518,7 +1519,7 @@ static void domcreate_launch_dm(libxl__egc *egc, 
libxl__multidev *multidev,
 if (libxl_defbool_val(d_config->b_info.device_model_stubdomain))
libxl__spawn_stub_dm(egc, &dcs->sdss);
 else
-libxl__spawn_local_dm(egc, &dcs->sdss.dm, dcs->mirror_qemu_disks);
+libxl__spawn_local_dm(egc, &dcs->sdss.dm);
 
 /*
  * Handle the domain's (and the related stubdomain's) access to
@@ -1550,7 +1551,7 @@ static void domcreate_launch_dm(libxl__egc *egc, 
libxl__multidev *multidev,
 goto error_out;
 if (ret) {
 dcs->sdss.dm.guest_domid = domid;
-libxl__spawn_local_dm(egc, &dcs->sdss.dm, 0);
+libxl__spawn_local_dm(egc, &dcs->sdss.dm);
 return;
 } else {
 assert(!dcs->sdss.dm.guest_domid);
diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
index ff6721d..001e14e 100644
--- a/tools/libxl/libxl_dm.c
+++ b/tools/libxl/libxl_dm.c
@@ -1396,7 +1396,7 @@ static int libxl__build_device_model_args_new(libxl__gc 
*gc,
 }
 }
 
-if (state->saved_state && !mirror_qemu_disks) {
+if (state->saved_state) {
 /* This file descriptor is meant to be used by QEMU */
 *dm_state_fd = open(state->saved_state, O_RDONLY);
 flexarray_append(dm_args, "-incoming");
@@ -2062,7 +2062,7 @@ static void spawn_stub_launch_dm(libxl__egc *egc,
 /* If dom0 qemu not needed, do not launch it */
spawn_stubdom_pvqemu_cb(egc, &sdss->pvqemu, 0);
 } else {
-libxl__spawn_local_dm(egc, &sdss->pvqemu, 0);
+libxl__spawn_local_dm(egc, &sdss->pvqemu);
 }
 
 return;
@@ -2167,8 +2167,7 @@ static void device_model_spawn_outcome(libxl__egc *egc,
libxl__dm_spawn_state *dmss,
int rc);
 
-void libxl__spawn_local_dm(libxl__egc *egc, libxl__dm_spawn_state *dmss,
-   int mirror_qemu_disks)
+void libxl__spawn_local_dm(libxl__egc *egc, libxl__dm_spawn_state *dmss)
 {
 /* convenience aliases */
 const int domid = dmss->guest_domid;
@@ -2208,7 +2207,8 @@ void libxl__spawn_local_dm(libxl__egc *egc, 
libxl__dm_spawn_state *dmss,
 }
 rc = libxl__build_device_model_args(gc, dm, domid, guest_config,
  &args, &envs, state,
-  &dm_state_fd, mirror_qemu_disks);
+  &dm_state_fd,
+  dmss->mirror_qemu_disks);
 if (rc)
 goto out;
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 16a476f..30862c6 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3780,7 +3780,7 @@ _hidden void libxl__domain_save(libxl__egc *egc,
 _hidden void libxl__xc_domain_save(libxl__egc *egc,
libxl__domain_save_state *dss,
libxl__save_helper_state *shs,
-   int mirror_qemu_disks);
+   int migration_phase);
 /* If rc==0 then retval is the return value from xc_domain_save
  * and errnoval is the errno value it provided.
  * If rc!=0, retval and errnoval are undefined. */
@@ -3813,7 +3813,7 @@ _hidden int libxl__restore_emulator_xenstore_data
 _hidden void libxl__xc_domain_restore(libxl__egc *egc,
   libxl__domain_create_state *dcs,
   libxl__save_helper_state *shs,
-  int hvm, int pae, int mirror_qemu_disks);
+  int hvm, int pae, int migration_phase);
 /* If rc==0 then retval is the return value from xc_domain_save
  * and errnoval is the errno value it provided.
  * If rc!=0, retval and errnoval are undefined. */
diff --git a/tools/libxl/libxl_save_callout.c b/tools/libxl/libxl_save_callout.c
index 290d91d..48f96d8 100644
--- 

[Xen-devel] [PATCH RFC v2 6/8] Adapted libxc for migration of local disk

2017-10-18 Thread Bruno Alvisio
---
 tools/libxc/include/xenguest.h|   6 +-
 tools/libxc/xc_nomigrate.c|   6 +-
 tools/libxc/xc_sr_common.h|   3 +
 tools/libxc/xc_sr_restore.c   |  14 +++--
 tools/libxc/xc_sr_save.c  | 118 +-
 tools/libxc/xc_sr_save_x86_hvm.c  |   7 ++-
 tools/libxc/xc_sr_stream_format.h |   4 ++
 7 files changed, 144 insertions(+), 14 deletions(-)

diff --git a/tools/libxc/include/xenguest.h b/tools/libxc/include/xenguest.h
index 5cd8111..a6f52f1 100644
--- a/tools/libxc/include/xenguest.h
+++ b/tools/libxc/include/xenguest.h
@@ -103,7 +103,8 @@ typedef enum {
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t 
max_iters,
uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
struct save_callbacks* callbacks, int hvm,
-   xc_migration_stream_t stream_type, int recv_fd);
+   xc_migration_stream_t stream_type, int recv_fd,
+   int migration_phase);
 
 /* callbacks provided by xc_domain_restore */
 struct restore_callbacks {
@@ -168,7 +169,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, 
uint32_t dom,
   unsigned long *console_mfn, domid_t console_domid,
   unsigned int hvm, unsigned int pae,
   xc_migration_stream_t stream_type,
-  struct restore_callbacks *callbacks, int send_back_fd);
+  struct restore_callbacks *callbacks, int send_back_fd,
+  int migration_phase);
 
 /**
  * This function will create a domain for a paravirtualized Linux
diff --git a/tools/libxc/xc_nomigrate.c b/tools/libxc/xc_nomigrate.c
index 317c8ce..c75411b 100644
--- a/tools/libxc/xc_nomigrate.c
+++ b/tools/libxc/xc_nomigrate.c
@@ -23,7 +23,8 @@
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t 
max_iters,
uint32_t max_factor, uint32_t flags,
struct save_callbacks* callbacks, int hvm,
-   xc_migration_stream_t stream_type, int recv_fd)
+   xc_migration_stream_t stream_type, int recv_fd,
+   int migration_phase)
 {
 errno = ENOSYS;
 return -1;
@@ -35,7 +36,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t 
dom,
   unsigned long *console_mfn, domid_t console_domid,
   unsigned int hvm, unsigned int pae,
   xc_migration_stream_t stream_type,
-  struct restore_callbacks *callbacks, int send_back_fd)
+  struct restore_callbacks *callbacks, int send_back_fd,
+  int migration_phase)
 {
 errno = ENOSYS;
 return -1;
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index a83f22a..903f18a 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -96,6 +96,8 @@ struct xc_sr_save_ops
  * after a successful save, or upon encountering an error.
  */
 int (*cleanup)(struct xc_sr_context *ctx);
+
+int (*local_disks)(struct xc_sr_context *ctx);
 };
 
 
@@ -177,6 +179,7 @@ struct xc_sr_context
 xc_interface *xch;
 uint32_t domid;
 int fd;
+int migration_phase;
 
 xc_dominfo_t dominfo;
 
diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index a016678..13e6abc 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -799,11 +799,13 @@ static int restore(struct xc_sr_context *ctx)
  * With Remus, if we reach here, there must be some error on primary,
  * failover from the last checkpoint state.
  */
-rc = ctx->restore.ops.stream_complete(ctx);
-if ( rc )
-goto err;
+if ( ctx->migration_phase != MIGRATION_PHASE_MIRROR_DISK ) {
+rc = ctx->restore.ops.stream_complete(ctx);
+if ( rc )
+goto err;
 
-IPRINTF("Restore successful");
+IPRINTF("Restore successful");
+}
 goto done;
 
  err:
@@ -829,13 +831,15 @@ int xc_domain_restore(xc_interface *xch, int io_fd, 
uint32_t dom,
   unsigned long *console_gfn, domid_t console_domid,
   unsigned int hvm, unsigned int pae,
   xc_migration_stream_t stream_type,
-  struct restore_callbacks *callbacks, int send_back_fd)
+  struct restore_callbacks *callbacks, int send_back_fd,
+  int migration_phase)
 {
 xen_pfn_t nr_pfns;
 struct xc_sr_context ctx =
 {
 .xch = xch,
 .fd = io_fd,
+.migration_phase = migration_phase
 };
 
 /* GCC 4.4 (of CentOS 6.x vintage) can' t initialise anonymous unions. */
diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index ca6913b..181a0c8 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -412,6 +412,96 @@ static int send_all_pages(struct 

[Xen-devel] [PATCH RFC v2 8/8] Added support to handle QMP events

2017-10-18 Thread Bruno Alvisio
---
 tools/libxl/libxl_dom_save.c |  71 
 tools/libxl/libxl_qmp.c  | 150 +--
 2 files changed, 170 insertions(+), 51 deletions(-)

diff --git a/tools/libxl/libxl_dom_save.c b/tools/libxl/libxl_dom_save.c
index ddfe2f8..d188dd2 100644
--- a/tools/libxl/libxl_dom_save.c
+++ b/tools/libxl/libxl_dom_save.c
@@ -432,63 +432,44 @@ static void mirror_qemu_disks(libxl__egc *egc, 
libxl__stream_write_state *sws,
 {
 int counter = 20;
 char* target;
-bool job_is_ready = false;
 libxl__domain_save_state *dss = sws->dss;
 const uint32_t domid = dss->domid;
 STATE_AO_GC(dss->ao);
 
-if (dss->mirror_qemu_disks) {
+if (rc)
+goto err;
 /*
  * If the -q was provided, the drive-mirror job is started.
- * TODO: Move the following code as part of the domain_suspend
  * TODO: The port should be sent by the destination.
-*/
-start_mirror:
-LOGD(DEBUG, domid, "Sleeping for a bit so that source can start 
NBD\n");
-sleep(30);
-LOGD(DEBUG, domid, "Starting mirror-drive of device %s\n",
- QEMU_DRIVE_MIRROR_DEVICE);
-target = GCSPRINTF("nbd:%s:%s:exportname=%s", dss->hostname,
-   QEMU_DRIVE_MIRROR_PORT, QEMU_DRIVE_MIRROR_DEVICE);
-rc = libxl__qmp_drive_mirror(gc, dss->domid, QEMU_DRIVE_MIRROR_DEVICE,
+ */
+ start_mirror:
+LOGD(DEBUG, domid, "Sleeping for a bit so that source can start NBD\n");
+sleep(30);
+LOGD(DEBUG, domid, "Starting mirror-drive of device %s\n",
+ QEMU_DRIVE_MIRROR_DEVICE);
+target = GCSPRINTF("nbd:%s:%s:exportname=%s", dss->hostname,
+   QEMU_DRIVE_MIRROR_PORT, QEMU_DRIVE_MIRROR_DEVICE);
+rc = libxl__qmp_drive_mirror(gc, dss->domid, QEMU_DRIVE_MIRROR_DEVICE,
  target, "raw");
-if (!rc) {
-LOGD(INFO, domid, "Drive mirror command returned successfully\n");
+if (!rc) {
+LOGD(DEBUG, domid, "Drive mirror command returned successfully\n");
+}else{
+LOGD(ERROR, domid, "Sending drive mirror command failed\n");
+if(counter > 0){
+LOGD(INFO, domid, "Counter: %d. Sleeping for 10 sec and retry\n", 
counter);
+sleep(10);
+counter--;
+goto start_mirror;
 }else{
-LOGD(ERROR, domid, "Sending drive mirror command failed\n");
-if(counter > 0){
-LOGD(INFO, domid, "Counter: %d. Sleeping for 10 sec and 
retry\n", counter);
-sleep(10);
-counter--;
-goto start_mirror;
-}else{
-goto cont;
-}
-}
-
-/*
- * Query job status until it is ready
- * TODO: This code is just an inefficient busy wait. QMP sends an
- * TODO: asynchronous message when mirroring job is completed. Consider
- * TODO: adding the capability to handle asynchronous QMP messages 
(already done?)
- */
-while(!job_is_ready) {
-LOGD(INFO, domid, "Checking for drive-mirror job");
-rc = libxl__qmp_query_block_jobs(gc, dss->domid, &job_is_ready);
-if(rc){
-LOGD(ERROR, domid, "Checking block job failed\n");
-goto cont;
-}else{
-LOGD(INFO, domid, "Checking block job succeeded\n");
-}
-if(!job_is_ready){
-LOGD(INFO, domid, "Sleeping 5 sec\n");
-sleep(5);
-}
+goto err;
 }
 }
-cont:
+
libxl__stream_write_start(egc, &sws->dss->sws);
+return;
+
+ err:
+   dss->callback(egc, dss, rc);
 }
 
 static void stream_done(libxl__egc *egc,
diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
index fe6f076..5ef5fb1 100644
--- a/tools/libxl/libxl_qmp.c
+++ b/tools/libxl/libxl_qmp.c
@@ -59,6 +59,13 @@ typedef struct callback_id_pair {
 LIBXL_STAILQ_ENTRY(struct callback_id_pair) next;
 } callback_id_pair;
 
+typedef struct handler_event_pair {
+const char* event_type;
+void *opaque;
+qmp_request_context *context;
+qmp_callback_t event_handler;
+} event_handler_pair;
+
 struct libxl__qmp_handler {
 struct sockaddr_un addr;
 int qmp_fd;
@@ -66,6 +73,9 @@ struct libxl__qmp_handler {
 time_t timeout;
 /* wait_for_id will be used by the synchronous send function */
 int wait_for_id;
+/* wait_for_event_type is used to wait on QMP events */
+const char* wait_for_event_type;
+event_handler_pair *hep;
 
 char buffer[QMP_RECEIVE_BUFFER_SIZE + 1];
 libxl__yajl_ctx *yajl_ctx;
@@ -287,6 +297,25 @@ static void qmp_handle_error_response(libxl__gc *gc, 
libxl__qmp_handler *qmp,
  libxl__json_object_get_string(resp));
 }
 
+static void qmp_handle_event(libxl__gc *gc, libxl__qmp_handler *qmp,
+ const libxl__json_object *event)
+{
+const char* 

[Xen-devel] [PATCH RFC v2 0/8] Live migration for VMs with QEMU backed local storage

2017-10-18 Thread Bruno Alvisio
I am reviving this thread about the migration of VMs with local storage. I have
worked on a solution to migrate VMs that use QEMU as the backend disk driver. I
have adapted the migration flow and piggybacked on the “drive-mirror” capability
already provided by QEMU.

Overview
1. The “xl migrate” command has an additional “-q” flag. When provided, the
local storage of the VM is mirrored to the destination during the migration
process (an example invocation follows this list).
2. Internally, the modification consists of adding a new
libxl__stream_read_state struct to the libxl__domain_create_state structure and
a libxl__stream_read_state structure to the libxl__domain_save_state struct.
3. Migration flow can now be divided into three phases:
   a. Phase One: Copies the necessary state to start a QEMU process on the
destination. It is started with the “-incoming defer” option.
   b. Phase Two: The disk is mirrored using the QEMU embedded NBD server.
   c. Phase Three: Once the disk is completely mirrored, the virtual RAM of the
domain is live migrated to the destination. This phase most closely resembles
the current migration flow.
4. If the “-q” option is not provided, the migration is equivalent to the
current migration flow.
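
For illustration, a migration with disk mirroring would then be started roughly
like this (domain and host names are placeholders; the domain name is taken from
the sample configuration below):

    xl migrate -q tinycore destination-host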

The new migration flow follows this major sequence of steps:
1. A 1st stream copies the QEMU devices' RAM from source to destination.
2. The QEMU process is started on the destination with the option “-incoming
defer”. (This creates the QEMU process, but it does not start running the main
loop until the “migrate-incoming” command is executed.)
3. The “drive-mirror” QMP command is executed so that the disk is mirrored to
the destination node (a sketch of the QMP traffic follows this list).
4. An event listener waits for the QMP BLOCK_JOB_READY event sent by QEMU, which
signals that the disk mirror job has brought source and destination into sync.
5. A 2nd stream copies the virtual RAM from source to destination, including the
QEMU state. At this point, the VM is suspended on the source.
6. The “migrate-incoming” QMP command is executed on the destination.
7. The VM is restored on the destination.
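
For concreteness, here is a rough sketch of the QMP traffic this sequence
implies, using the values hardcoded in this series (port 11000, device
ide0-hd0); the destination host and QEMU state file are placeholders and the
exact fields may differ:

    # Destination, once QEMU is running with "-incoming defer":
    { "execute": "nbd-server-start",
      "arguments": { "addr": { "type": "inet",
                               "data": { "host": "::", "port": "11000" } } } }
    { "execute": "nbd-server-add",
      "arguments": { "device": "ide0-hd0", "writable": true } }

    # Source, mirror the disk into the destination's NBD export:
    { "execute": "drive-mirror",
      "arguments": { "device": "ide0-hd0",
                     "target": "nbd:<dest-host>:11000:exportname=ide0-hd0",
                     "sync": "full", "format": "raw", "mode": "existing" } }

    # QEMU on the source emits this event once the mirror is in sync:
    { "event": "BLOCK_JOB_READY",
      "data": { "type": "mirror", "device": "ide0-hd0", ... } }

    # Destination, after the RAM/QEMU state stream has been received:
    { "execute": "migrate-incoming",
      "arguments": { "uri": "exec: /bin/cat <qemu-state-file>" } }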

This is a sample configuration file that I have used to test my branch:

name="tinycore"
disk=['/home/balvisio/tinycore.img,raw,xvda,w']
memory=128
builder='hvm'
vcpus=1
vfb = ['type=vnc']
vif= ['bridge=xenbr0']
boot='b'
acpi=1
device_model_version='qemu-xen'
serial='pty'
vnc=1
xen_platform_pci=0

Notes

1. Note that the configuration file uses "xen_platform_pci=0”. This is
necessary so that the block device is seen by QEMU. Further modifications would
be needed for the "xen_platform_pci=1” case if we still want to use the NBD
mirroring capability provided by QEMU.
2. The current branch still has many hardcoded values. Many of them can easily
be removed:
a. Port used for disk mirroring (11000).
b. Name of the block device (ide0-hd0). Currently the branch only supports a
VM with one disk drive.
c. Live migration memory transfer: the number of pages transferred by libxc is
hardcoded. Only a VM with 128 MB of memory is supported.

Here is a link to the branch on GitHub:
https://github.com/balvisio/xen/tree/feature/local_storage_migration

Any feedback/suggestion is appreciated.

Cheers,

Bruno

Signed-off-by: Bruno Alvisio <bruno.alvi...@gmail.com>

---
 tools/libxc/include/xenguest.h   |   6 +-
 tools/libxc/xc_nomigrate.c   |   6 +-
 tools/libxc/xc_sr_common.h   |   3 + 
 tools/libxc/xc_sr_restore.c  |  14 +-
 tools/libxc/xc_sr_save.c | 117 +++-
 tools/libxc/xc_sr_save_x86_hvm.c |   7 +-
 tools/libxc/xc_sr_stream_format.h|   4 + 
 tools/libxl/libxl.h  |  11 +-
 tools/libxl/libxl_create.c   | 189 +++--
 tools/libxl/libxl_dm.c   |  20 ++- 
 tools/libxl/libxl_dom_save.c |  66 -
 tools/libxl/libxl_domain.c   |   4 +-
 tools/libxl/libxl_internal.h |  62 -
 tools/libxl/libxl_qmp.c  | 261 +++
 tools/libxl/libxl_save_callout.c |  30 ++--
 tools/libxl/libxl_save_helper.c  |  13 +-
 tools/libxl/libxl_stream_read.c  |  17 ++- 
 tools/libxl/libxl_stream_write.c |  28 +++-
 tools/libxl/libxl_types.idl  |   6 + 
 tools/ocaml/libs/xl/xenlight_stubs.c |   4 +-
 tools/xl/xl.h|   1 + 
 tools/xl/xl_cmdtable.c   |   3 +-
 tools/xl/xl_migrate.c|  36 +++--
 tools/xl/xl_saverestore.c|   2 +-
 tools/xl/xl_vmcontrol.c  |   5 +-
 25 files changed, 845 insertions(+), 70 deletions(-)



[Xen-devel] [PATCH RFC v2 7/8] Fixed bugs in the migration flow

2017-10-18 Thread Bruno Alvisio
---
 tools/libxc/xc_sr_save.c |  1 -
 tools/libxl/libxl_create.c   | 15 ---
 tools/libxl/libxl_dom_save.c |  2 +-
 tools/libxl/libxl_domain.c   |  2 +-
 tools/libxl/libxl_internal.h |  2 +-
 tools/libxl/libxl_save_callout.c |  5 +++--
 tools/libxl/libxl_stream_write.c |  2 +-
 tools/xl/xl.h|  2 +-
 tools/xl/xl_cmdtable.c   |  3 ++-
 tools/xl/xl_migrate.c|  2 +-
 10 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index 181a0c8..f3e162f 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -464,7 +464,6 @@ static int send_virtual_devices_and_params(struct 
xc_sr_context *ctx)
 uint64_t i = 0;
 int rc = 0;
 
-fprintf(stderr, "BRUNO: SEND VIRTUAL DEVICES AND PARAMS\n");
 xc_set_progress_prefix(xch, "Frames");
 
 //FOR RTL AND VGA IN 128MB VM . Might change on size of VM
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index 6df2754..0579671 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1055,24 +1055,24 @@ static void start_nbd_server(libxl__egc *egc, 
libxl__dm_spawn_state *dmss,
   * TODO: Assign port dynamically
   */
 
-LOGD(DEBUG, "Starting NBD Server\n");
+LOGD(DEBUG, domid, "Starting NBD Server\n");
 ret = libxl__qmp_nbd_server_start(gc, domid, "::", 
QEMU_DRIVE_MIRROR_PORT);
 if (ret) {
 ret = ERROR_FAIL;
-LOGD(ERROR, "Failed to start NBD Server\n");
+LOGD(ERROR, domid, "Failed to start NBD Server\n");
 goto skip_nbd;
 }else{
-LOGD(INFO, "Started NBD Server Successfully\n");
+LOGD(INFO, domid, "Started NBD Server Successfully\n");
 }
 
 ret = libxl__qmp_nbd_server_add(gc, domid, 
QEMU_DRIVE_MIRROR_DEVICE);
 
 if (ret) {
 ret = ERROR_FAIL;
-LOGD(ERROR, "Failed to add NBD Server\n");
+LOGD(ERROR, domid, "Failed to add NBD Server\n");
 goto skip_nbd;
 } else {
-LOGD(INFO, "NBD Add Successful\n");
+LOGD(INFO, domid, "NBD Add Successful\n");
 }
 }
 
@@ -1103,7 +1103,7 @@ static void domcreate_bootloader_done(libxl__egc *egc,
 libxl__srm_restore_autogen_callbacks *const callbacks =
&dcs->srs.shs.callbacks.restore.a;
 libxl__srm_restore_autogen_callbacks *const callbacks_mirror_qemu_disks =
-&dcs->srs_local_disks.shs.callbacks.restore.a;
+&dcs->srs_mirror_qemu_disks.shs.callbacks.restore.a;
 
 if (rc) {
 domcreate_rebuild_done(egc, dcs, rc);
@@ -1252,6 +1252,7 @@ static void domcreate_stream_done(libxl__egc *egc,
 {
 libxl__domain_create_state *dcs = srs->dcs;
 STATE_AO_GC(dcs->ao);
+int rc;
 
 const uint32_t domid = dcs->guest_domid;
 const char* uri;
@@ -1269,7 +1270,7 @@ static void domcreate_stream_done(libxl__egc *egc,
 }else{
 fprintf(stderr, "Stopped NBD server successfully\n");
 }
-uri = GCSPRINTF("exec: /bin/cat %s", 
(&dcs->sdss.dm)->build_state->saved_state);
+uri = GCSPRINTF("exec: /bin/cat %s", state_file);
 libxl__qmp_migrate_incoming(gc, domid, uri);
 domcreate_devmodel_started(egc, >sdss.dm, 0);
 }
diff --git a/tools/libxl/libxl_dom_save.c b/tools/libxl/libxl_dom_save.c
index a2730f5..ddfe2f8 100644
--- a/tools/libxl/libxl_dom_save.c
+++ b/tools/libxl/libxl_dom_save.c
@@ -446,7 +446,7 @@ static void mirror_qemu_disks(libxl__egc *egc, 
libxl__stream_write_state *sws,
 start_mirror:
 LOGD(DEBUG, domid, "Sleeping for a bit so that source can start 
NBD\n");
 sleep(30);
-LOGD(DEBUG, "Starting mirror-drive of device %s\n",
+LOGD(DEBUG, domid, "Starting mirror-drive of device %s\n",
  QEMU_DRIVE_MIRROR_DEVICE);
 target = GCSPRINTF("nbd:%s:%s:exportname=%s", dss->hostname,
QEMU_DRIVE_MIRROR_PORT, QEMU_DRIVE_MIRROR_DEVICE);
diff --git a/tools/libxl/libxl_domain.c b/tools/libxl/libxl_domain.c
index 76c6d3d..9b512b9 100644
--- a/tools/libxl/libxl_domain.c
+++ b/tools/libxl/libxl_domain.c
@@ -509,7 +509,7 @@ int libxl_domain_suspend(libxl_ctx *ctx, uint32_t domid, 
int fd, int flags,
 dss->type = type;
 dss->live = flags & LIBXL_SUSPEND_LIVE;
 dss->debug = flags & LIBXL_SUSPEND_DEBUG;
-dss->mirror_qemu_disks = flags & LIBXL_SUSPEND_MIRROR_QEMU_DISKS;
+dss->mirror_qemu_disks = (flags & LIBXL_SUSPEND_MIRROR_QEMU_DISKS) ? 1 : 0;
 dss->hostname = hostname;
 dss->checkpointed_stream = LIBXL_CHECKPOINTED_STREAM_NONE;
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 30862c6..d7b338b 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3759,7 +3759,7 @@ struct 

[Xen-devel] [PATCH RFC v2 1/8] Added QMP commands for adding NBD server and disk migration

2017-10-18 Thread Bruno Alvisio
---
 tools/libxl/libxl_internal.h |  18 +++
 tools/libxl/libxl_qmp.c  | 125 ++-
 2 files changed, 142 insertions(+), 1 deletion(-)

diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 7247509..1349a8f 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -1835,6 +1835,24 @@ _hidden int libxl__qmp_nbd_server_add(libxl__gc *gc, int 
domid,
 /* Start replication */
 _hidden int libxl__qmp_start_replication(libxl__gc *gc, int domid,
  bool primary);
+
+/* Add a disk to NBD server */
+_hidden int libxl__qmp_nbd_server_add(libxl__gc *gc, int domid,
+   const char *disk);
+
+/* Mirror disk drive */
+_hidden int libxl__qmp_drive_mirror(libxl__gc *gc, int domid, const char* 
device,
+const char* target, const char* format);
+
+/* Query block devices */
+_hidden int libxl__qmp_query_block(libxl__gc *gc, int domid, char 
*device_names);
+
+/* Query existing block jobs*/
+_hidden int libxl__qmp_query_block_jobs(libxl__gc *gc, int domid, bool 
*is_ready);
+
+/* Resume QEMU process started with -incoming defer */
+_hidden int libxl__qmp_migrate_incoming(libxl__gc *gc, int domid, const char* 
uri);
+
 /* Get replication error that occurs when the vm is running */
 _hidden int libxl__qmp_query_xen_replication_status(libxl__gc *gc, int domid);
 /* Do checkpoint */
diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
index eab993a..fe6f076 100644
--- a/tools/libxl/libxl_qmp.c
+++ b/tools/libxl/libxl_qmp.c
@@ -347,7 +347,10 @@ static libxl__qmp_handler *qmp_init_handler(libxl__gc *gc, 
uint32_t domid)
 }
 qmp->ctx = CTX;
 qmp->domid = domid;
-qmp->timeout = 5;
+
+//TODO: Changed default timeout because drive-mirror command takes a long
+//TODO: to return. Consider timeout to be passed as param.
+qmp->timeout = 600;
 
LIBXL_STAILQ_INIT(&qmp->callback_list);
 
@@ -1069,6 +1072,126 @@ int libxl__qmp_nbd_server_add(libxl__gc *gc, int domid, 
const char *disk)
 return qmp_run_command(gc, domid, "nbd-server-add", args, NULL, NULL);
 }
 
+int libxl__qmp_drive_mirror(libxl__gc *gc, int domid, const char* device, 
const char* target, const char* format)
+{
+libxl__json_object *args = NULL;
+//TODO: Allow method to receive "sync", "speed", "mode", "granurality", 
"buf-size"
+qmp_parameters_add_string(gc, &args, "device", device);
+qmp_parameters_add_string(gc, &args, "target", target);
+qmp_parameters_add_string(gc, &args, "sync", "full");
+qmp_parameters_add_string(gc, &args, "format", format);
+qmp_parameters_add_string(gc, &args, "mode", "existing");
+qmp_parameters_add_integer(gc, &args, "granularity", 0);
+qmp_parameters_add_integer(gc, &args, "buf-size", 0);
+
+return qmp_run_command(gc, domid, "drive-mirror", args, NULL, NULL);
+}
+
+static int query_block_callback(libxl__qmp_handler *qmp,
+   const libxl__json_object *response,
+   void *opaque)
+{
+const libxl__json_object *blockinfo = NULL;
+GC_INIT(qmp->ctx);
+int i, rc = -1;
+
+for (i = 0; (blockinfo = libxl__json_array_get(response, i)); i++) {
+const libxl__json_object *d;
+const char* device_name;
+d = libxl__json_map_get("device", blockinfo, JSON_STRING);
+if(!d){
+goto out;
+}
+device_name = libxl__json_object_get_string(d);
+}
+
+rc = 0;
+out:
+GC_FREE;
+return rc;
+}
+
+static int query_block_jobs_callback(libxl__qmp_handler *qmp,
+   const libxl__json_object *response,
+   void *opaque)
+{
+const libxl__json_object *blockjobinfo = NULL;
+GC_INIT(qmp->ctx);
+int i, rc = -1;
+bool empty = true;
+
+for (i = 0; (blockjobinfo = libxl__json_array_get(response, i)); i++) {
+empty = false;
+const char *type;
+const char *device;
+unsigned int len;
+unsigned int offset;
+bool busy;
+bool paused;
+const char *iostatus;
+bool ready;
+
+const libxl__json_object *type_o = NULL;
+const libxl__json_object *device_o = NULL;
+const libxl__json_object *len_o = NULL;
+const libxl__json_object *offset_o = NULL;
+const libxl__json_object *busy_o = NULL;
+const libxl__json_object *paused_o = NULL;
+const libxl__json_object *io_status_o = NULL;
+const libxl__json_object *ready_o = NULL;
+
+type_o = libxl__json_map_get("type", blockjobinfo, JSON_STRING);
+device_o = libxl__json_map_get("device", blockjobinfo, JSON_STRING);
+len_o = libxl__json_map_get("len", blockjobinfo, JSON_INTEGER);
+offset_o = libxl__json_map_get("offset", blockjobinfo, JSON_INTEGER);
+busy_o = libxl__json_map_get("busy", blockjobinfo, JSON_BOOL);
+paused_o = 

[Xen-devel] [PATCH RFC v2 4/8] Remove stop NBD server command from xl

2017-10-18 Thread Bruno Alvisio
---
 tools/xl/xl_migrate.c | 11 ---
 1 file changed, 11 deletions(-)

diff --git a/tools/xl/xl_migrate.c b/tools/xl/xl_migrate.c
index fee726f..9f43d96 100644
--- a/tools/xl/xl_migrate.c
+++ b/tools/xl/xl_migrate.c
@@ -427,17 +427,6 @@ static void migrate_receive(int debug, int daemonize, int 
monitor,
 
 fprintf(stderr, "migration target: Got permission, starting domain.\n");
 
-if(mirror_qemu_disks){
-fprintf(stderr, "migration target: Stopping NBD server\n");
-rc = libxl__nbd_server_stop(ctx, domid);
-if (rc){
-rc = 0; //For now, mask the error if NBD server fails to stop
-fprintf(stderr, "Failed to stop NBD server\n");
-}else{
-fprintf(stderr, "Stopped NBD server successfully\n");
-}
-}
-
 if (migration_domname) {
 rc = libxl_domain_rename(ctx, domid, migration_domname, 
common_domname);
 if (rc) goto perhaps_destroy_notify_rc;
-- 
2.3.2 (Apple Git-55)




[Xen-devel] Question about libxc-migration-stream.pandoc

2017-08-09 Thread Bruno Alvisio
Hello,

In /docs/specs/libxc-migration-stream.pandoc "x86 HVM Guest" section states
that:

"HVM\_PARAMS must precede HVM\_CONTEXT, as certain parameters can affect the
validity of architectural state in the context." (line 679)

However, from the code it looks like the HVM_CONTEXT record is sent and
processed before the HVM_PARAMS record:

/tools/libxc/xc_sr_save_x86_hvm.c:

static int x86_hvm_end_of_checkpoint(struct xc_sr_context *ctx)
{
    int rc;

    /* Write the TSC record. */
    rc = write_tsc_info(ctx);
    if ( rc )
        return rc;

    /* Write the HVM_CONTEXT record. */
    rc = write_hvm_context(ctx);
    if ( rc )
        return rc;

    /* Write HVM_PARAMS record contains applicable HVM params. */
    rc = write_hvm_params(ctx);
    if ( rc )
        return rc;

    return 0;
}


I wanted to confirm whether this is just a typo in the documentation file or a
bug in the code?

Thanks,

Bruno


Re: [Xen-devel] [PATCH RFC] Live migration for VMs with QEMU backed local storage

2017-06-29 Thread Bruno Alvisio
Thanks Wei. Currently it is started after the memory is streamed from
source to destination (for migration) and the booting functions are
completed. I was going to ask the list if there is a specific reason the
QEMU process needs to be started at that point.

Also, if the start point of the QEMU process is moved to an earlier part of
the domain creation process, how can I run a basic set of tests to validate
that I am not breaking any functionality and causing a regression?

Thanks,

Bruno

On Thu, Jun 29, 2017 at 7:58 AM, Wei Liu <wei.l...@citrix.com> wrote:

> On Fri, Jun 23, 2017 at 03:42:20AM -0400, Bruno Alvisio wrote:
> > This patch is the first attempt on adding live migration of instances
> with local
> > storage to Xen. This patch just handles very restricted case of fully
> > virtualized HVMs. The code uses the "drive-mirror" capability provided
> by QEMU.
> > A new "-l" option is introduced to "xl migrate" command. If provided,
> the local
> > disk should be mirrored during the migration process. If the option is
> set,
> > during the VM creation a qemu NBD server is started on the destination.
> After
> > the instance is suspended on the source, the QMP "disk-mirror" command
> is issued
> > to mirror the disk to destination. Once the mirroring job is complete,
> the
> > migration process continues as before. Finally, the NBD server is
> stopped after
> > the instance is successfully resumed on the destination node.
> >
> > A major problem with this patch is that the mirroring of the disk is
> performed
> > only after the memory stream is completed and the VM is suspended on the
> source;
> > thus the instance is frozen for a long period of time. The reason this
> happens
> > is that the QEMU process (needed for the disk mirroring) is started on
> the
> > destination node only after the memory copying is completed. One
> possibility I
> > was considering to solve this issue (if it is decided that this
> capability
> > should be used): Could a "helper" QEMU process be started on the
> destination
> > node at the beginning of the migration sequence with the sole purpose of
> > handling the disk mirroring and kill it at the end of the migration
> sequence?
> >
>
> In theory we could, but I am very cautious about this. I _think_ we can
> change the timing QEMU is started. It can be started earlier, but take
> precaution that it shouldn't resume the guest.
>
> In any case, start with the simple setup first.
>


Re: [Xen-devel] [PATCH RFC] Live migration for VMs with QEMU backed local storage

2017-06-26 Thread Bruno Alvisio
Thank you for the information and feedback. The scenarios to handle are:
1. QEMU emulation
2. blkback.
3. qdisk.

From the previous e-mails, there is agreement that no functionality (or maybe
minimal functionality) should be added to blkback.
@Roger Pau Monné: Yes, the "drive-mirror" feature handles disks that are being
actively written. As George Dunlap mentioned, I was thinking of scenarios
where iSCSI or DRBD are not set up and only occasional migrations are needed.

TODO for me: I will start looking at the qdisk backend and see how I can
leverage the disk mirroring feature already provided by QEMU.

Thanks,

Bruno

On Mon, Jun 26, 2017 at 6:06 AM, George Dunlap <dunl...@umich.edu> wrote:

> On Fri, Jun 23, 2017 at 9:03 AM, Roger Pau Monné <roger@citrix.com>
> wrote:
> > On Fri, Jun 23, 2017 at 03:42:20AM -0400, Bruno Alvisio wrote:
> >> This patch is the first attempt on adding live migration of instances
> with local
> >> storage to Xen. This patch just handles very restricted case of fully
> >> virtualized HVMs. The code uses the "drive-mirror" capability provided
> by QEMU.
> >> A new "-l" option is introduced to "xl migrate" command. If provided,
> the local
> >> disk should be mirrored during the migration process. If the option is
> set,
> >> during the VM creation a qemu NBD server is started on the destination.
> After
> >> the instance is suspended on the source, the QMP "disk-mirror" command
> is issued
> >> to mirror the disk to destination. Once the mirroring job is complete,
> the
> >> migration process continues as before. Finally, the NBD server is
> stopped after
> >> the instance is successfully resumed on the destination node.
> >
> > Since I'm not familiar with all this, can this "driver-mirror" QEMU
> > capability handle the migration of disk while being actively used?
> >
> >> A major problem with this patch is that the mirroring of the disk is
> performed
> >> only after the memory stream is completed and the VM is suspended on
> the source;
> >> thus the instance is frozen for a long period of time. The reason this
> happens
> >> is that the QEMU process (needed for the disk mirroring) is started on
> the
> >> destination node only after the memory copying is completed. One
> possibility I
> >> was considering to solve this issue (if it is decided that this
> capability
> >> should be used): Could a "helper" QEMU process be started on the
> destination
> >> node at the beginning of the migration sequence with the sole purpose of
> >> handling the disk mirroring and kill it at the end of the migration
> sequence?
> >>
> >> From the suggestions given by Konrad Wilk and Paul Durrant the preferred
> >> approach would be to handle the mirroring of disks by QEMU instead of
> directly
> >> being handled directly by, for example, blkback. It would be very
> helpful for me
> >> to have a mental map of all the scenarios that can be encountered
> regarding
> >> local disk (Xen could start supporting live migration of certain types
> of local
> >> disks). This are the ones I can think of:
> >> - Fully Virtualized HVM: QEMU emulation
> >
> > PV domains can also use the QEMU PV disk backend, so it should be
> > feasible to handle this migration for all guest types just using
> > QEMU.
> >
> >> - blkback
> >
> > TBH, I don't think such feature should be added to blkback. It's
> > too complex to be implemented inside of the kernel itself.
>
> In theory if blktap just exposed a dirty bitmap, like Xen does for the
> memory, the "smarts" of copying over the dirty blocks could be done in
> the toolstack.
>
> But I think probably the best thing to do to start with would simply
> say that disk migration is only available with a qdisk backend.
>
> > There are options already available to perform block device
> > duplication at the block level itself in Linux like DRDB [0] and IMHO
> > this is what should be used in conjunction with blkback.
> >
> > Remember that at the end of day the Unix philosophy has always been to
> > implement simple tools that solve specific problems, and then glue
> > them together in order to solve more complex problems.
> >
> > In that line of thought, why not simply use iSCSI or similar in order
> > to share the disk with all the hosts?
>
> Well iSCSI can be complicated to set up, and it means your disk data
> goes over a network rather than simply staying on your local disk.
> Obviously if people anticipate doing large amounts of migration, then
> it's worth the effort to set up DRBD or iSCSI.  But having the option
> to do occasional migrates without having to do through that overhead
> is still something worth having.  Given that qemu already has a disk
> mirroring function, it's probably worth pursuing.
>
>  -George
>


[Xen-devel] [PATCH RFC] Live migration for VMs with QEMU backed local storage

2017-06-23 Thread Bruno Alvisio
This patch is the first attempt at adding live migration of instances with local
storage to Xen. This patch just handles the very restricted case of fully
virtualized HVMs. The code uses the "drive-mirror" capability provided by QEMU.
A new "-l" option is introduced to the "xl migrate" command. If provided, the
local disk is mirrored during the migration process. If the option is set,
during the VM creation a QEMU NBD server is started on the destination. After
the instance is suspended on the source, the QMP "drive-mirror" command is issued
to mirror the disk to the destination. Once the mirroring job is complete, the
migration process continues as before. Finally, the NBD server is stopped after
the instance is successfully resumed on the destination node.
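
For illustration, such a migration would be invoked roughly as follows (domain
and host names are placeholders):

    xl migrate -l <domain> <destination-host>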

A major problem with this patch is that the mirroring of the disk is performed
only after the memory stream is completed and the VM is suspended on the source;
thus the instance is frozen for a long period of time. The reason this happens
is that the QEMU process (needed for the disk mirroring) is started on the
destination node only after the memory copying is completed. One possibility I
was considering to solve this issue (if it is decided that this capability
should be used): Could a "helper" QEMU process be started on the destination
node at the beginning of the migration sequence with the sole purpose of
handling the disk mirroring and kill it at the end of the migration sequence? 

From the suggestions given by Konrad Wilk and Paul Durrant, the preferred
approach would be to handle the mirroring of disks in QEMU instead of it being
handled directly by, for example, blkback. It would be very helpful for me to
have a mental map of all the scenarios that can be encountered regarding local
disks (Xen could start supporting live migration of certain types of local
disks). These are the ones I can think of:
- Fully Virtualized HVM: QEMU emulation
- blkback
- blktap / blktap2 


I have included TODOs in the code. I am sending this patch as is because I
first wanted to get initial feedback on whether this is the path that should be
pursued. Any suggestions and ideas on this patch, or on how to make the
solution more generic, would be really appreciated.

Signed-off-by: Bruno Alvisio <bruno.alvi...@gmail.com>

---
 tools/libxl/libxl.h  |  16 -
 tools/libxl/libxl_create.c   |  87 +-
 tools/libxl/libxl_internal.h |  16 +
 tools/libxl/libxl_qmp.c  | 115 ++-
 tools/ocaml/libs/xl/xenlight_stubs.c |   2 +-
 tools/xl/xl.h|   1 +
 tools/xl/xl_migrate.c|  79 +---
 tools/xl/xl_vmcontrol.c  |   2 +-
 8 files changed, 303 insertions(+), 15 deletions(-)

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index cf8687a..81fb2dc 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -1294,6 +1294,15 @@ int libxl_ctx_alloc(libxl_ctx **pctx, int version,
 xentoollog_logger *lg);
 int libxl_ctx_free(libxl_ctx *ctx /* 0 is OK */);
 
+int libxl__drive_mirror(libxl_ctx *ctx, int domid, const char* device, const 
char* target, const char* format) LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl__query_block_jobs(libxl_ctx *ctx, int domid, bool *is_ready) 
LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl__query_block(libxl_ctx *ctx, int domid, char *device_names) 
LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl__nbd_server_stop(libxl_ctx *ctx, int domid) 
LIBXL_EXTERNAL_CALLERS_ONLY;
+
+
 /* domain related functions */
 
 /* If the result is ERROR_ABORTED, the domain may or may not exist
@@ -1307,7 +1316,7 @@ int libxl_domain_create_new(libxl_ctx *ctx, 
libxl_domain_config *d_config,
 LIBXL_EXTERNAL_CALLERS_ONLY;
 int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config,
 uint32_t *domid, int restore_fd,
-int send_back_fd,
+int send_back_fd, int copy_local_disks,
 const libxl_domain_restore_params *params,
 const libxl_asyncop_how *ao_how,
 const libxl_asyncprogress_how *aop_console_how)
@@ -1348,7 +1357,7 @@ static inline int libxl_domain_create_restore_0x040400(
 LIBXL_EXTERNAL_CALLERS_ONLY
 {
 return libxl_domain_create_restore(ctx, d_config, domid, restore_fd,
-   -1, params, ao_how, aop_console_how);
+   -1, 0, params, ao_how, aop_console_how);
 }
 
 #define libxl_domain_create_restore libxl_domain_create_restore_0x040400
@@ -1387,6 +1396,9 @@ int libxl_domain_suspend(libxl_ctx *ctx, uint32_t domid, 
int fd,
 #define LIBXL_SUSPEND_DEBUG 1
 #define LIBXL_SUSPEND_LIVE 2
 
+#define QEMU_DRIVE_MIRROR_PORT "11000"
+#defi

[Xen-devel] Fwd: VM Live Migration with Local Storage

2017-06-11 Thread Bruno Alvisio
Hello,

I think it would be beneficial to add a local disk migration feature for the
'blkback' backend since it is one of the most used backends. I would like to
start a discussion about the design of the machinery needed to achieve this
feature.

===
Objective
Add a feature to migrate VMs that have local storage and use the blkback
interface.
===

===
User Interface
Add a command-line option to the “xl migrate” command to specify whether local
disks need to be copied to the destination node.
===

===
Design

   1. As part of libxl_domain_suspend, the “disk mirroring machinery”
   starts an asynchronous job that copies the disk blocks from the source to
   the destination.
   2. The protocol to copy the disks should resemble the one used for the
   memory copy:

   - Do a first, initial copy of the disk.
   - Track the sectors that have been written since the copy started. For
   this, the blkback driver should be aware that a disk migration is in
   progress and, in that case, forward the write request to the “migration
   machinery” so that a record of dirty blocks is kept (a minimal sketch of
   such a log follows this list).
   - The migration machinery copies “dirty” blocks until convergence.
   - Duplicate all the disk writes/reads to both disks on the source and
   destination nodes while the VM is being suspended.
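
The following is a minimal, purely illustrative sketch of the dirty-sector log
mentioned above; none of this is existing Xen code, and all names (dirty_log,
mark_dirty, ...) are hypothetical:

    #include <stdint.h>
    #include <stdlib.h>

    struct dirty_log {
        uint64_t nr_sectors;
        unsigned long *bitmap;           /* one bit per sector */
    };

    static struct dirty_log *dirty_log_new(uint64_t nr_sectors)
    {
        size_t bits_per_word = 8 * sizeof(unsigned long);
        size_t words = (nr_sectors + bits_per_word - 1) / bits_per_word;
        struct dirty_log *log = calloc(1, sizeof(*log));

        if (!log)
            return NULL;
        log->nr_sectors = nr_sectors;
        log->bitmap = calloc(words, sizeof(unsigned long));
        if (!log->bitmap) {
            free(log);
            return NULL;
        }
        return log;
    }

    /* Called for every guest write forwarded by blkback while mirroring. */
    static void mark_dirty(struct dirty_log *log, uint64_t sector,
                           uint64_t count)
    {
        size_t bits_per_word = 8 * sizeof(unsigned long);
        uint64_t s;

        for (s = sector; s < sector + count && s < log->nr_sectors; s++)
            log->bitmap[s / bits_per_word] |= 1UL << (s % bits_per_word);
    }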


Block Diagram

                   +-----------+
                   |    VM     |
                   +-----------+
                         |
                         | I/O Write
                         |
                         V
   +----------+    +------------+   sectors Stream   +-------------+
   |  blkback |--->|   Source   |------------------->| Destination |
   +----------+    |   mirror   |     I/O Writes     |   mirror    |
        |          |  machinery |                    |  machinery  |
        |          +------------+                    +-------------+
        |                                                   |
        | To I/O block layer                                |
        |                                                   |
        V                                                   V
   +----------+                                      +-------------+
   |   disk   |                                      |  Mirrored   |
   +----------+                                      |    Disk     |
                                                     +-------------+


==
Initial Questions

   1. Is it possible to leverage the current design of QEMU for drive
   mirroring for Xen?
   2. What is the best place to implement this protocol? As part of Xen or
   the kernel?
   3. Is it possible to use the same stream currently used for migrating
   the memory to also migrate the disk blocks?


Any guidance/feedback for a more specific design is greatly appreciated.

Thanks,

Bruno

On Wed, Feb 22, 2017 at 5:00 AM, Wei Liu <wei.l...@citrix.com> wrote:

> Hi Bruno
>
> Thanks for your interest.
>
> On Tue, Feb 21, 2017 at 10:34:45AM -0800, Bruno Alvisio wrote:
> > Hello,
> >
> > I have been to doing some research and as far as I know XEN supports
> > Live Migration
> > of VMs that only have shared storage. (i.e. iSCSI) If the VM has been
> > booted with local storage it cannot be live migrated.
> > QEMU seems to support live migration with local storage (I have tested
> using
> > 'virsh migrate with the '--storage-copy-all' option)
> >
> > I am wondering if this still true in the latest XEN release. Are there
> plans
> > to add this functionality in future releases? I would be interested in
> > contributing to the Xen Project by adding this functionality.
> >
>
> No plan at the moment.
>
> Xen supports a wide variety of disk backends. QEMU is one of them. The
> others are blktap (not upstreamed yet) and in-kernel blkback. The latter
> two don't have the capability to copy local storage to the remote end.
>
> That said, I think it would be valuable to have such capability for QEMU
> backed disks. We also need to design the machinery so that other
> backends can be made to do the same thing in the future.
>
> If you want to undertake this project, I suggest you setup a Xen system,
> read xl / libxl source code under tools directory and understand how
> everything is put together. Reading source code could be daunting at
> times, so don't hesitate to ask for pointers. After you have the big
> picture in mind, we can then discuss how to implement the functionality
> on xen-devel.
>
> Does this sound good to you?
>
> Wei.
>
> > Thanks,
> >
> > Bruno
>
>
>
___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] Question about QEMU as ide device emulator

2017-05-26 Thread Bruno Alvisio
Question has been answered. Thanks.

On Thu, May 25, 2017 at 6:35 PM, Bruno Alvisio <bruno.alvi...@gmail.com>
wrote:

> Hello all,
>
> Summary
>
> I am using the Xen hypervisor to run an HVM with a QEMU-backed disk. After I
> start the HVM I use the QMP "query-block" command to see the devices of the
> VM. Initially the command returns the disk that I set as part of the
> configuration, but after a few seconds the "query-block" command returns an
> empty result.
>
> KVM Version: 4.6.5
> Arch: x86-64
> QEMU : 2.2.1
>
>
> I was expecting to see the device information in QEMU such as:
>
> {"execute":"query-block"}
>
> {"return": [{"io-status": "ok", "device": "ide0-hd0", "locked": false,
> "removable": false, "inserted": {"iops_rd": 0, "detect_zeroes": "off",
> "image": {"virtual-size": 10737418240, "filename": "/home/balvisio/
> debian-disk.img", "format": "raw", "actual-size": 4940075008,
> "dirty-flag": false}, "iops_wr": 0, "ro": false, "backing_file_depth": 0,
> "drv": "raw", "iops": 0, "bps_wr": 0, "encrypted": false, "bps": 0,
> "bps_rd": 0, "file": "/home/balvisio/debian-disk.img",
> "encryption_key_missing": false}, "type": "unknown"}]}
>
>
>
> I wanted to understand how the storage subsystem works for QEMU-backed
> disks. Any help and pointers in the code would be great.
>
> Eventually I would like to implement a feature in XEN to allow migration
> of instances with local disks.
>
> Thanks,
>
> Bruno
>
>
> DETAILED LOGS
>
> I am using the Xen hypervisor to run an HVM with a QEMU-backed disk. When I start
> the HVM I see the following QEMU process started:
>
> root  2199 1  0 18:57 ?00:00:02
> /usr/local/lib/xen/bin/qemu-system-i386 -xen-domid 3 -chardev
> socket,id=libxl-cmd,path=/var/run/xen/qmp-libxl-3,server,nowait
> -no-shutdown -mon chardev=libxl-cmd,mode=control -chardev
> socket,id=libxenstat-cmd,path=/var/run/xen/qmp-libxenstat-3,server,nowait
> -mon chardev=libxenstat-cmd,mode=control -nodefaults -no-user-config
> -name debianL2 -vnc :0,to=99 -display none -serial pty -device
> cirrus-vga,vgamem_mb=8 -boot order=d -device 
> rtl8139,id=nic0,netdev=net0,mac=00:16:3e:1b:d0:7e
> -netdev type=tap,id=net0,ifname=vif3.0-emu,script=no,downscript=no
> -machine xenfv -m 1016
> -drive file=/home/balvisio/debian-disk.img,if=ide,index=0,media=disk,format=raw,cache=writeback
> -drive if=ide,index=2,readonly=on,media=cdrom,id=ide-5632,file=/home/balvisio/debian-live-8.7.1-amd64-gnome-desktop.iso,format=raw
>
> After launching the VM, I connected to the QMP socket:
>
> # rlwrap -C qmp socat STDIO UNIX:/var/run/xen/qmp-libxl-2
>
> {"QMP": {"version": {"qemu": {"micro": 1, "minor": 2, "major": 2},
> "package": ""}, "capabilities": []}}
>
> {"execute":"qmp_capabilities"}
>
> {"return": {}}
>
> I issue the "query-block" command and I get
>
> {"execute":"query-block"}
>
> {"return": []}
>
>
> Xen Config File Used
>
> kernel="/usr/local/lib/xen/boot/hvmloader"
>
> builder='hvm'
>
> memory=1024
>
> vcpus=1
>
> name="debianL2"
>
> vfb = ['type=vnc']
>
> vif= ['bridge=xenbr0']
>
> boot='b'
>
> disk=['file:/home/balvisio/debian-disk.img,xvda,w']
>
> acpi=1
>
> device_model_version='qemu-xen'
>
> serial='pty'
>
> vnc=1
>
> vnclisten=""
> vncpasswd=""
>
>
>
>
>
___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] Question about QEMU as ide device emulator

2017-05-25 Thread Bruno Alvisio
Hello all,

Summary

I am using the Xen hypervisor to run an HVM with a QEMU-backed disk. After I
start the HVM I use the QMP "query-block" command to see the devices of the
VM. Initially the command returns the disk that I set as part of the
configuration, but after a few seconds the "query-block" command returns an
empty result.

KVM Version: 4.6.5
Arch: x86-64
QEMU : 2.2.1


I was expecting to see the device information in QEMU such as:

{"execute":"query-block"}

{"return": [{"io-status": "ok", "device": "ide0-hd0", "locked": false,
"removable": false, "inserted": {"iops_rd": 0, "detect_zeroes": "off",
"image": {"virtual-size": 10737418240, "filename": "/home/balvisio/
debian-disk.img", "format": "raw", "actual-size": 4940075008, "dirty-flag":
false}, "iops_wr": 0, "ro": false, "backing_file_depth": 0, "drv": "raw",
"iops": 0, "bps_wr": 0, "encrypted": false, "bps": 0, "bps_rd": 0, "file":
"/home/balvisio/debian-disk.img", "encryption_key_missing": false}, "type":
"unknown"}]}



I wanted to understand how the storage subsystem works for QEMU-backed
disks. Any help and pointers in the code would be great.

Eventually I would like to implement a feature in XEN to allow migration of
instances with local disks.

Thanks,

Bruno


DETAILED LOGS

I am using the Xen hypervisor to run an HVM with a QEMU-backed disk. When I start
the HVM I see the following QEMU process started:

root  2199 1  0 18:57 ?00:00:02
/usr/local/lib/xen/bin/qemu-system-i386 -xen-domid 3 -chardev
socket,id=libxl-cmd,path=/var/run/xen/qmp-libxl-3,server,nowait
-no-shutdown -mon chardev=libxl-cmd,mode=control -chardev
socket,id=libxenstat-cmd,path=/var/run/xen/qmp-libxenstat-3,server,nowait
-mon chardev=libxenstat-cmd,mode=control -nodefaults -no-user-config -name
debianL2 -vnc :0,to=99 -display none -serial pty -device
cirrus-vga,vgamem_mb=8 -boot order=d -device
rtl8139,id=nic0,netdev=net0,mac=00:16:3e:1b:d0:7e
-netdev type=tap,id=net0,ifname=vif3.0-emu,script=no,downscript=no
-machine xenfv -m 1016
-drive file=/home/balvisio/debian-disk.img,if=ide,index=0,media=disk,format=raw,cache=writeback
-drive if=ide,index=2,readonly=on,media=cdrom,id=ide-5632,file=/home/balvisio/debian-live-8.7.1-amd64-gnome-desktop.iso,format=raw

After launching the VM, I connected to the QMP socket:

# rlwrap -C qmp socat STDIO UNIX:/var/run/xen/qmp-libxl-2

{"QMP": {"version": {"qemu": {"micro": 1, "minor": 2, "major": 2},
"package": ""}, "capabilities": []}}

{"execute":"qmp_capabilities"}

{"return": {}}

I issue the "query-block" command and I get

{"execute":"query-block"}

{"return": []}


Xen Config File Used

kernel="/usr/local/lib/xen/boot/hvmloader"

builder='hvm'

memory=1024

vcpus=1

name="debianL2"

vfb = ['type=vnc']

vif= ['bridge=xenbr0']

boot='b'

disk=['file:/home/balvisio/debian-disk.img,xvda,w']

acpi=1

device_model_version='qemu-xen'

serial='pty'

vnc=1

vnclisten=""
vncpasswd=""
___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] VM Live Migration with Local Storage

2017-02-21 Thread Bruno Alvisio
Hello,

I have been doing some research and as far as I know Xen supports
live migration of VMs that only have shared storage (e.g. iSCSI). If the VM
has been booted with local storage it cannot be live migrated.
QEMU seems to support live migration with local storage (I have tested using
'virsh migrate' with the '--copy-storage-all' option).

I am wondering if this is still true in the latest Xen release. Are there
plans to add this functionality in future releases? I would be interested in
contributing to the Xen Project by adding this functionality.

Thanks,

Bruno
___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel