On 08/21/2013 03:18 AM, Lei Li wrote:
Implementation of outgoing part for localhost migration.
The integration of migration thread and corresponding
adjustment will be in coming patches.
Signed-off-by: Lei Li <li...@linux.vnet.ibm.com>
---
include/migration/migration.h | 2 +
migration-local.c | 85 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 87 insertions(+), 0 deletions(-)
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 5336117..d2c7eff 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -92,6 +92,8 @@ void rdma_start_outgoing_migration(void *opaque, const char *host_port, Error **
void rdma_start_incoming_migration(const char *host_port, Error **errp);
+void local_start_outgoing_migration(void *opaque, const char *uri, Error **errp);
+
void migrate_fd_error(MigrationState *s);
void migrate_fd_connect(MigrationState *s);
diff --git a/migration-local.c b/migration-local.c
index 93190fd..cf4a091 100644
--- a/migration-local.c
+++ b/migration-local.c
@@ -209,3 +209,88 @@ static void *qemu_fopen_local(int fd, const char *mode)
return s->file;
}
+
+/************************************************************************
+ * Outgoing part
+ **/
+
+static QEMUFileLocal *local_migration_init(void)
+{
+ QEMUFileLocal *s = g_malloc0(sizeof(*s));
+
+ s->state = MIG_STATE_SETUP;
+ trace_migrate_set_state(MIG_STATE_SETUP);
+ s->fd = -1;
+ s->last_block_sent = NULL;
+
+ return s;
+}
+
migration.c already does this. Is there some reason why you need to
access the state machine?
If you need custom initial values, you should change
migrate_get_current(), not create your own init().
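For reference, migrate_init() in migration.c already contains, roughly
(quoting from memory, the surrounding context may differ a little):

    MigrationState *s = migrate_get_current();

    s->state = MIG_STATE_SETUP;
    trace_migrate_set_state(MIG_STATE_SETUP);

so the state/trace lines above are pure duplication, and anything that
really needs a different default on MigrationState belongs in the static
initializer inside migrate_get_current() instead.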
+static void unix_local_wait_for_connect(int fd, void *opaque)
+{
+ MigrationState *s = opaque;
+
+ if (fd < 0) {
+ DPRINTF("migrate connect error\n");
+ s->file = NULL;
+ migrate_fd_error(s);
+ } else {
+ DPRINTF("migrate connect success\n");
+ s->file = qemu_fopen_local(fd, "wb");
+ migrate_fd_connect(s);
+ }
+}
+
+static void unix_local_outgoing_connect(MigrationState *s, const char *path,
+ Error **errp)
+{
+ unix_nonblocking_connect(path, unix_local_wait_for_connect, s, errp);
+}
+
+void local_start_outgoing_migration(void *opaque, const char *uri,
+ Error **errp)
+{
+ MigrationState *s = opaque;
+ const char *path;
+ QEMUFileLocal *local;
+ Error *local_err = NULL;
+ int is_vm_running;
+ int ret;
+
+ local = local_migration_init();
+ if (local == NULL) {
+ error_setg(errp, "Failed to initialize\n");
+ }
+
+ bdrv_flush_all();
+
+ is_vm_running = runstate_is_running();
+
+ /* Stop the VM first */
+ if (is_vm_running) {
+ ret = vm_stop(RUN_STATE_SAVE_VM);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
What is this for? migration_thread() already does this work for you...
Are you trying to avoid pre-copy? Pre-copy should not hurt you here.
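For comparison, the completion path of migration_thread() already does
roughly this (paraphrasing migration.c, not an exact quote):

    qemu_mutex_lock_iothread();
    old_vm_running = runstate_is_running();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret >= 0) {
        /* downtime phase: uncap the bandwidth and send the rest */
        qemu_file_set_rate_limit(s->file, INT_MAX);
        qemu_savevm_state_complete(s->file);
    }
    qemu_mutex_unlock_iothread();

and it calls vm_start() again on failure/cancel if the guest was running
before. Stopping the guest up front here only lengthens the downtime.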
+ /* Start outgoing migration via unix socket. */
+ if (uri) {
+ /* XXX. Creation of a new unix_start_outgoing_migration_* is
+ * not necessary, just for the first step. This will be replaced
+ * by vmsplice mechanism.
+ **/
+ unix_local_outgoing_connect(s, path, &local_err);
+ } else {
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE,
+ "uri", "a valid migration protocol");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ error_propagate(errp, local_err);
+ g_free(local);
+ migrate_fd_error(s);
+}
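One more thing: since the wiring into qmp_migrate() is left for a later
patch, I assume the URI dispatch will end up looking something like this
(just a sketch; the "local:" prefix is my guess at the intended scheme):

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "local:", &p)) {
        local_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    }

in which case what gets passed in here is only the part of the URI after
the prefix.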