This approach looks a bit more natural. State deletion can be performed on a
running VM, which reduces VM downtime.
The situation when all devices are read-only is not frequent enough to
optimize for. We can afford to pause the VM in that case. This reduces the
amount of time the aio_context remains locked in the following patches.

Signed-off-by: Denis V. Lunev <d...@openvz.org>
CC: Juan Quintela <quint...@redhat.com>
---
 migration/savevm.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/migration/savevm.c b/migration/savevm.c
index f7ff37a..dbcb313 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1267,9 +1267,12 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
         return;
     }
 
-    bs = find_vmstate_bs();
-    if (!bs) {
-        monitor_printf(mon, "No block device can accept snapshots\n");
+    /* Delete old snapshots of the same name */
+    if (name && bdrv_all_delete_snapshot(name, &bs, &local_err) < 0) {
+        monitor_printf(mon,
+                       "Error while deleting snapshot on device '%s': %s\n",
+                       bdrv_get_device_name(bs), error_get_pretty(local_err));
+        error_free(local_err);
         return;
     }
 
@@ -1290,6 +1293,12 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
     sn->date_nsec = tv.tv_usec * 1000;
     sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 
+    bs = find_vmstate_bs();
+    if (!bs) {
+        monitor_printf(mon, "No block device can accept snapshots\n");
+        goto the_end;
+    }
+
     if (name) {
         ret = bdrv_snapshot_find(bs, old_sn, name);
         if (ret >= 0) {
@@ -1304,15 +1313,6 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
         strftime(sn->name, sizeof(sn->name), "vm-%Y%m%d%H%M%S", &tm);
     }
 
-    /* Delete old snapshots of the same name */
-    if (name && bdrv_all_delete_snapshot(name, &bs1, &local_err) < 0) {
-        monitor_printf(mon,
-                       "Error while deleting snapshot on device '%s': %s\n",
-                       bdrv_get_device_name(bs1), error_get_pretty(local_err));
-        error_free(local_err);
-        goto the_end;
-    }
-
     /* save the VM state */
     f = qemu_fopen_bdrv(bs, 1);
     if (!f) {
-- 
2.5.0