Re: [libvirt] [PATCH v4 2/4] qemu: Implement virDomainMigrateGetMaxDowntime

2017-08-22 Thread John Ferlan


On 08/17/2017 06:17 PM, Scott Garfinkle wrote:
> Add code to support querying maximum allowable downtime during live migration.
> ---
>  src/qemu/qemu_driver.c   | 58 
> 
>  src/qemu/qemu_monitor.h  |  3 +++
>  src/qemu/qemu_monitor_json.c |  4 +++
>  src/remote/remote_driver.c   |  1 +
>  src/remote/remote_protocol.x | 16 +++-
>  src/remote_protocol-structs  |  8 ++
>  6 files changed, 89 insertions(+), 1 deletion(-)
> 
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index e9f07c6..ca7f531 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -13150,6 +13150,63 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
>  return ret;
>  }
>  
> +
> +static int
> +qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
> +unsigned long long *downtime,
> +unsigned int flags)
> +{
> +virQEMUDriverPtr driver = dom->conn->privateData;
> +virDomainObjPtr vm;
> +qemuDomainObjPrivatePtr priv;
> +qemuMonitorMigrationParams migparams;

migparams = { 0 };

> +int ret = -1;
> +
> +virCheckFlags(0, -1);
> +
> +if (!(vm = qemuDomObjFromDomain(dom)))
> +goto cleanup;
> +
> +if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
> +goto cleanup;
> +
> +if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
> +goto cleanup;
> +
> +if (!virDomainObjIsActive(vm)) {
> +virReportError(VIR_ERR_OPERATION_INVALID,
> +   "%s", _("domain is not running"));
> +goto endjob;
> +}
> +
> +priv = vm->privateData;
> +qemuDomainObjEnterMonitor(driver, vm);
> +
> +if (qemuMonitorGetMigrationParams(priv->mon, &migparams) >= 0 &&
> +migparams.downtimeLimit_set)
> +{
> +*downtime = migparams.downtimeLimit;
> +ret = 0;
> +}

The { should have been on the previous line

The indent should have been 4 spaces

and the checks are almost right.  This will elicit the following message
and overwrite something that caused qemuMonitorGetMigrationParams to
fail for some other reason. The only reason the following message should
be generated is if we get a 0 return and downtimeLimit_set is false.

> +
> +if (ret < 0) {
> +virReportError(VIR_ERR_OPERATION_INVALID,
> +   "%s", _("Querying migration downtime is not supported by "
> + "QEMU binary"));

Indents for line 2 and 3 were off on this as well.

I altered the sequence to be as I suggested in the v3 review:

if (qemuMonitorGetMigrationParams(priv->mon, &migparams) == 0) {
if (migparams.downtimeLimit_set) {
*downtime = migparams.downtimeLimit;
ret = 0;
} else {
virReportError(VIR_ERR_OPERATION_INVALID,"%s",
   _("Querying migration downtime is not
supported by "
 "QEMU binary"));
}
}

[pardon the email client line wrap on the string]

> +}
> +
> +if (qemuDomainObjExitMonitor(driver, vm) < 0)
> +ret = -1;
> +
> + endjob:
> +qemuDomainObjEndJob(driver, vm);
> +
> + cleanup:
> +virDomainObjEndAPI(&vm);
> +return ret;
> +}
> +
> +
>  static int
>  qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
>   unsigned long long *cacheSize,
> @@ -20829,6 +20886,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
>  .domainGetJobInfo = qemuDomainGetJobInfo, /* 0.7.7 */
>  .domainGetJobStats = qemuDomainGetJobStats, /* 1.0.3 */
>  .domainAbortJob = qemuDomainAbortJob, /* 0.7.7 */
> +.domainMigrateGetMaxDowntime = qemuDomainMigrateGetMaxDowntime, /* 3.7.0 
> */
>  .domainMigrateSetMaxDowntime = qemuDomainMigrateSetMaxDowntime, /* 0.8.0 
> */
>  .domainMigrateGetCompressionCache = 
> qemuDomainMigrateGetCompressionCache, /* 1.0.3 */
>  .domainMigrateSetCompressionCache = 
> qemuDomainMigrateSetCompressionCache, /* 1.0.3 */
> diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
> index 31f7e97..9805a33 100644
> --- a/src/qemu/qemu_monitor.h
> +++ b/src/qemu/qemu_monitor.h
> @@ -627,6 +627,9 @@ struct _qemuMonitorMigrationParams {
>   * whereas, some string value indicates we can support setting/clearing 
> */
>  char *migrateTLSAlias;
>  char *migrateTLSHostname;
> +
> +bool downtimeLimit_set;
> +unsigned long long downtimeLimit;
>  };
>  
>  int qemuMonitorGetMigrationParams(qemuMonitorPtr mon,
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index b8a6815..df5fb7c 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -2705,6 +2705,10 @@ qemuMonitorJSONGetMigrationParams(qemuMonitorPtr mon,
>  
>  #undef PARSE
>  
> +if (virJSONValueObjectGetNumberUlong(result, "downtime-limit",
> + >downtimeLimit) == 0)
> +

[libvirt] [PATCH v4 2/4] qemu: Implement virDomainMigrateGetMaxDowntime

2017-08-17 Thread Scott Garfinkle
Add code to support querying maximum allowable downtime during live migration.
---
 src/qemu/qemu_driver.c   | 58 
 src/qemu/qemu_monitor.h  |  3 +++
 src/qemu/qemu_monitor_json.c |  4 +++
 src/remote/remote_driver.c   |  1 +
 src/remote/remote_protocol.x | 16 +++-
 src/remote_protocol-structs  |  8 ++
 6 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e9f07c6..ca7f531 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -13150,6 +13150,63 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
 return ret;
 }
 
+
+static int
+qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
+unsigned long long *downtime,
+unsigned int flags)
+{
+virQEMUDriverPtr driver = dom->conn->privateData;
+virDomainObjPtr vm;
+qemuDomainObjPrivatePtr priv;
+qemuMonitorMigrationParams migparams;
+int ret = -1;
+
+virCheckFlags(0, -1);
+
+if (!(vm = qemuDomObjFromDomain(dom)))
+goto cleanup;
+
+if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
+goto cleanup;
+
+if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+goto cleanup;
+
+if (!virDomainObjIsActive(vm)) {
+virReportError(VIR_ERR_OPERATION_INVALID,
+   "%s", _("domain is not running"));
+goto endjob;
+}
+
+priv = vm->privateData;
+qemuDomainObjEnterMonitor(driver, vm);
+
+if (qemuMonitorGetMigrationParams(priv->mon, &migparams) >= 0 &&
+migparams.downtimeLimit_set)
+{
+*downtime = migparams.downtimeLimit;
+ret = 0;
+}
+
+if (ret < 0) {
+virReportError(VIR_ERR_OPERATION_INVALID,
+   "%s", _("Querying migration downtime is not supported by "
+ "QEMU binary"));
+}
+
+if (qemuDomainObjExitMonitor(driver, vm) < 0)
+ret = -1;
+
+ endjob:
+qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+virDomainObjEndAPI(&vm);
+return ret;
+}
+
+
 static int
 qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
  unsigned long long *cacheSize,
@@ -20829,6 +20886,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
 .domainGetJobInfo = qemuDomainGetJobInfo, /* 0.7.7 */
 .domainGetJobStats = qemuDomainGetJobStats, /* 1.0.3 */
 .domainAbortJob = qemuDomainAbortJob, /* 0.7.7 */
+.domainMigrateGetMaxDowntime = qemuDomainMigrateGetMaxDowntime, /* 3.7.0 */
 .domainMigrateSetMaxDowntime = qemuDomainMigrateSetMaxDowntime, /* 0.8.0 */
 .domainMigrateGetCompressionCache = qemuDomainMigrateGetCompressionCache, 
/* 1.0.3 */
 .domainMigrateSetCompressionCache = qemuDomainMigrateSetCompressionCache, 
/* 1.0.3 */
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 31f7e97..9805a33 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -627,6 +627,9 @@ struct _qemuMonitorMigrationParams {
  * whereas, some string value indicates we can support setting/clearing */
 char *migrateTLSAlias;
 char *migrateTLSHostname;
+
+bool downtimeLimit_set;
+unsigned long long downtimeLimit;
 };
 
 int qemuMonitorGetMigrationParams(qemuMonitorPtr mon,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index b8a6815..df5fb7c 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2705,6 +2705,10 @@ qemuMonitorJSONGetMigrationParams(qemuMonitorPtr mon,
 
 #undef PARSE
 
+if (virJSONValueObjectGetNumberUlong(result, "downtime-limit",
+ >downtimeLimit) == 0)
+params->downtimeLimit_set = true;
+
 if ((tlsStr = virJSONValueObjectGetString(result, "tls-creds"))) {
 if (VIR_STRDUP(params->migrateTLSAlias, tlsStr) < 0)
 goto cleanup;
diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c
index a57d25f..027b073 100644
--- a/src/remote/remote_driver.c
+++ b/src/remote/remote_driver.c
@@ -8400,6 +8400,7 @@ static virHypervisorDriver hypervisor_driver = {
 .domainGetJobInfo = remoteDomainGetJobInfo, /* 0.7.7 */
 .domainGetJobStats = remoteDomainGetJobStats, /* 1.0.3 */
 .domainAbortJob = remoteDomainAbortJob, /* 0.7.7 */
+.domainMigrateGetMaxDowntime = remoteDomainMigrateGetMaxDowntime, /* 3.7.0 
*/
 .domainMigrateSetMaxDowntime = remoteDomainMigrateSetMaxDowntime, /* 0.8.0 
*/
 .domainMigrateGetCompressionCache = 
remoteDomainMigrateGetCompressionCache, /* 1.0.3 */
 .domainMigrateSetCompressionCache = 
remoteDomainMigrateSetCompressionCache, /* 1.0.3 */
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index 0943208..2d49ceb 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -2326,6 +2326,15 @@ struct remote_domain_abort_job_args {
 };
 
 
+struct