Add support for querying the maximum allowable downtime during live migration: implement virDomainMigrateGetMaxDowntime in the qemu driver and wire it through the remote driver and protocol.
---
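Not part of the patch itself, but a minimal caller-side sketch of how the new API is intended to be used. It assumes the public entry point mirrors the driver callback added below, i.e. int virDomainMigrateGetMaxDowntime(virDomainPtr dom, unsigned long long *downtime, unsigned int flags), that the value is reported in milliseconds (matching virDomainMigrateSetMaxDowntime), and that the domain name "demo-guest" is purely illustrative:

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = NULL;
        unsigned long long downtime = 0;

        if (!conn)
            return 1;

        /* Query the current downtime limit of a (migrating) domain. */
        if ((dom = virDomainLookupByName(conn, "demo-guest")) &&
            virDomainMigrateGetMaxDowntime(dom, &downtime, 0) == 0)
            printf("max downtime: %llu ms\n", downtime);

        if (dom)
            virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }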
src/qemu/qemu_driver.c | 58 ++++++++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_monitor.h | 3 +++
src/qemu/qemu_monitor_json.c | 4 +++
src/remote/remote_driver.c | 1 +
src/remote/remote_protocol.x | 16 +++++++++++-
src/remote_protocol-structs | 8 ++++++
6 files changed, 89 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e9f07c6..ca7f531 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -13150,6 +13150,63 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
     return ret;
 }
 
+
+static int
+qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
+                                unsigned long long *downtime,
+                                unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virDomainObjPtr vm;
+    qemuDomainObjPrivatePtr priv;
+    qemuMonitorMigrationParams migparams;
+    int ret = -1;
+
+    virCheckFlags(0, -1);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        goto cleanup;
+
+    if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       "%s", _("domain is not running"));
+        goto endjob;
+    }
+
+    priv = vm->privateData;
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    if (qemuMonitorGetMigrationParams(priv->mon, &migparams) >= 0 &&
+        migparams.downtimeLimit_set)
+    {
+        *downtime = migparams.downtimeLimit;
+        ret = 0;
+    }
+
+    if (ret < 0) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       "%s", _("Querying migration downtime is not supported by "
+                               "QEMU binary"));
+    }
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
+        ret = -1;
+
+ endjob:
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    virDomainObjEndAPI(&vm);
+    return ret;
+}
+
+
 static int
 qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
                                      unsigned long long *cacheSize,
@@ -20829,6 +20886,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
     .domainGetJobInfo = qemuDomainGetJobInfo, /* 0.7.7 */
     .domainGetJobStats = qemuDomainGetJobStats, /* 1.0.3 */
     .domainAbortJob = qemuDomainAbortJob, /* 0.7.7 */
+    .domainMigrateGetMaxDowntime = qemuDomainMigrateGetMaxDowntime, /* 3.7.0 */
     .domainMigrateSetMaxDowntime = qemuDomainMigrateSetMaxDowntime, /* 0.8.0 */
     .domainMigrateGetCompressionCache = qemuDomainMigrateGetCompressionCache, /* 1.0.3 */
     .domainMigrateSetCompressionCache = qemuDomainMigrateSetCompressionCache, /* 1.0.3 */
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 31f7e97..9805a33 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -627,6 +627,9 @@ struct _qemuMonitorMigrationParams {
      * whereas, some string value indicates we can support setting/clearing */
     char *migrateTLSAlias;
     char *migrateTLSHostname;
+
+    bool downtimeLimit_set;
+    unsigned long long downtimeLimit;
 };
 
 int qemuMonitorGetMigrationParams(qemuMonitorPtr mon,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index b8a6815..df5fb7c 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2705,6 +2705,10 @@ qemuMonitorJSONGetMigrationParams(qemuMonitorPtr mon,
 
 #undef PARSE
 
+    if (virJSONValueObjectGetNumberUlong(result, "downtime-limit",
+                                         &params->downtimeLimit) == 0)
+        params->downtimeLimit_set = true;
+
     if ((tlsStr = virJSONValueObjectGetString(result, "tls-creds"))) {
         if (VIR_STRDUP(params->migrateTLSAlias, tlsStr) < 0)
             goto cleanup;
diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c
index a57d25f..027b073 100644
--- a/src/remote/remote_driver.c
+++ b/src/remote/remote_driver.c
@@ -8400,6 +8400,7 @@ static virHypervisorDriver hypervisor_driver = {
     .domainGetJobInfo = remoteDomainGetJobInfo, /* 0.7.7 */
     .domainGetJobStats = remoteDomainGetJobStats, /* 1.0.3 */
     .domainAbortJob = remoteDomainAbortJob, /* 0.7.7 */
+    .domainMigrateGetMaxDowntime = remoteDomainMigrateGetMaxDowntime, /* 3.7.0 */
     .domainMigrateSetMaxDowntime = remoteDomainMigrateSetMaxDowntime, /* 0.8.0 */
     .domainMigrateGetCompressionCache = remoteDomainMigrateGetCompressionCache, /* 1.0.3 */
     .domainMigrateSetCompressionCache = remoteDomainMigrateSetCompressionCache, /* 1.0.3 */
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index 0943208..2d49ceb 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -2326,6 +2326,15 @@ struct remote_domain_abort_job_args {
     remote_nonnull_domain dom;
 };
 
+struct remote_domain_migrate_get_max_downtime_args {
+    remote_nonnull_domain dom;
+    unsigned int flags;
+};
+
+struct remote_domain_migrate_get_max_downtime_ret {
+    unsigned hyper downtime; /* insert@1 */
+};
+
 struct remote_domain_migrate_set_max_downtime_args {
     remote_nonnull_domain dom;
     unsigned hyper downtime;
@@ -6064,7 +6073,12 @@ enum remote_procedure {
      * @generate: both
      * @acl: domain:write
      */
-    REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386
+    REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386,
+    /**
+     * @generate: both
+     * @acl: domain:migrate
+     */
+    REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME = 387
 };
diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs
index a46fe37..cea7664 100644
--- a/src/remote_protocol-structs
+++ b/src/remote_protocol-structs
@@ -1773,6 +1773,13 @@ struct remote_domain_get_job_stats_ret {
 struct remote_domain_abort_job_args {
         remote_nonnull_domain      dom;
 };
+struct remote_domain_migrate_get_max_downtime_args {
+        remote_nonnull_domain      dom;
+        u_int                      flags;
+};
+struct remote_domain_migrate_get_max_downtime_ret {
+        uint64_t                   downtime;
+};
 struct remote_domain_migrate_set_max_downtime_args {
         remote_nonnull_domain      dom;
         uint64_t                   downtime;
@@ -3233,4 +3240,5 @@ enum remote_procedure {
         REMOTE_PROC_DOMAIN_SET_VCPU = 384,
         REMOTE_PROC_DOMAIN_EVENT_BLOCK_THRESHOLD = 385,
         REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386,
+        REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME = 387,
 };
--
1.8.3.1