Call the QEMU monitor command directly from within a special job
(QEMU_JOB_MIGRATION_OP) that is only allowed during outgoing migration,
instead of signalling the migration job to change the speed asynchronously
via QEMU_JOB_SIGNAL_MIGRATE_SPEED.
---
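Roughly, the new flow in qemuDomainMigrateSetMaxSpeed looks like the sketch
below (simplified from the qemu_driver.c hunk further down; locking, error
reporting and the active-domain check are elided):

    /* Grab the new per-domain job that the outgoing migration async job
     * explicitly allows (see the job mask change in qemuMigrationJobStart). */
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
        goto cleanup;

    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
        /* the job only makes sense while this domain is being migrated out */
        goto endjob;
    }

    /* Talk to the monitor directly instead of signalling the migration job. */
    ignore_value(qemuDomainObjEnterMonitor(driver, vm));
    ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
    qemuDomainObjExitMonitor(driver, vm);

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;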
src/qemu/qemu_domain.c | 1 +
src/qemu/qemu_domain.h | 3 +--
src/qemu/qemu_driver.c | 23 +++++++++++++++--------
src/qemu/qemu_migration.c | 21 +++++----------------
src/qemu/qemu_process.c | 1 +
5 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 2e4228a..c47ab60 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"destroy",
"suspend",
"modify",
+ "migration operation",
"none", /* async job is never stored in job.active */
"async nested",
);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index c2ad456..7e6b522 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -43,6 +43,7 @@ enum qemuDomainJob {
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
+ QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
QEMU_JOB_ASYNC, /* Asynchronous job */
@@ -69,12 +70,10 @@ enum qemuDomainJobSignals {
QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live
migration offline */
QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change
*/
- QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
};
struct qemuDomainJobSignalsData {
unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
- unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
};
struct qemuDomainJobObj {
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index cd64f85..1235787 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -7432,19 +7432,23 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
qemuDriverLock(driver);
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
if (!vm) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(dom->uuid, uuidstr);
qemuReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching uuid '%s'"),
uuidstr);
- goto cleanup;
+ return -1;
}
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ goto cleanup;
+
if (!virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto cleanup;
+ goto endjob;
}
priv = vm->privateData;
@@ -7452,18 +7456,21 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not being migrated"));
- goto cleanup;
+ goto endjob;
}
- VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
- priv->job.signalsData.migrateBandwidth = bandwidth;
- priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
- ret = 0;
+ VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+ qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+ if (qemuDomainObjEndJob(driver, vm) == 0)
+ vm = NULL;
cleanup:
if (vm)
virDomainObjUnlock(vm);
- qemuDriverUnlock(driver);
return ret;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index a7566f4..d7ad97f 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -788,19 +788,6 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
}
if (ret < 0)
VIR_WARN("Unable to set migration downtime");
- } else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
- unsigned long bandwidth = priv->job.signalsData.migrateBandwidth;
-
- priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
- priv->job.signalsData.migrateBandwidth = 0;
- VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (ret == 0) {
- ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
- if (ret < 0)
- VIR_WARN("Unable to set migration speed");
} else {
ret = 0;
}
@@ -2823,10 +2810,12 @@ qemuMigrationJobStart(struct qemud_driver *driver,
if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
return -1;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
- else
- qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+ } else {
+ qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
+ JOB_MASK(QEMU_JOB_MIGRATION_OP));
+ }
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ddb29c1..9126c71 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2394,6 +2394,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
*/
break;
+ case QEMU_JOB_MIGRATION_OP:
case QEMU_JOB_ASYNC:
case QEMU_JOB_ASYNC_NESTED:
/* async job was already handled above */
--
1.7.6