Call the qemu monitor command directly within a special job
(QEMU_JOB_MIGRATION_OP) that is only allowed during outgoing migration.
This replaces the QEMU_JOB_SIGNAL_MIGRATE_SPEED signal:
qemuDomainMigrateSetMaxSpeed no longer asks the migration job to change
the bandwidth on its behalf and instead sets it through the monitor
itself.
---
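Notes (not part of the commit message): with this change the
MigrateSetMaxSpeed API grabs the new QEMU_JOB_MIGRATION_OP job itself
and talks to the monitor directly, while qemuMigrationJobStart leaves
that job unmasked only for outgoing migration. Below is a condensed
sketch of the resulting flow in qemuDomainMigrateSetMaxSpeed, using
only calls that appear in the patch; it is an outline of libvirt
internals, not a standalone compilable example:

    /* take the new per-domain job; it is masked out except while an
     * outgoing migration async job is running (or no async job at all) */
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
        goto cleanup;

    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
        /* no outgoing migration is in progress, nothing to tune */
        goto endjob;
    }

    /* issue the monitor command directly instead of signalling the
     * migration job to do it */
    ignore_value(qemuDomainObjEnterMonitor(driver, vm));
    ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
    qemuDomainObjExitMonitor(driver, vm);

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;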
src/qemu/qemu_domain.c | 1 +
src/qemu/qemu_domain.h | 3 +--
src/qemu/qemu_driver.c | 23 +++++++++++++++--------
src/qemu/qemu_migration.c | 21 +++++----------------
src/qemu/qemu_process.c | 1 +
5 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index d2f03dd..7748592 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"destroy",
"suspend",
"modify",
+ "migration operation",
"none", /* async job is never stored in job.active */
"async nested",
);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index a3acaf5..fa4e182 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -49,6 +49,7 @@ enum qemuDomainJob {
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
+ QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
QEMU_JOB_ASYNC, /* Asynchronous job */
@@ -75,12 +76,10 @@ enum qemuDomainJobSignals {
QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live
migration offline */
QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change
*/
- QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
};
struct qemuDomainJobSignalsData {
unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
- unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
};
struct qemuDomainJobObj {
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f6cd433..f0c6489 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -7436,19 +7436,23 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
qemuDriverLock(driver);
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
if (!vm) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(dom->uuid, uuidstr);
qemuReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching uuid '%s'"),
uuidstr);
- goto cleanup;
+ return -1;
}
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ goto cleanup;
+
if (!virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto cleanup;
+ goto endjob;
}
priv = vm->privateData;
@@ -7456,18 +7460,21 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not being migrated"));
- goto cleanup;
+ goto endjob;
}
- VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
- priv->job.signalsData.migrateBandwidth = bandwidth;
- priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
- ret = 0;
+ VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+ qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+ if (qemuDomainObjEndJob(driver, vm) == 0)
+ vm = NULL;
cleanup:
if (vm)
virDomainObjUnlock(vm);
- qemuDriverUnlock(driver);
return ret;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 52262e2..7c5583b 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -788,19 +788,6 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
}
if (ret < 0)
VIR_WARN("Unable to set migration downtime");
- } else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
- unsigned long bandwidth = priv->job.signalsData.migrateBandwidth;
-
- priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
- priv->job.signalsData.migrateBandwidth = 0;
- VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (ret == 0) {
- ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
- if (ret < 0)
- VIR_WARN("Unable to set migration speed");
} else {
ret = 0;
}
@@ -2865,10 +2852,12 @@ qemuMigrationJobStart(struct qemud_driver *driver,
if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
return -1;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
- else
- qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+ } else {
+ qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
+ JOB_MASK(QEMU_JOB_MIGRATION_OP));
+ }
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 48bd435..732adf6 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2406,6 +2406,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
*/
break;
+ case QEMU_JOB_MIGRATION_OP:
case QEMU_JOB_ASYNC:
case QEMU_JOB_ASYNC_NESTED:
/* async job was already handled above */
--
1.7.6