Currently, if a user calls virDomainAbortJob we just issue
'migrate_cancel' and hope for the best. However, if the API is
called too early, before migration has actually started (i.e.
before the perform phase), the cancel request is simply ignored.
With this patch, the request is remembered and the migration is
canceled as soon as the perform phase starts.
---
diff to v1:
- don't move 'migrate_cancel'
- drop qemuDomainObjAbortAsyncJobRequested()
- detect asyncAbort earlier
 src/qemu/qemu_domain.c    | 12 ++++++++++++
 src/qemu/qemu_domain.h    |  2 ++
 src/qemu/qemu_driver.c    |  1 +
 src/qemu/qemu_migration.c | 11 +++++++++++
 4 files changed, 26 insertions(+), 0 deletions(-)
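
For reviewers, a minimal standalone sketch of the pattern the patch
follows; all names below are hypothetical and not the libvirt API, it
only illustrates "remember the abort request, then check it once inside
the critical section":

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical per-domain job state, analogous to qemuDomainJobObj */
    struct job {
        pthread_mutex_t lock;
        bool asyncAbort;        /* abort of async job requested */
    };

    /* analogous to qemuDomainObjAbortAsyncJob(): just remember the request */
    static void job_request_abort(struct job *job)
    {
        pthread_mutex_lock(&job->lock);
        job->asyncAbort = true;
        pthread_mutex_unlock(&job->lock);
    }

    /* analogous to the check added to qemuMigrationRun(): once the worker
     * holds the lock (its critical section), the flag cannot change under
     * its feet, so it can safely decide to bail out before doing any work */
    static int job_perform(struct job *job)
    {
        int ret = -1;

        pthread_mutex_lock(&job->lock);
        if (job->asyncAbort) {
            fprintf(stderr, "job canceled by client\n");
            goto cleanup;
        }
        /* ... the actual migration work would start here ... */
        ret = 0;

     cleanup:
        pthread_mutex_unlock(&job->lock);
        return ret;
    }

    int main(void)
    {
        struct job job = { PTHREAD_MUTEX_INITIALIZER, false };

        job_request_abort(&job);      /* client aborts before perform phase */
        return job_perform(&job) < 0; /* perform phase notices and cancels */
    }
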
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index a5592b9..e0d6951 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -160,6 +160,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
job->mask = DEFAULT_JOB_MASK;
job->start = 0;
job->dump_memory_only = false;
+ job->asyncAbort = false;
memset(&job->info, 0, sizeof(job->info));
}
@@ -959,6 +960,17 @@ qemuDomainObjEndAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
return virObjectUnref(obj);
}
+void
+qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ VIR_DEBUG("Requesting abort of async job: %s",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+
+ priv->job.asyncAbort = true;
+}
+
static int
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
bool driver_locked,
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 9c2f67c..a2acc0a 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -111,6 +111,7 @@ struct qemuDomainJobObj {
unsigned long long start; /* When the async job started */
bool dump_memory_only; /* use dump-guest-memory to do dump */
virDomainJobInfo info; /* Async job progress data */
+ bool asyncAbort; /* abort of async job requested */
};
typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;
@@ -204,6 +205,7 @@ bool qemuDomainObjEndJob(struct qemud_driver *driver,
bool qemuDomainObjEndAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
+void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
void qemuDomainObjSetJobPhase(struct qemud_driver *driver,
virDomainObjPtr obj,
int phase);
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 01ba7eb..9aa8340 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -10373,6 +10373,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
}
VIR_DEBUG("Cancelling job at client request");
+ qemuDomainObjAbortAsyncJob(vm);
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(driver, vm);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 5f8a9c5..0a52486 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2168,6 +2168,17 @@ qemuMigrationRun(struct qemud_driver *driver,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
+ if (priv->job.asyncAbort) {
+ /* explicitly check this *after* we have entered the monitor:
+ * this is a critical section, so we are guaranteed that
+ * priv->job.asyncAbort will not change */
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ _("canceled by client"));
+ goto cleanup;
+ }
+
if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
--
1.7.8.6