Make virDomainAbortJob use the new QEMU_JOB_ABORT job (allowed by the
default job mask) and call migrate_cancel on the monitor directly,
instead of going through the job signal machinery (signalCond and
QEMU_JOB_SIGNAL_CANCEL), which is removed. Job recovery after libvirtd
restart now cancels a possibly running migrate operation as well.
This doesn't abort the migration job in any phase yet.
---
src/qemu/qemu_domain.c | 9 +-------
src/qemu/qemu_domain.h | 14 ++++--------
src/qemu/qemu_driver.c | 36 ++++++++++++++++++++++-----------
src/qemu/qemu_migration.c | 48 ---------------------------------------------
src/qemu/qemu_process.c | 12 +++++++++-
5 files changed, 40 insertions(+), 79 deletions(-)
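
A caller-side sketch of what this enables, not part of the patch itself:
once the driver handles QEMU_JOB_ABORT, a client can cancel an unfinished
job (e.g. an outgoing migration) through the public API, which ends up in
the reworked qemuDomainAbortJob(). The connection URI and the domain name
"guest1" below are placeholders; virsh domjobabort guest1 does the same.

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    virDomainJobInfo info;
    int ret = 1;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "guest1")))
        goto cleanup;

    /* Only ask for cancellation while an unbounded job (migration,
     * save, dump) is running; otherwise the driver reports
     * "no job is active on the domain". */
    if (virDomainGetJobInfo(dom, &info) == 0 &&
        info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        if (virDomainAbortJob(dom) == 0) {
            printf("job cancellation requested\n");
            ret = 0;
        }
    } else {
        printf("no unbounded job to abort\n");
        ret = 0;
    }

cleanup:
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}
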
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index deaf9fd..7e43238 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"destroy",
"suspend",
"modify",
+ "abort",
"migration operation",
"none", /* async job is never stored in job.active */
"async nested",
@@ -158,12 +159,6 @@ qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
return -1;
}
- if (virCondInit(&priv->job.signalCond) < 0) {
- ignore_value(virCondDestroy(&priv->job.cond));
- ignore_value(virCondDestroy(&priv->job.asyncCond));
- return -1;
- }
-
return 0;
}
@@ -185,7 +180,6 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
job->mask = DEFAULT_JOB_MASK;
job->start = 0;
memset(&job->info, 0, sizeof(job->info));
- job->signals = 0;
}
void
@@ -208,7 +202,6 @@ qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
ignore_value(virCondDestroy(&priv->job.cond));
ignore_value(virCondDestroy(&priv->job.asyncCond));
- ignore_value(virCondDestroy(&priv->job.signalCond));
}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 3da7931..234acab 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -31,8 +31,10 @@
# include "bitmap.h"
#define JOB_MASK(job) (1 << (job - 1))
-#define DEFAULT_JOB_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY))
+#define DEFAULT_JOB_MASK \
+ (JOB_MASK(QEMU_JOB_QUERY) | \
+ JOB_MASK(QEMU_JOB_DESTROY) | \
+ JOB_MASK(QEMU_JOB_ABORT))
/* Only 1 job is allowed at any time
* A job includes *all* monitor commands, even those just querying
@@ -43,6 +45,7 @@ enum qemuDomainJob {
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
+ QEMU_JOB_ABORT, /* Abort current async job */
QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
@@ -66,10 +69,6 @@ enum qemuDomainAsyncJob {
QEMU_ASYNC_JOB_LAST
};
-enum qemuDomainJobSignals {
- QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
-};
-
struct qemuDomainJobObj {
virCond cond; /* Use to coordinate jobs */
enum qemuDomainJob active; /* Currently running job */
@@ -80,9 +79,6 @@ struct qemuDomainJobObj {
unsigned long long mask; /* Jobs allowed during async job */
unsigned long long start; /* When the async job started */
virDomainJobInfo info; /* Async job progress data */
-
- virCond signalCond; /* Use to coordinate the safe queries during migration */
- unsigned int signals; /* Signals for running job */
};
typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 8b186f7..6fd6019 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -7343,24 +7343,36 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
goto cleanup;
}
- priv = vm->privateData;
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
+ goto cleanup;
if (virDomainObjIsActive(vm)) {
- if (priv->job.asyncJob) {
- VIR_DEBUG("Requesting cancellation of job on vm %s",
vm->def->name);
- priv->job.signals |= QEMU_JOB_SIGNAL_CANCEL;
- } else {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("no job is active on the
domain"));
- goto cleanup;
- }
- } else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto cleanup;
+ goto endjob;
}
- ret = 0;
+ priv = vm->privateData;
+
+ if (!priv->job.asyncJob) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("no job is active on the domain"));
+ goto endjob;
+ } else if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("cannot abort incoming migration;"
+ " use virDomainDestroy instead"));
+ goto endjob;
+ }
+
+ VIR_DEBUG("Cancelling job at client request");
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ret = qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+ if (qemuDomainObjEndJob(driver, vm) == 0)
+ vm = NULL;
cleanup:
if (vm)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index de00811..c1c91d5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -743,42 +743,6 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
static int
-qemuMigrationProcessJobSignals(struct qemud_driver *driver,
- virDomainObjPtr vm,
- const char *job,
- bool cleanup)
-{
- qemuDomainObjPrivatePtr priv = vm->privateData;
- int ret = -1;
-
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
- job, _("guest unexpectedly quit"));
- if (cleanup)
- priv->job.signals = 0;
- return -1;
- }
-
- if (priv->job.signals & QEMU_JOB_SIGNAL_CANCEL) {
- priv->job.signals ^= QEMU_JOB_SIGNAL_CANCEL;
- VIR_DEBUG("Cancelling job at client request");
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (ret == 0) {
- ret = qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
- if (ret < 0) {
- VIR_WARN("Unable to cancel job");
- }
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-
-static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
virDomainObjPtr vm,
const char *job)
@@ -878,17 +842,10 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
while (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
/* Poll every 50ms for progress & to allow cancellation */
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
- while (priv->job.signals) {
- if (qemuMigrationProcessJobSignals(driver, vm, job, false) < 0)
- goto cleanup;
- }
-
- virCondSignal(&priv->job.signalCond);
if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
goto cleanup;
-
virDomainObjUnlock(vm);
qemuDriverUnlock(driver);
@@ -899,11 +856,6 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
}
cleanup:
- while (priv->job.signals) {
- qemuMigrationProcessJobSignals(driver, vm, job, true);
- }
- virCondBroadcast(&priv->job.signalCond);
-
if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED)
return 0;
else
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 9126c71..962f2dc 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2233,6 +2233,8 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
virDomainState state,
int reason)
{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
switch (phase) {
case QEMU_MIGRATION_PHASE_NONE:
@@ -2287,7 +2289,9 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
* domain */
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name);
- /* TODO cancel possibly running migrate operation */
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ignore_value(qemuMonitorMigrateCancel(priv->mon));
+ qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* migration */
if (state == VIR_DOMAIN_PAUSED &&
@@ -2335,6 +2339,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
virConnectPtr conn,
const struct qemuDomainJobObj *job)
{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainState state;
int reason;
@@ -2350,7 +2355,9 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
- /* TODO cancel possibly running migrate operation */
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ignore_value(qemuMonitorMigrateCancel(priv->mon));
+ qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* running save/dump operation */
if (state == VIR_DOMAIN_PAUSED &&
@@ -2395,6 +2402,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
break;
case QEMU_JOB_MIGRATION_OP:
+ case QEMU_JOB_ABORT:
case QEMU_JOB_ASYNC:
case QEMU_JOB_ASYNC_NESTED:
/* async job was already handled above */
--
1.7.6