Query commands are safe to be called during long-running jobs (such as
migration). This patch makes them all work without the need to
special-case every single one of them.
The patch introduces new job.asyncCond condition and associated
job.asyncJob which are dedicated to asynchronous (from qemu monitor
point of view) jobs that can take arbitrarily long time to finish while
qemu monitor is still usable for other commands.
The existing job.active (and job.cond condition) is used for all other
synchronous jobs (including the commands run during an async job).
Locking schema is changed to use these two conditions. While asyncJob is
active, only an allowed set of synchronous jobs may run (the set can be
different according to a particular asyncJob) so any method that
communicates to qemu monitor needs to check if it is allowed to be
executed during current asyncJob (if any). Once the check passes, the
method needs to normally acquire job.cond to ensure no other command is
running. Since domain object lock is released during that time, asyncJob
could have been started in the meantime so the method needs to recheck
the first condition. Then, normal jobs set job.active and asynchronous
jobs set job.asyncJob and optionally change the list of allowed job
groups.
Since asynchronous jobs only set job.asyncJob, other allowed commands
can still be run when domain object is unlocked (when communicating to
remote libvirtd or sleeping). To protect its own internal synchronous
commands, the asynchronous job needs to start a special nested job
before entering qemu monitor. The nested job doesn't check asyncJob, it
only acquires job.cond and sets job.active to block other jobs.
---
src/qemu/qemu_domain.c | 219 +++++++++++++++++++++++++++++++++++++--------
src/qemu/qemu_domain.h | 82 +++++++++++++----
src/qemu/qemu_driver.c | 122 +++++++++++++-------------
src/qemu/qemu_hotplug.c | 38 ++++----
src/qemu/qemu_migration.c | 152 ++++++++++++++++++-------------
src/qemu/qemu_process.c | 42 +++++----
6 files changed, 439 insertions(+), 216 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index a2e77b6..1ed5efd 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -87,8 +87,14 @@ qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
if (virCondInit(&priv->job.cond) < 0)
return -1;
+ if (virCondInit(&priv->job.asyncCond) < 0) {
+ ignore_value(virCondDestroy(&priv->job.cond));
+ return -1;
+ }
+
if (virCondInit(&priv->job.signalCond) < 0) {
ignore_value(virCondDestroy(&priv->job.cond));
+ ignore_value(virCondDestroy(&priv->job.asyncCond));
return -1;
}
@@ -101,6 +107,15 @@ qemuDomainObjResetJob(qemuDomainObjPrivatePtr priv)
struct qemuDomainJobObj *job = &priv->job;
job->active = QEMU_JOB_NONE;
+}
+
+static void
+qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
+{
+ struct qemuDomainJobObj *job = &priv->job;
+
+ job->asyncJob = QEMU_ASYNC_JOB_NONE;
+ job->mask = DEFAULT_JOB_MASK;
job->start = 0;
memset(&job->info, 0, sizeof(job->info));
job->signals = 0;
@@ -111,6 +126,7 @@ static void
qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
ignore_value(virCondDestroy(&priv->job.cond));
+ ignore_value(virCondDestroy(&priv->job.asyncCond));
ignore_value(virCondDestroy(&priv->job.signalCond));
}
@@ -509,12 +525,31 @@ qemuDomainObjSetJob(virDomainObjPtr obj,
}
void
-qemuDomainObjDiscardJob(virDomainObjPtr obj)
+qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
+ unsigned long long allowedJobs)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
- qemuDomainObjResetJob(priv);
- qemuDomainObjSetJob(obj, QEMU_JOB_NONE);
+ if (!priv->job.asyncJob)
+ return;
+
+ priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+}
+
+void
+qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ qemuDomainObjResetJob(priv);
+ qemuDomainObjResetAsyncJob(priv);
+}
+
+static bool
+qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
+{
+ return !priv->job.asyncJob || (priv->job.mask & JOB_MASK(job)) != 0;
}
/* Give up waiting for mutex after 30 seconds */
@@ -527,11 +562,14 @@ qemuDomainObjDiscardJob(virDomainObjPtr obj)
static int
qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
bool driver_locked,
- virDomainObjPtr obj)
+ virDomainObjPtr obj,
+ enum qemuDomainJob job,
+ enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
unsigned long long now;
unsigned long long then;
+ bool nested = job == QEMU_JOB_ASYNC_NESTED;
if (virTimeMs(&now) < 0)
return -1;
@@ -541,27 +579,31 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
if (driver_locked)
qemuDriverUnlock(driver);
+retry:
+ while (!nested && !qemuDomainJobAllowed(priv, job)) {
+ if (virCondWaitUntil(&priv->job.asyncCond, &obj->lock, then) < 0)
+ goto error;
+ }
+
while (priv->job.active) {
- if (virCondWaitUntil(&priv->job.cond, &obj->lock, then) < 0) {
- if (errno == ETIMEDOUT)
- qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
- "%s", _("cannot acquire state change lock"));
- else
- virReportSystemError(errno,
- "%s", _("cannot acquire job mutex"));
- if (driver_locked) {
- virDomainObjUnlock(obj);
- qemuDriverLock(driver);
- virDomainObjLock(obj);
- }
- /* Safe to ignore value since ref count was incremented above */
- ignore_value(virDomainObjUnref(obj));
- return -1;
- }
+ if (virCondWaitUntil(&priv->job.cond, &obj->lock, then) < 0)
+ goto error;
}
+
+ /* No job is active but a new async job could have been started while obj
+ * was unlocked, so we need to recheck it. */
+ if (!nested && !qemuDomainJobAllowed(priv, job))
+ goto retry;
+
qemuDomainObjResetJob(priv);
- qemuDomainObjSetJob(obj, QEMU_JOB_UNSPECIFIED);
- priv->job.start = now;
+
+ if (job != QEMU_JOB_ASYNC) {
+ priv->job.active = job;
+ } else {
+ qemuDomainObjResetAsyncJob(priv);
+ priv->job.asyncJob = asyncJob;
+ priv->job.start = now;
+ }
if (driver_locked) {
virDomainObjUnlock(obj);
@@ -570,6 +612,22 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
}
return 0;
+
+error:
+ if (errno == ETIMEDOUT)
+ qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
+ "%s", _("cannot acquire state change lock"));
+ else
+ virReportSystemError(errno,
+ "%s", _("cannot acquire job mutex"));
+ if (driver_locked) {
+ virDomainObjUnlock(obj);
+ qemuDriverLock(driver);
+ virDomainObjLock(obj);
+ }
+ /* Safe to ignore value since ref count was incremented above */
+ ignore_value(virDomainObjUnref(obj));
+ return -1;
}
/*
@@ -581,9 +639,17 @@ qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
* Upon successful return, the object will have its ref count increased,
* successful calls must be followed by EndJob eventually
*/
-int qemuDomainObjBeginJob(virDomainObjPtr obj)
+int qemuDomainObjBeginJob(virDomainObjPtr obj, enum qemuDomainJob job)
+{
+ return qemuDomainObjBeginJobInternal(NULL, false, obj, job,
+ QEMU_ASYNC_JOB_NONE);
+}
+
+int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
{
- return qemuDomainObjBeginJobInternal(NULL, false, obj);
+ return qemuDomainObjBeginJobInternal(NULL, false, obj, QEMU_JOB_ASYNC,
+ asyncJob);
}
/*
@@ -597,9 +663,49 @@ int qemuDomainObjBeginJob(virDomainObjPtr obj)
* successful calls must be followed by EndJob eventually
*/
int qemuDomainObjBeginJobWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj)
+ virDomainObjPtr obj,
+ enum qemuDomainJob job)
{
- return qemuDomainObjBeginJobInternal(driver, true, obj);
+ if (job <= QEMU_JOB_NONE || job >= QEMU_JOB_ASYNC) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Attempt to start invalid job"));
+ return -1;
+ }
+
+ return qemuDomainObjBeginJobInternal(driver, true, obj, job,
+ QEMU_ASYNC_JOB_NONE);
+}
+
+int qemuDomainObjBeginAsyncJobWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
+{
+ return qemuDomainObjBeginJobInternal(driver, true, obj, QEMU_JOB_ASYNC,
+ asyncJob);
+}
+
+/*
+ * Use this to protect monitor sections within active async job.
+ *
+ * The caller must call qemuDomainObjBeginAsyncJob{,WithDriver} before it can
+ * use this method. Never use this method if you only own non-async job, use
+ * qemuDomainObjBeginJob{,WithDriver} instead.
+ */
+int
+qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
+{
+ return qemuDomainObjBeginJobInternal(NULL, false, obj,
+ QEMU_JOB_ASYNC_NESTED,
+ QEMU_ASYNC_JOB_NONE);
+}
+
+int
+qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+{
+ return qemuDomainObjBeginJobInternal(driver, true, obj,
+ QEMU_JOB_ASYNC_NESTED,
+ QEMU_ASYNC_JOB_NONE);
}
/*
@@ -616,25 +722,60 @@ int qemuDomainObjEndJob(virDomainObjPtr obj)
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetJob(priv);
- qemuDomainObjSetJob(obj, QEMU_JOB_NONE);
virCondSignal(&priv->job.cond);
return virDomainObjUnref(obj);
}
+int
+qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
-static void
+ qemuDomainObjResetAsyncJob(priv);
+ virCondBroadcast(&priv->job.asyncCond);
+
+ return virDomainObjUnref(obj);
+}
+
+void
+qemuDomainObjEndNestedJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ qemuDomainObjResetJob(priv);
+ virCondSignal(&priv->job.cond);
+
+ /* safe to ignore since the surrounding async job increased the reference
+ * counter as well */
+ ignore_value(virDomainObjUnref(obj));
+}
+
+
+static int
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
+ if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
+ if (qemuDomainObjBeginNestedJob(obj) < 0)
+ return -1;
+ if (!virDomainObjIsActive(obj)) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("domain is no longer running"));
+ return -1;
+ }
+ }
+
qemuMonitorLock(priv->mon);
qemuMonitorRef(priv->mon);
ignore_value(virTimeMs(&priv->monStart));
virDomainObjUnlock(obj);
if (driver)
qemuDriverUnlock(driver);
+
+ return 0;
}
static void
@@ -657,20 +798,24 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
if (refs == 0) {
priv->mon = NULL;
}
+
+ if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ qemuDomainObjEndNestedJob(obj);
}
/*
* obj must be locked before calling, qemud_driver must be unlocked
*
* To be called immediately before any QEMU monitor API call
- * Must have already called qemuDomainObjBeginJob(), and checked
- * that the VM is still active.
+ * Must have already either called qemuDomainObjBeginJob() and checked
+ * that the VM is still active or called qemuDomainObjBeginAsyncJob, in which
+ * case this will call qemuDomainObjBeginNestedJob.
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
-void qemuDomainObjEnterMonitor(virDomainObjPtr obj)
+int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
{
- qemuDomainObjEnterMonitorInternal(NULL, obj);
+ return qemuDomainObjEnterMonitorInternal(NULL, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked
@@ -686,14 +831,16 @@ void qemuDomainObjExitMonitor(virDomainObjPtr obj)
* obj must be locked before calling, qemud_driver must be locked
*
* To be called immediately before any QEMU monitor API call
- * Must have already called qemuDomainObjBeginJob().
+ * Must have already either called qemuDomainObjBeginJobWithDriver() and
+ * checked that the VM is still active or called qemuDomainObjBeginAsyncJob,
+ * in which case this will call qemuDomainObjBeginNestedJobWithDriver.
*
* To be followed with qemuDomainObjExitMonitorWithDriver() once complete
*/
-void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj)
+int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
{
- qemuDomainObjEnterMonitorInternal(driver, obj);
+ return qemuDomainObjEnterMonitorInternal(driver, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked,
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 214e578..85a3c03 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -30,16 +30,35 @@
# include "qemu_conf.h"
# include "bitmap.h"
+#define JOB_MASK(job) (1 << (job - 1))
+#define DEFAULT_JOB_MASK \
+ (JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY))
+
/* Only 1 job is allowed at any time
* A job includes *all* monitor commands, even those just querying
* information, not merely actions */
enum qemuDomainJob {
QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
- QEMU_JOB_UNSPECIFIED,
- QEMU_JOB_MIGRATION_OUT,
- QEMU_JOB_MIGRATION_IN,
- QEMU_JOB_SAVE,
- QEMU_JOB_DUMP,
+ QEMU_JOB_QUERY, /* Doesn't change any state */
+ QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
+ QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
+ QEMU_JOB_MODIFY, /* May change state */
+
+ /* The following two items must always be the last items */
+ QEMU_JOB_ASYNC, /* Asynchronous job */
+ QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
+};
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ */
+enum qemuDomainAsyncJob {
+ QEMU_ASYNC_JOB_NONE = 0,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ QEMU_ASYNC_JOB_SAVE,
+ QEMU_ASYNC_JOB_DUMP,
};
enum qemuDomainJobSignals {
@@ -63,14 +82,16 @@ struct qemuDomainJobSignalsData {
};
struct qemuDomainJobObj {
- virCond cond; /* Use in conjunction with main virDomainObjPtr lock */
- virCond signalCond; /* Use to coordinate the safe queries during migration */
-
- enum qemuDomainJob active; /* Currently running job */
+ virCond cond; /* Use to coordinate jobs */
+ enum qemuDomainJob active; /* Currently running job */
- unsigned long long start; /* When the job started */
- virDomainJobInfo info; /* Progress data */
+ virCond asyncCond; /* Use to coordinate with async jobs */
+ enum qemuDomainAsyncJob asyncJob; /* Currently active async job */
+ unsigned long long mask; /* Jobs allowed during async job */
+ unsigned long long start; /* When the async job started */
+ virDomainJobInfo info; /* Async job progress data */
+ virCond signalCond; /* Use to coordinate the safe queries during migration */
unsigned int signals; /* Signals for running job */
struct qemuDomainJobSignalsData signalsData; /* Signal specific data */
};
@@ -117,18 +138,43 @@ void qemuDomainEventQueue(struct qemud_driver *driver,
void qemuDomainSetPrivateDataHooks(virCapsPtr caps);
void qemuDomainSetNamespaceHooks(virCapsPtr caps);
-int qemuDomainObjBeginJob(virDomainObjPtr obj) ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjBeginJob(virDomainObjPtr obj,
+ enum qemuDomainJob job)
+ ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
+ ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginJobWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj) ATTRIBUTE_RETURN_CHECK;
-int qemuDomainObjEndJob(virDomainObjPtr obj) ATTRIBUTE_RETURN_CHECK;
+ virDomainObjPtr obj,
+ enum qemuDomainJob job)
+ ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjBeginAsyncJobWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
+ ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
+
+int qemuDomainObjEndJob(virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
+int qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
+void qemuDomainObjEndNestedJob(virDomainObjPtr obj);
void qemuDomainObjSetJob(virDomainObjPtr obj, enum qemuDomainJob job);
-void qemuDomainObjDiscardJob(virDomainObjPtr obj);
+void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
+ unsigned long long allowedJobs);
+void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj);
-void qemuDomainObjEnterMonitor(virDomainObjPtr obj);
+int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjExitMonitor(virDomainObjPtr obj);
-void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj);
+int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+ ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj);
void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e9cdcbe..96b3737 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -141,7 +141,8 @@ qemuAutostartDomain(void *payload, const void *name ATTRIBUTE_UNUSED, void *opaq
virDomainObjLock(vm);
virResetLastError();
- if (qemuDomainObjBeginJobWithDriver(data->driver, vm) < 0) {
+ if (qemuDomainObjBeginJobWithDriver(data->driver, vm,
+ QEMU_JOB_MODIFY) < 0) {
err = virGetLastError();
VIR_ERROR(_("Failed to start job on VM '%s': %s"),
vm->def->name,
@@ -1274,7 +1275,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char *xml,
def = NULL;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup; /* XXXX free the 'vm' we created ? */
if (qemuProcessStart(conn, driver, vm, NULL,
@@ -1343,7 +1344,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
priv = vm->privateData;
- if (priv->job.active == QEMU_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) {
VIR_DEBUG("Requesting domain pause on %s",
vm->def->name);
@@ -1352,7 +1353,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
ret = 0;
goto cleanup;
} else {
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_SUSPEND) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1405,7 +1406,7 @@ static int qemudDomainResume(virDomainPtr dom) {
goto cleanup;
}
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1461,7 +1462,7 @@ static int qemuDomainShutdown(virDomainPtr dom) {
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1471,7 +1472,7 @@ static int qemuDomainShutdown(virDomainPtr dom) {
}
priv = vm->privateData;
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(vm);
@@ -1511,7 +1512,7 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
#if HAVE_YAJL
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON)) {
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1520,7 +1521,7 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
goto endjob;
}
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(vm);
@@ -1571,7 +1572,7 @@ static int qemudDomainDestroy(virDomainPtr dom) {
*/
qemuProcessKill(vm);
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_DESTROY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1684,7 +1685,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
isActive = virDomainObjIsActive(vm);
@@ -1749,7 +1750,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData;
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
r = qemuMonitorSetBalloon(priv->mon, newmem);
qemuDomainObjExitMonitor(vm);
qemuAuditMemory(vm, vm->def->mem.cur_balloon, newmem,
"update",
@@ -1821,9 +1822,9 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
priv = vm->privateData;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorInjectNMI(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
@@ -1879,12 +1880,12 @@ static int qemudDomainGetInfo(virDomainPtr dom,
(vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_NONE)) {
info->memory = vm->def->mem.max_balloon;
} else if (!priv->job.active) {
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm))
err = 0;
else {
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitor(vm);
}
@@ -2122,11 +2123,10 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
priv = vm->privateData;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
+ QEMU_ASYNC_JOB_SAVE) < 0)
goto cleanup;
- qemuDomainObjSetJob(vm, QEMU_JOB_SAVE);
-
memset(&priv->job.info, 0, sizeof(priv->job.info));
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
@@ -2294,7 +2294,7 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@@ -2310,7 +2310,7 @@ endjob:
VIR_WARN("Unable to resume guest CPUs after save failure");
}
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndAsyncJob(vm) == 0)
vm = NULL;
}
@@ -2608,7 +2608,8 @@ static int qemudDomainCoreDump(virDomainPtr dom,
}
priv = vm->privateData;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
+ QEMU_ASYNC_JOB_DUMP) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -2617,8 +2618,6 @@ static int qemudDomainCoreDump(virDomainPtr dom,
goto endjob;
}
- qemuDomainObjSetJob(vm, QEMU_JOB_DUMP);
-
/* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */
resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
@@ -2664,7 +2663,7 @@ endjob:
}
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndAsyncJob(vm) == 0)
vm = NULL;
else if ((ret == 0) && (flags & VIR_DUMP_CRASH) &&
!vm->persistent) {
virDomainRemoveInactive(&driver->domains,
@@ -2708,7 +2707,7 @@ qemuDomainScreenshot(virDomainPtr dom,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -2738,7 +2737,7 @@ qemuDomainScreenshot(virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
qemuDomainObjExitMonitor(vm);
goto endjob;
@@ -2793,7 +2792,8 @@ static void processWatchdogEvent(void *data, void *opaque)
goto unlock;
}
- if (qemuDomainObjBeginJobWithDriver(driver, wdEvent->vm) < 0) {
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, wdEvent->vm,
+ QEMU_ASYNC_JOB_DUMP) < 0) {
VIR_FREE(dumpfile);
goto unlock;
}
@@ -2831,7 +2831,7 @@ endjob:
/* Safe to ignore value since ref count was incremented in
* qemuProcessHandleWatchdog().
*/
- ignore_value(qemuDomainObjEndJob(wdEvent->vm));
+ ignore_value(qemuDomainObjEndAsyncJob(wdEvent->vm));
unlock:
if (virDomainObjUnref(wdEvent->vm) > 0)
@@ -2848,7 +2848,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
int oldvcpus = vm->def->vcpus;
int vcpus = oldvcpus;
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
/* We need different branches here, because we want to offline
* in reverse order to onlining, so any partial fail leaves us in a
@@ -2934,7 +2934,7 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm) && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
@@ -3755,7 +3755,7 @@ qemuDomainRestore(virConnectPtr conn,
}
def = NULL;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path);
@@ -3848,10 +3848,10 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
/* Don't delay if someone's using the monitor, just use
* existing most recent data instead */
if (!priv->job.active) {
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
@@ -4078,7 +4078,7 @@ qemudDomainStartWithFlags(virDomainPtr dom, unsigned int flags)
goto cleanup;
}
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
@@ -4823,7 +4823,7 @@ qemuDomainModifyDeviceFlags(virDomainPtr dom, const char *xml,
goto cleanup;
}
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
@@ -6006,8 +6006,8 @@ qemudDomainBlockStats (virDomainPtr dom,
}
priv = vm->privateData;
- if ((priv->job.active == QEMU_JOB_MIGRATION_OUT)
- || (priv->job.active == QEMU_JOB_SAVE)) {
+ if ((priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ || (priv->job.asyncJob == QEMU_ASYNC_JOB_SAVE)) {
virDomainObjRef(vm);
while (priv->job.signals & QEMU_JOB_SIGNAL_BLKSTAT)
ignore_value(virCondWait(&priv->job.signalCond, &vm->lock));
@@ -6023,7 +6023,7 @@ qemudDomainBlockStats (virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -6032,7 +6032,7 @@ qemudDomainBlockStats (virDomainPtr dom,
goto endjob;
}
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
disk->info.alias,
&stats->rd_req,
@@ -6135,12 +6135,12 @@ qemudDomainMemoryStats (virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
qemuDomainObjExitMonitor(vm);
} else {
@@ -6259,7 +6259,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -6283,7 +6283,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
priv = vm->privateData;
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
if (flags == VIR_MEMORY_VIRTUAL) {
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
qemuDomainObjExitMonitor(vm);
@@ -6453,8 +6453,8 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
- if ((priv->job.active == QEMU_JOB_MIGRATION_OUT)
- || (priv->job.active == QEMU_JOB_SAVE)) {
+ if ((priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ || (priv->job.asyncJob == QEMU_ASYNC_JOB_SAVE)) {
virDomainObjRef(vm);
while (priv->job.signals & QEMU_JOB_SIGNAL_BLKINFO)
ignore_value(virCondWait(&priv->job.signalCond, &vm->lock));
@@ -6470,11 +6470,11 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
- qemuDomainObjEnterMonitor(vm);
+ ignore_value(qemuDomainObjEnterMonitor(vm));
ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias,
&info->allocation);
@@ -7083,7 +7083,7 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
goto cleanup;
}
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
ret = qemuMigrationConfirm(driver, domain->conn, vm,
@@ -7293,7 +7293,7 @@ static int qemuDomainGetJobInfo(virDomainPtr dom,
priv = vm->privateData;
if (virDomainObjIsActive(vm)) {
- if (priv->job.active) {
+ if (priv->job.asyncJob) {
memcpy(info, &priv->job.info, sizeof(*info));
/* Refresh elapsed time again just to ensure it
@@ -7343,7 +7343,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
priv = vm->privateData;
if (virDomainObjIsActive(vm)) {
- if (priv->job.active) {
+ if (priv->job.asyncJob) {
VIR_DEBUG("Requesting cancellation of job on vm %s",
vm->def->name);
priv->job.signals |= QEMU_JOB_SIGNAL_CANCEL;
} else {
@@ -7397,7 +7397,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
priv = vm->privateData;
- if (priv->job.active != QEMU_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not being migrated"));
goto cleanup;
@@ -7446,7 +7446,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
priv = vm->privateData;
- if (priv->job.active != QEMU_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not being migrated"));
goto cleanup;
@@ -7639,7 +7639,7 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
bool resume = false;
int ret = -1;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@@ -7658,7 +7658,7 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
}
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -7984,7 +7984,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
vm->current_snapshot = snap;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (snap->def->state == VIR_DOMAIN_RUNNING
@@ -7992,7 +7992,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
if (virDomainObjIsActive(vm)) {
priv = vm->privateData;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (rc < 0)
@@ -8116,7 +8116,7 @@ static int qemuDomainSnapshotDiscard(struct qemud_driver *driver,
}
else {
priv = vm->privateData;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
/* we continue on even in the face of error */
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -8255,7 +8255,7 @@ static int qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot,
goto cleanup;
}
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (flags & VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN) {
@@ -8324,9 +8324,9 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char
*cmd,
hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP);
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(vm) == 0) {
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index a7f11ab..a7571cd 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -96,7 +96,7 @@ int qemuDomainChangeEjectableMedia(struct qemud_driver *driver,
if (!(driveAlias = qemuDeviceDriveHostAlias(origdisk, priv->qemuCaps)))
goto error;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (disk->src) {
const char *format = NULL;
if (disk->type != VIR_DOMAIN_DISK_TYPE_DIR) {
@@ -198,7 +198,7 @@ int qemuDomainAttachPciDiskDevice(struct qemud_driver *driver,
goto error;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
@@ -295,7 +295,7 @@ int qemuDomainAttachPciControllerDevice(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDevice(priv->mon, devstr);
} else {
@@ -440,7 +440,7 @@ int qemuDomainAttachSCSIDisk(struct qemud_driver *driver,
goto error;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
@@ -542,7 +542,7 @@ int qemuDomainAttachUsbMassstorageDevice(struct qemud_driver *driver,
goto error;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
@@ -675,7 +675,7 @@ int qemuDomainAttachNetDevice(virConnectPtr conn,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_NETDEV) &&
qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddNetdev(priv->mon, netstr, tapfd, tapfd_name,
@@ -711,7 +711,7 @@ int qemuDomainAttachNetDevice(virConnectPtr conn,
goto try_remove;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -767,7 +767,7 @@ try_remove:
char *netdev_name;
if (virAsprintf(&netdev_name, "host%s", net->info.alias)
< 0)
goto no_memory;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0)
VIR_WARN("Failed to remove network backend for netdev %s",
netdev_name);
@@ -780,7 +780,7 @@ try_remove:
char *hostnet_name;
if (virAsprintf(&hostnet_name, "host%s", net->info.alias) <
0)
goto no_memory;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorRemoveHostNetwork(priv->mon, vlan, hostnet_name) < 0)
VIR_WARN("Failed to remove network backend for vlan %d, net %s",
vlan, hostnet_name);
@@ -841,14 +841,14 @@ int qemuDomainAttachHostPciDevice(struct qemud_driver *driver,
priv->qemuCaps)))
goto error;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorAddDeviceWithFd(priv->mon, devstr,
configfd, configfd_name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
} else {
virDomainDevicePCIAddress guestAddr;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorAddPCIHostDevice(priv->mon,
&hostdev->source.subsys.u.pci,
&guestAddr);
@@ -929,7 +929,7 @@ int qemuDomainAttachHostUsbDevice(struct qemud_driver *driver,
goto error;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE))
ret = qemuMonitorAddDevice(priv->mon, devstr);
else
@@ -1237,7 +1237,7 @@ int qemuDomainDetachPciDiskDevice(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
@@ -1333,7 +1333,7 @@ int qemuDomainDetachDiskDevice(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
qemuAuditDisk(vm, detach, NULL, "detach", false);
@@ -1471,7 +1471,7 @@ int qemuDomainDetachPciControllerDevice(struct qemud_driver
*driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) {
qemuDomainObjExitMonitor(vm);
@@ -1566,7 +1566,7 @@ int qemuDomainDetachNetDevice(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitor(vm);
@@ -1701,7 +1701,7 @@ int qemuDomainDetachHostPciDevice(struct qemud_driver *driver,
return -1;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
} else {
@@ -1804,7 +1804,7 @@ int qemuDomainDetachHostUsbDevice(struct qemud_driver *driver,
return -1;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
qemuDomainObjExitMonitorWithDriver(driver, vm);
qemuAuditHostdev(vm, detach, "detach", ret == 0);
@@ -1879,7 +1879,7 @@ qemuDomainChangeGraphicsPasswords(struct qemud_driver *driver,
if (!auth->passwd && !driver->vncPassword)
return 0;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorSetPassword(priv->mon,
type,
auth->passwd ? auth->passwd : defaultPasswd,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 3634966..0712d73 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -749,9 +749,11 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
if (priv->job.signals & QEMU_JOB_SIGNAL_CANCEL) {
priv->job.signals ^= QEMU_JOB_SIGNAL_CANCEL;
VIR_DEBUG("Cancelling job at client request");
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret < 0) {
VIR_WARN("Unable to cancel job");
}
@@ -766,9 +768,11 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
priv->job.signalsData.migrateDowntime = 0;
VIR_DEBUG("Setting migration downtime to %llums", ms);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorSetMigrationDowntime(priv->mon, ms);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorSetMigrationDowntime(priv->mon, ms);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret < 0)
VIR_WARN("Unable to set migration downtime");
} else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
@@ -777,21 +781,25 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
priv->job.signalsData.migrateBandwidth = 0;
VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret < 0)
VIR_WARN("Unable to set migration speed");
} else if (priv->job.signals & QEMU_JOB_SIGNAL_BLKSTAT) {
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorGetBlockStatsInfo(priv->mon,
- priv->job.signalsData.statDevName,
- &priv->job.signalsData.blockStat->rd_req,
- &priv->job.signalsData.blockStat->rd_bytes,
- &priv->job.signalsData.blockStat->wr_req,
- &priv->job.signalsData.blockStat->wr_bytes,
- &priv->job.signalsData.blockStat->errs);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorGetBlockStatsInfo(priv->mon,
+ priv->job.signalsData.statDevName,
+ &priv->job.signalsData.blockStat->rd_req,
+ &priv->job.signalsData.blockStat->rd_bytes,
+ &priv->job.signalsData.blockStat->wr_req,
+ &priv->job.signalsData.blockStat->wr_bytes,
+ &priv->job.signalsData.blockStat->errs);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
*priv->job.signalsData.statRetCode = ret;
priv->job.signals ^= QEMU_JOB_SIGNAL_BLKSTAT;
@@ -799,11 +807,13 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
if (ret < 0)
VIR_WARN("Unable to get block statistics");
} else if (priv->job.signals & QEMU_JOB_SIGNAL_BLKINFO) {
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorGetBlockExtent(priv->mon,
- priv->job.signalsData.infoDevName,
- &priv->job.signalsData.blockInfo->allocation);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorGetBlockExtent(priv->mon,
+ priv->job.signalsData.infoDevName,
+ &priv->job.signalsData.blockInfo->allocation);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
*priv->job.signalsData.infoRetCode = ret;
priv->job.signals ^= QEMU_JOB_SIGNAL_BLKINFO;
@@ -836,13 +846,15 @@ qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
return -1;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorGetMigrationStatus(priv->mon,
- &status,
- &memProcessed,
- &memRemaining,
- &memTotal);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorGetMigrationStatus(priv->mon,
+ &status,
+ &memProcessed,
+ &memRemaining,
+ &memTotal);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret < 0 || virTimeMs(&priv->job.info.timeElapsed) < 0) {
priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
@@ -897,14 +909,14 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver,
virDomainObjPtr vm)
qemuDomainObjPrivatePtr priv = vm->privateData;
const char *job;
- switch (priv->job.active) {
- case QEMU_JOB_MIGRATION_OUT:
+ switch (priv->job.asyncJob) {
+ case QEMU_ASYNC_JOB_MIGRATION_OUT:
job = _("migration job");
break;
- case QEMU_JOB_SAVE:
+ case QEMU_ASYNC_JOB_SAVE:
job = _("domain save job");
break;
- case QEMU_JOB_DUMP:
+ case QEMU_ASYNC_JOB_DUMP:
job = _("domain core dump job");
break;
default:
@@ -969,14 +981,16 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
return 0;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorGraphicsRelocate(priv->mon,
- cookie->graphics->type,
- cookie->remoteHostname,
- cookie->graphics->port,
- cookie->graphics->tlsPort,
- cookie->graphics->tlsSubject);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorGraphicsRelocate(priv->mon,
+ cookie->graphics->type,
+ cookie->remoteHostname,
+ cookie->graphics->port,
+ cookie->graphics->tlsPort,
+ cookie->graphics->tlsSubject);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
return ret;
}
@@ -1108,9 +1122,9 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
QEMU_MIGRATION_COOKIE_LOCKSTATE)))
goto cleanup;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
- qemuDomainObjSetJob(vm, QEMU_JOB_MIGRATION_IN);
/* Domain starts inactive, even if the domain XML had an id field. */
vm->def->id = -1;
@@ -1144,7 +1158,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
qemuAuditDomainStart(vm, "migrated", false);
qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FAILED);
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -1173,7 +1187,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
endjob:
if (vm &&
- qemuDomainObjEndJob(vm) == 0)
+ qemuDomainObjEndAsyncJob(vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@@ -1183,7 +1197,7 @@ endjob:
*/
if (vm &&
virDomainObjIsActive(vm)) {
- qemuDomainObjSetJob(vm, QEMU_JOB_MIGRATION_IN);
+ priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@@ -1343,9 +1357,9 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
QEMU_MIGRATION_COOKIE_LOCKSTATE)))
goto cleanup;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
- qemuDomainObjSetJob(vm, QEMU_JOB_MIGRATION_IN);
/* Domain starts inactive, even if the domain XML had an id field. */
vm->def->id = -1;
@@ -1361,7 +1375,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
* should have already done that.
*/
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -1394,7 +1408,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
endjob:
if (vm &&
- qemuDomainObjEndJob(vm) == 0)
+ qemuDomainObjEndAsyncJob(vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@@ -1404,7 +1418,7 @@ endjob:
*/
if (vm &&
virDomainObjIsActive(vm)) {
- qemuDomainObjSetJob(vm, QEMU_JOB_MIGRATION_IN);
+ priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@@ -1488,7 +1502,9 @@ static int doNativeMigrate(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ goto cleanup;
+
if (resource > 0 &&
qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1747,7 +1763,9 @@ static int doTunnelMigrate(struct qemud_driver *driver,
goto cleanup;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ goto cleanup;
+
if (resource > 0 &&
qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1788,7 +1806,8 @@ static int doTunnelMigrate(struct qemud_driver *driver,
/* it is also possible that the migrate didn't fail initially, but
* rather failed later on. Check the output of "info migrate"
*/
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ goto cancel;
if (qemuMonitorGetMigrationStatus(priv->mon,
&status,
&transferred,
@@ -1846,9 +1865,10 @@ cancel:
if (ret != 0 && virDomainObjIsActive(vm)) {
VIR_FORCE_CLOSE(client_sock);
VIR_FORCE_CLOSE(qemu_sock);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (qemuDomainObjEnterMonitorWithDriver(driver, vm) == 0) {
+ qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
}
cleanup:
@@ -2284,9 +2304,9 @@ int qemuMigrationPerform(struct qemud_driver *driver,
cookieout, cookieoutlen, flags, NULLSTR(dname),
resource, v3proto);
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
- qemuDomainObjSetJob(vm, QEMU_JOB_MIGRATION_OUT);
if (!virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
@@ -2365,7 +2385,7 @@ endjob:
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
}
if (vm) {
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndAsyncJob(vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
@@ -2450,17 +2470,17 @@ qemuMigrationFinish(struct qemud_driver *driver,
virErrorPtr orig_err = NULL;
priv = vm->privateData;
- if (priv->job.active != QEMU_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_IN) {
qemuReportError(VIR_ERR_NO_DOMAIN,
_("domain '%s' is not processing incoming
migration"), vm->def->name);
goto cleanup;
}
- qemuDomainObjDiscardJob(vm);
+ qemuDomainObjDiscardAsyncJob(vm);
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
goto cleanup;
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
/* Did the migration go as planned? If yes, return the domain
@@ -2724,7 +2744,9 @@ qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr
vm,
restoreLabel = true;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ goto cleanup;
+
if (!compressor) {
const char *args[] = { "cat", NULL };
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 56593c2..534a8b1 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -374,7 +374,7 @@ qemuProcessFakeReboot(void *opaque)
VIR_DEBUG("vm=%p", vm);
qemuDriverLock(driver);
virDomainObjLock(vm);
- if (qemuDomainObjBeginJob(vm) < 0)
+ if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -383,7 +383,7 @@ qemuProcessFakeReboot(void *opaque)
goto endjob;
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorSystemReset(priv->mon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto endjob;
@@ -814,7 +814,7 @@ qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
}
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorSetCapabilities(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1163,7 +1163,7 @@ qemuProcessWaitForMonitor(struct qemud_driver* driver,
goto cleanup;
priv = vm->privateData;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorGetPtyPaths(priv->mon, paths);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1216,7 +1216,7 @@ qemuProcessDetectVcpuPIDs(struct qemud_driver *driver,
/* What follows is now all KVM specific */
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
return -1;
@@ -1510,7 +1510,7 @@ qemuProcessInitPasswords(virConnectPtr conn,
goto cleanup;
alias = vm->def->disks[i]->info.alias;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
VIR_FREE(secret);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1901,7 +1901,7 @@ qemuProcessInitPCIAddresses(struct qemud_driver *driver,
int ret;
qemuMonitorPCIAddress *addrs = NULL;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
naddrs = qemuMonitorGetAllPCIAddresses(priv->mon,
&addrs);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -2122,7 +2122,7 @@ qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr
vm,
}
VIR_FREE(priv->lockState);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorStartCPUs(priv->mon, conn);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -2150,9 +2150,11 @@ int qemuProcessStopCPUs(struct qemud_driver *driver,
virDomainObjPtr vm,
oldState = virDomainObjGetState(vm, &oldReason);
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- ret = qemuMonitorStopCPUs(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (ret == 0) {
+ ret = qemuMonitorStopCPUs(priv->mon);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret == 0) {
if (virDomainLockProcessPause(driver->lockManager, vm,
&priv->lockState) < 0)
@@ -2198,7 +2200,7 @@ qemuProcessUpdateState(struct qemud_driver *driver, virDomainObjPtr
vm)
bool running;
int ret;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorGetStatus(priv->mon, &running);
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -2244,6 +2246,9 @@ qemuProcessReconnect(void *payload, const void *name
ATTRIBUTE_UNUSED, void *opa
priv = obj->privateData;
+ /* Set fake job so that EnterMonitor* doesn't want to start a new one */
+ priv->job.active = QEMU_JOB_MODIFY;
+
/* Hold an extra reference because we can't allow 'vm' to be
* deleted if qemuConnectMonitor() failed */
virDomainObjRef(obj);
@@ -2282,6 +2287,8 @@ qemuProcessReconnect(void *payload, const void *name
ATTRIBUTE_UNUSED, void *opa
if (qemuProcessFiltersInstantiate(conn, obj->def))
goto error;
+ priv->job.active = QEMU_JOB_NONE;
+
/* update domain state XML with possibly updated state in virDomainObj */
if (virDomainSaveStatus(driver->caps, driver->stateDir, obj) < 0)
goto error;
@@ -2695,7 +2702,7 @@ int qemuProcessStart(virConnectPtr conn,
VIR_DEBUG("Setting initial memory amount");
cur_balloon = vm->def->mem.cur_balloon;
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
@@ -3013,13 +3020,14 @@ static void qemuProcessAutoDestroyDom(void *payload,
}
priv = dom->privateData;
- if (priv->job.active == QEMU_JOB_MIGRATION_IN) {
- VIR_DEBUG("vm=%s has incoming migration active, cancelling",
+ if (priv->job.asyncJob) {
+ VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
- qemuDomainObjDiscardJob(dom);
+ qemuDomainObjDiscardAsyncJob(dom);
}
- if (qemuDomainObjBeginJobWithDriver(data->driver, dom) < 0)
+ if (qemuDomainObjBeginJobWithDriver(data->driver, dom,
+ QEMU_JOB_DESTROY) < 0)
goto cleanup;
VIR_DEBUG("Killing domain");
--
1.7.6