If libvirtd is restarted while a job is running, the new libvirtd process
needs to know about it so that it can recover and roll back the
operation.
---
src/qemu/qemu_domain.c | 124 +++++++++++++++++++++++++++++++---------
src/qemu/qemu_domain.h | 35 ++++++++----
src/qemu/qemu_driver.c | 138 +++++++++++++++++++++++----------------------
src/qemu/qemu_hotplug.c | 12 ++--
src/qemu/qemu_migration.c | 20 ++++---
src/qemu/qemu_process.c | 8 +-
6 files changed, 212 insertions(+), 125 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 1ed5efd..062ecc7 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -44,6 +44,26 @@
#define QEMU_NAMESPACE_HREF "http://libvirt.org/schemas/domain/qemu/1.0"
+VIR_ENUM_DECL(qemuDomainJob)
+VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
+ "none",
+ "query",
+ "destroy",
+ "suspend",
+ "modify",
+ "none", /* async job is never stored in job.active */
+ "async nested",
+);
+
+VIR_ENUM_DECL(qemuDomainAsyncJob)
+VIR_ENUM_IMPL(qemuDomainAsyncJob, QEMU_ASYNC_JOB_LAST,
+ "none",
+ "migration out",
+ "migration in",
+ "save",
+ "dump",
+);
+
static void qemuDomainEventDispatchFunc(virConnectPtr conn,
virDomainEventPtr event,
@@ -214,6 +234,12 @@ static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, void
*data)
if (priv->lockState)
virBufferAsprintf(buf, " <lockstate>%s</lockstate>\n",
priv->lockState);
+ if (priv->job.active || priv->job.asyncJob) {
+ virBufferAsprintf(buf, " <job type='%s'
async='%s'/>\n",
+ qemuDomainJobTypeToString(priv->job.active),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ }
+
return 0;
}
@@ -320,6 +346,32 @@ static int qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, void
*data)
priv->lockState = virXPathString("string(./lockstate)", ctxt);
+ if ((tmp = virXPathString("string(./job[1]/@type)", ctxt))) {
+ int type;
+
+ if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unknown job type %s"), tmp);
+ VIR_FREE(tmp);
+ goto error;
+ }
+ VIR_FREE(tmp);
+ priv->job.active = type;
+ }
+
+ if ((tmp = virXPathString("string(./job[1]/@async)", ctxt))) {
+ int async;
+
+ if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unknown async job type %s"), tmp);
+ VIR_FREE(tmp);
+ goto error;
+ }
+ VIR_FREE(tmp);
+ priv->job.asyncJob = async;
+ }
+
return 0;
error:
@@ -516,12 +568,16 @@ void qemuDomainSetNamespaceHooks(virCapsPtr caps)
}
void
-qemuDomainObjSetJob(virDomainObjPtr obj,
- enum qemuDomainJob job)
+qemuDomainObjSaveJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
- qemuDomainObjPrivatePtr priv = obj->privateData;
+ if (!virDomainObjIsActive(obj)) {
+ /* don't write the state file yet, it will be written once the domain
+ * gets activated */
+ return;
+ }
- priv->job.active = job;
+ if (virDomainSaveStatus(driver->caps, driver->stateDir, obj) < 0)
+ VIR_WARN("Failed to save status on vm %s", obj->def->name);
}
void
@@ -537,13 +593,14 @@ qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
}
void
-qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj)
+qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
qemuDomainObjResetJob(priv);
qemuDomainObjResetAsyncJob(priv);
+ qemuDomainObjSaveJob(driver, obj);
}
static bool
@@ -559,7 +616,7 @@ qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob
job)
* obj must be locked before calling; driver_locked says if qemu_driver is
* locked or not.
*/
-static int
+static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(struct qemud_driver *driver,
bool driver_locked,
virDomainObjPtr obj,
@@ -611,6 +668,8 @@ retry:
virDomainObjLock(obj);
}
+ qemuDomainObjSaveJob(driver, obj);
+
return 0;
error:
@@ -639,16 +698,19 @@ error:
* Upon successful return, the object will have its ref count increased,
* successful calls must be followed by EndJob eventually
*/
-int qemuDomainObjBeginJob(virDomainObjPtr obj, enum qemuDomainJob job)
+int qemuDomainObjBeginJob(struct qemud_driver *driver,
+ virDomainObjPtr obj,
+ enum qemuDomainJob job)
{
- return qemuDomainObjBeginJobInternal(NULL, false, obj, job,
+ return qemuDomainObjBeginJobInternal(driver, false, obj, job,
QEMU_ASYNC_JOB_NONE);
}
-int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
+int qemuDomainObjBeginAsyncJob(struct qemud_driver *driver,
+ virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{
- return qemuDomainObjBeginJobInternal(NULL, false, obj, QEMU_JOB_ASYNC,
+ return qemuDomainObjBeginJobInternal(driver, false, obj, QEMU_JOB_ASYNC,
asyncJob);
}
@@ -692,9 +754,10 @@ int qemuDomainObjBeginAsyncJobWithDriver(struct qemud_driver
*driver,
* qemuDomainObjBeginJob{,WithDriver} instead.
*/
int
-qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
+qemuDomainObjBeginNestedJob(struct qemud_driver *driver,
+ virDomainObjPtr obj)
{
- return qemuDomainObjBeginJobInternal(NULL, false, obj,
+ return qemuDomainObjBeginJobInternal(driver, false, obj,
QEMU_JOB_ASYNC_NESTED,
QEMU_ASYNC_JOB_NONE);
}
@@ -717,33 +780,36 @@ qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver *driver,
* Returns remaining refcount on 'obj', maybe 0 to indicated it
* was deleted
*/
-int qemuDomainObjEndJob(virDomainObjPtr obj)
+int qemuDomainObjEndJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetJob(priv);
+ qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
return virDomainObjUnref(obj);
}
int
-qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
+qemuDomainObjEndAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetAsyncJob(priv);
+ qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
return virDomainObjUnref(obj);
}
void
-qemuDomainObjEndNestedJob(virDomainObjPtr obj)
+qemuDomainObjEndNestedJob(struct qemud_driver *driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainObjResetJob(priv);
+ qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
/* safe to ignore since the surrounding async job increased the reference
@@ -752,14 +818,15 @@ qemuDomainObjEndNestedJob(virDomainObjPtr obj)
}
-static int
+static int ATTRIBUTE_NONNULL(1)
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
+ bool driver_locked,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
- if (qemuDomainObjBeginNestedJob(obj) < 0)
+ if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
return -1;
if (!virDomainObjIsActive(obj)) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
@@ -772,14 +839,15 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
qemuMonitorRef(priv->mon);
ignore_value(virTimeMs(&priv->monStart));
virDomainObjUnlock(obj);
- if (driver)
+ if (driver_locked)
qemuDriverUnlock(driver);
return 0;
}
-static void
+static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
+ bool driver_locked,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
@@ -790,7 +858,7 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
if (refs > 0)
qemuMonitorUnlock(priv->mon);
- if (driver)
+ if (driver_locked)
qemuDriverLock(driver);
virDomainObjLock(obj);
@@ -800,7 +868,7 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
}
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
- qemuDomainObjEndNestedJob(obj);
+ qemuDomainObjEndNestedJob(driver, obj);
}
/*
@@ -813,18 +881,20 @@ qemuDomainObjExitMonitorInternal(struct qemud_driver *driver,
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
-int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
+int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj)
{
- return qemuDomainObjEnterMonitorInternal(NULL, obj);
+ return qemuDomainObjEnterMonitorInternal(driver, false, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked
*
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
*/
-void qemuDomainObjExitMonitor(virDomainObjPtr obj)
+void qemuDomainObjExitMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj)
{
- qemuDomainObjExitMonitorInternal(NULL, obj);
+ qemuDomainObjExitMonitorInternal(driver, false, obj);
}
/*
@@ -840,7 +910,7 @@ void qemuDomainObjExitMonitor(virDomainObjPtr obj)
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
{
- return qemuDomainObjEnterMonitorInternal(driver, obj);
+ return qemuDomainObjEnterMonitorInternal(driver, true, obj);
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked,
@@ -851,7 +921,7 @@ int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
{
- qemuDomainObjExitMonitorInternal(driver, obj);
+ qemuDomainObjExitMonitorInternal(driver, true, obj);
}
void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 85a3c03..17d1356 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -44,9 +44,11 @@ enum qemuDomainJob {
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
- /* The following two items must always be the last items */
+ /* The following two items must always be the last items before JOB_LAST */
QEMU_JOB_ASYNC, /* Asynchronous job */
QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
+
+ QEMU_JOB_LAST
};
/* Async job consists of a series of jobs that may change state. Independent
@@ -59,6 +61,8 @@ enum qemuDomainAsyncJob {
QEMU_ASYNC_JOB_MIGRATION_IN,
QEMU_ASYNC_JOB_SAVE,
QEMU_ASYNC_JOB_DUMP,
+
+ QEMU_ASYNC_JOB_LAST
};
enum qemuDomainJobSignals {
@@ -138,13 +142,16 @@ void qemuDomainEventQueue(struct qemud_driver *driver,
void qemuDomainSetPrivateDataHooks(virCapsPtr caps);
void qemuDomainSetNamespaceHooks(virCapsPtr caps);
-int qemuDomainObjBeginJob(virDomainObjPtr obj,
+int qemuDomainObjBeginJob(struct qemud_driver *driver,
+ virDomainObjPtr obj,
enum qemuDomainJob job)
ATTRIBUTE_RETURN_CHECK;
-int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
+int qemuDomainObjBeginAsyncJob(struct qemud_driver *driver,
+ virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
ATTRIBUTE_RETURN_CHECK;
-int qemuDomainObjBeginNestedJob(virDomainObjPtr obj)
+int qemuDomainObjBeginNestedJob(struct qemud_driver *driver,
+ virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginJobWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj,
@@ -158,20 +165,26 @@ int qemuDomainObjBeginNestedJobWithDriver(struct qemud_driver
*driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
-int qemuDomainObjEndJob(virDomainObjPtr obj)
+int qemuDomainObjEndJob(struct qemud_driver *driver,
+ virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
-int qemuDomainObjEndAsyncJob(virDomainObjPtr obj)
+int qemuDomainObjEndAsyncJob(struct qemud_driver *driver,
+ virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
-void qemuDomainObjEndNestedJob(virDomainObjPtr obj);
+void qemuDomainObjEndNestedJob(struct qemud_driver *driver,
+ virDomainObjPtr obj);
-void qemuDomainObjSetJob(virDomainObjPtr obj, enum qemuDomainJob job);
+void qemuDomainObjSaveJob(struct qemud_driver *driver, virDomainObjPtr obj);
void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
unsigned long long allowedJobs);
-void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj);
+void qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver,
+ virDomainObjPtr obj);
-int qemuDomainObjEnterMonitor(virDomainObjPtr obj)
+int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
-void qemuDomainObjExitMonitor(virDomainObjPtr obj);
+void qemuDomainObjExitMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj);
int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj)
ATTRIBUTE_RETURN_CHECK;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 96b3737..9dcb248 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -157,7 +157,7 @@ qemuAutostartDomain(void *payload, const void *name ATTRIBUTE_UNUSED,
void *opaq
err ? err->message : _("unknown error"));
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(data->driver, vm) == 0)
vm = NULL;
}
@@ -1283,7 +1283,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char
*xml,
(flags & VIR_DOMAIN_START_AUTODESTROY) != 0,
-1, NULL, VIR_VM_OP_CREATE) < 0) {
qemuAuditDomainStart(vm, "booted", false);
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@@ -1299,7 +1299,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char
*xml,
if (dom) dom->id = vm->def->id;
if (vm &&
- qemuDomainObjEndJob(vm) == 0)
+ qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1375,7 +1375,7 @@ static int qemudDomainSuspend(virDomainPtr dom) {
}
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1431,7 +1431,7 @@ static int qemudDomainResume(virDomainPtr dom) {
ret = 0;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1462,7 +1462,7 @@ static int qemuDomainShutdown(virDomainPtr dom) {
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1472,14 +1472,14 @@ static int qemuDomainShutdown(virDomainPtr dom) {
}
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
priv->fakeReboot = false;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1512,7 +1512,7 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
#if HAVE_YAJL
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON)) {
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1521,14 +1521,14 @@ static int qemuDomainReboot(virDomainPtr dom, unsigned int flags)
{
goto endjob;
}
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorSystemPowerdown(priv->mon);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
priv->fakeReboot = true;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
} else {
#endif
@@ -1588,7 +1588,7 @@ static int qemudDomainDestroy(virDomainPtr dom) {
qemuAuditDomainStop(vm, "destroyed");
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@@ -1597,7 +1597,7 @@ static int qemudDomainDestroy(virDomainPtr dom) {
endjob:
if (vm &&
- qemuDomainObjEndJob(vm) == 0)
+ qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1685,7 +1685,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long
newmem,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
isActive = virDomainObjIsActive(vm);
@@ -1750,9 +1750,9 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long
newmem,
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
r = qemuMonitorSetBalloon(priv->mon, newmem);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
qemuAuditMemory(vm, vm->def->mem.cur_balloon, newmem,
"update",
r == 1);
if (r < 0)
@@ -1776,7 +1776,7 @@ static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long
newmem,
ret = 0;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -1827,7 +1827,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int
flags)
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorInjectNMI(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@@ -1880,16 +1880,16 @@ static int qemudDomainGetInfo(virDomainPtr dom,
(vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_NONE)) {
info->memory = vm->def->mem.max_balloon;
} else if (!priv->job.active) {
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm))
err = 0;
else {
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
}
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@@ -2294,7 +2294,7 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver,
virDomainPtr dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
if (!vm->persistent) {
- if (qemuDomainObjEndAsyncJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains,
vm);
vm = NULL;
@@ -2310,7 +2310,7 @@ endjob:
VIR_WARN("Unable to resume guest CPUs after save
failure");
}
}
- if (qemuDomainObjEndAsyncJob(vm) == 0)
+ if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
}
@@ -2663,7 +2663,7 @@ endjob:
}
}
- if (qemuDomainObjEndAsyncJob(vm) == 0)
+ if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
else if ((ret == 0) && (flags & VIR_DUMP_CRASH) &&
!vm->persistent) {
virDomainRemoveInactive(&driver->domains,
@@ -2707,7 +2707,7 @@ qemuDomainScreenshot(virDomainPtr dom,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -2737,12 +2737,12 @@ qemuDomainScreenshot(virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
if (VIR_CLOSE(tmp_fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), tmp);
@@ -2761,7 +2761,7 @@ endjob:
VIR_FORCE_CLOSE(tmp_fd);
VIR_FREE(tmp);
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -2831,7 +2831,7 @@ endjob:
/* Safe to ignore value since ref count was incremented in
* qemuProcessHandleWatchdog().
*/
- ignore_value(qemuDomainObjEndAsyncJob(wdEvent->vm));
+ ignore_value(qemuDomainObjEndAsyncJob(driver, wdEvent->vm));
unlock:
if (virDomainObjUnref(wdEvent->vm) > 0)
@@ -2840,7 +2840,9 @@ unlock:
VIR_FREE(wdEvent);
}
-static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
+static int qemudDomainHotplugVcpus(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ unsigned int nvcpus)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int i, rc = 1;
@@ -2848,7 +2850,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int
nvcpus)
int oldvcpus = vm->def->vcpus;
int vcpus = oldvcpus;
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
/* We need different branches here, because we want to offline
* in reverse order to onlining, so any partial fail leaves us in a
@@ -2880,7 +2882,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int
nvcpus)
ret = 0;
cleanup:
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
vm->def->vcpus = vcpus;
qemuAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1);
return ret;
@@ -2934,7 +2936,7 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm) && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
@@ -2990,11 +2992,11 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
break;
case VIR_DOMAIN_AFFECT_LIVE:
- ret = qemudDomainHotplugVcpus(vm, nvcpus);
+ ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
break;
case VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG:
- ret = qemudDomainHotplugVcpus(vm, nvcpus);
+ ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
if (ret == 0) {
persistentDef->vcpus = nvcpus;
}
@@ -3006,7 +3008,7 @@ qemudDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
ret = virDomainSaveConfig(driver->configDir, persistentDef);
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -3760,7 +3762,7 @@ qemuDomainRestore(virConnectPtr conn,
ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path);
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
else if (ret < 0 && !vm->persistent) {
virDomainRemoveInactive(&driver->domains, vm);
@@ -3854,7 +3856,7 @@ static char *qemuDomainGetXMLDesc(virDomainPtr dom,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
@@ -4095,7 +4097,7 @@ qemudDomainStartWithFlags(virDomainPtr dom, unsigned int flags)
ret = 0;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -4917,7 +4919,7 @@ qemuDomainModifyDeviceFlags(virDomainPtr dom, const char *xml,
}
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -6023,7 +6025,7 @@ qemudDomainBlockStats (virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -6032,7 +6034,7 @@ qemudDomainBlockStats (virDomainPtr dom,
goto endjob;
}
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
disk->info.alias,
&stats->rd_req,
@@ -6040,10 +6042,10 @@ qemudDomainBlockStats (virDomainPtr dom,
&stats->wr_req,
&stats->wr_bytes,
&stats->errs);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
}
@@ -6135,20 +6137,20 @@ qemudDomainMemoryStats (virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
} else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -6259,7 +6261,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -6283,19 +6285,19 @@ qemudDomainMemoryPeek (virDomainPtr dom,
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
if (flags == VIR_MEMORY_VIRTUAL) {
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
} else {
if (qemuMonitorSavePhysicalMemory(priv->mon, offset, size, tmp) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
}
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
/* Read the memory file into buffer. */
if (saferead (fd, buffer, size) == (ssize_t) -1) {
@@ -6308,7 +6310,7 @@ qemudDomainMemoryPeek (virDomainPtr dom,
ret = 0;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -6470,20 +6472,20 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainObjUnref(vm) == 0)
vm = NULL;
} else {
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
- ignore_value(qemuDomainObjEnterMonitor(vm));
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias,
&info->allocation);
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
} else {
ret = 0;
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
}
} else {
@@ -7090,7 +7092,7 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
cookiein, cookieinlen,
flags, cancelled);
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
@@ -7671,7 +7673,7 @@ cleanup:
_("resuming after snapshot failed"));
}
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
*vmptr = NULL;
return ret;
@@ -8046,7 +8048,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr
snapshot,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
goto cleanup;
@@ -8060,7 +8062,7 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr
snapshot,
ret = 0;
endjob:
- if (vm && qemuDomainObjEndJob(vm) == 0)
+ if (vm && qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -8281,7 +8283,7 @@ static int qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot,
ret = qemuDomainSnapshotDiscard(driver, vm, snap);
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -8329,7 +8331,7 @@ static int qemuDomainMonitorCommand(virDomainPtr domain, const char
*cmd,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (qemuDomainObjEndJob(vm) == 0) {
+ if (qemuDomainObjEndJob(driver, vm) == 0) {
vm = NULL;
goto cleanup;
}
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index a7571cd..06e2c84 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1240,14 +1240,14 @@ int qemuDomainDetachPciDiskDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
qemuAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
} else {
if (qemuMonitorRemovePCIDevice(priv->mon,
&detach->info.addr.pci) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
qemuAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
@@ -1335,7 +1335,7 @@ int qemuDomainDetachDiskDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
qemuAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
@@ -1474,13 +1474,13 @@ int qemuDomainDetachPciControllerDevice(struct qemud_driver
*driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
goto cleanup;
}
} else {
if (qemuMonitorRemovePCIDevice(priv->mon,
&detach->info.addr.pci) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
goto cleanup;
}
}
@@ -1569,7 +1569,7 @@ int qemuDomainDetachNetDevice(struct qemud_driver *driver,
ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
- qemuDomainObjExitMonitor(vm);
+ qemuDomainObjExitMonitor(driver, vm);
qemuAuditNet(vm, detach, NULL, "detach", false);
goto cleanup;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 0712d73..4c516b0 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1158,7 +1158,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
qemuAuditDomainStart(vm, "migrated", false);
qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FAILED);
if (!vm->persistent) {
- if (qemuDomainObjEndAsyncJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -1187,7 +1187,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
endjob:
if (vm &&
- qemuDomainObjEndAsyncJob(vm) == 0)
+ qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@@ -1198,6 +1198,7 @@ endjob:
if (vm &&
virDomainObjIsActive(vm)) {
priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
+ qemuDomainObjSaveJob(driver, vm);
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@@ -1375,7 +1376,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
* should have already done that.
*/
if (!vm->persistent) {
- if (qemuDomainObjEndAsyncJob(vm) > 0)
+ if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -1408,7 +1409,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
endjob:
if (vm &&
- qemuDomainObjEndAsyncJob(vm) == 0)
+ qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
/* We set a fake job active which is held across
@@ -1419,6 +1420,7 @@ endjob:
if (vm &&
virDomainObjIsActive(vm)) {
priv->job.asyncJob = QEMU_ASYNC_JOB_MIGRATION_IN;
+ qemuDomainObjSaveJob(driver, vm);
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
priv->job.start = now;
}
@@ -2385,7 +2387,7 @@ endjob:
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
}
if (vm) {
- if (qemuDomainObjEndAsyncJob(vm) == 0) {
+ if (qemuDomainObjEndAsyncJob(driver, vm) == 0) {
vm = NULL;
} else if (!virDomainObjIsActive(vm) &&
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
@@ -2475,7 +2477,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
_("domain '%s' is not processing incoming
migration"), vm->def->name);
goto cleanup;
}
- qemuDomainObjDiscardAsyncJob(vm);
+ qemuDomainObjDiscardAsyncJob(driver, vm);
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
goto cleanup;
@@ -2555,7 +2557,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FAILED);
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -2591,7 +2593,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FAILED);
if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
+ if (qemuDomainObjEndJob(driver, vm) > 0)
virDomainRemoveInactive(&driver->domains, vm);
vm = NULL;
}
@@ -2602,7 +2604,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
endjob:
if (vm &&
- qemuDomainObjEndJob(vm) == 0)
+ qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 534a8b1..3ffde51 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -374,7 +374,7 @@ qemuProcessFakeReboot(void *opaque)
VIR_DEBUG("vm=%p", vm);
qemuDriverLock(driver);
virDomainObjLock(vm);
- if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -410,7 +410,7 @@ qemuProcessFakeReboot(void *opaque)
ret = 0;
endjob:
- if (qemuDomainObjEndJob(vm) == 0)
+ if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
@@ -3023,7 +3023,7 @@ static void qemuProcessAutoDestroyDom(void *payload,
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
- qemuDomainObjDiscardAsyncJob(dom);
+ qemuDomainObjDiscardAsyncJob(data->driver, dom);
}
if (qemuDomainObjBeginJobWithDriver(data->driver, dom,
@@ -3036,7 +3036,7 @@ static void qemuProcessAutoDestroyDom(void *payload,
event = virDomainEventNewFromObj(dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
- if (qemuDomainObjEndJob(dom) == 0)
+ if (qemuDomainObjEndJob(data->driver, dom) == 0)
dom = NULL;
if (dom && !dom->persistent)
virDomainRemoveInactive(&data->driver->domains, dom);
--
1.7.6