After removing all the external dependencies that `qemu_domainjob` had,
we move it to `hypervisor/virdomainjob`, thus creating hypervisor-agnostic
jobs.
This change involves moving the file and making the corresponding name
changes to functions and structures (qemuDomain* -> virDomain*,
QEMU_JOB_* -> VIR_JOB_*).
Signed-off-by: Prathamesh Chavan <pc44800@gmail.com>
---
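Note for reviewers (not part of the commit message): after this change,
drivers acquire and release jobs through the generic API. A minimal sketch
in C, mirroring the converted call sites below such as
qemuCheckpointCreateXML(); priv->job is the driver's embedded virDomainJobObj:

    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* Begin a synchronous MODIFY job; this waits up to
     * VIR_JOB_WAIT_TIME (30 seconds) for any conflicting job
     * to finish before reporting an error. */
    if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
        return -1;

    /* ... change domain state, enter the monitor, etc. ... */

    virDomainObjEndJob(vm, &priv->job);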
po/POTFILES.in | 2 +-
po/libvirt.pot | 34 +-
src/hypervisor/meson.build | 1 +
.../virdomainjob.c} | 395 ++++++------
src/hypervisor/virdomainjob.h | 243 +++++++
src/libvirt_private.syms | 28 +
src/qemu/meson.build | 1 -
src/qemu/qemu_backup.c | 46 +-
src/qemu/qemu_backup.h | 2 +-
src/qemu/qemu_block.c | 20 +-
src/qemu/qemu_block.h | 12 +-
src/qemu/qemu_blockjob.c | 32 +-
src/qemu/qemu_checkpoint.c | 22 +-
src/qemu/qemu_domain.c | 120 ++--
src/qemu/qemu_domain.h | 12 +-
src/qemu/qemu_domainjob.h | 243 -------
src/qemu/qemu_driver.c | 592 +++++++++---------
src/qemu/qemu_hotplug.c | 44 +-
src/qemu/qemu_hotplug.h | 8 +-
src/qemu/qemu_migration.c | 316 +++++-----
src/qemu/qemu_migration.h | 8 +-
src/qemu/qemu_migration_cookie.c | 2 +-
src/qemu/qemu_migration_params.c | 4 +-
src/qemu/qemu_process.c | 222 +++----
src/qemu/qemu_process.h | 22 +-
src/qemu/qemu_saveimage.c | 4 +-
src/qemu/qemu_saveimage.h | 6 +-
src/qemu/qemu_snapshot.c | 76 +--
28 files changed, 1273 insertions(+), 1244 deletions(-)
rename src/{qemu/qemu_domainjob.c => hypervisor/virdomainjob.c} (60%)
create mode 100644 src/hypervisor/virdomainjob.h
delete mode 100644 src/qemu/qemu_domainjob.h
diff --git a/po/POTFILES.in b/po/POTFILES.in
index 3d6c20c55f..e844ed4006 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -81,6 +81,7 @@
@SRCDIR@src/hypervisor/domain_cgroup.c
@SRCDIR@src/hypervisor/domain_driver.c
@SRCDIR@src/hypervisor/virclosecallbacks.c
+@SRCDIR@src/hypervisor/virdomainjob.c
@SRCDIR@src/hypervisor/virhostdev.c
@SRCDIR@src/interface/interface_backend_netcf.c
@SRCDIR@src/interface/interface_backend_udev.c
@@ -153,7 +154,6 @@
@SRCDIR@src/qemu/qemu_dbus.c
@SRCDIR@src/qemu/qemu_domain.c
@SRCDIR@src/qemu/qemu_domain_address.c
-@SRCDIR@src/qemu/qemu_domainjob.c
@SRCDIR@src/qemu/qemu_driver.c
@SRCDIR@src/qemu/qemu_extdevice.c
@SRCDIR@src/qemu/qemu_firmware.c
diff --git a/po/libvirt.pot b/po/libvirt.pot
index 92e77bf22b..26a43959b7 100644
--- a/po/libvirt.pot
+++ b/po/libvirt.pot
@@ -12344,7 +12344,7 @@ msgstr ""
msgid "Invalid ipv6 setting '%s' in network '%s' NAT"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1426
+#: src/hypervisor/virdomainjob.c:784
msgid "Invalid job flags"
msgstr ""
@@ -23238,7 +23238,7 @@ msgstr ""
msgid "Unknown architecture %s"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1408
+#: src/hypervisor/virdomainjob.c:766
#, c-format
msgid "Unknown async job type %s"
msgstr ""
@@ -23389,12 +23389,12 @@ msgstr ""
msgid "Unknown job"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1418
+#: src/hypervisor/virdomainjob.c:776
#, c-format
msgid "Unknown job phase %s"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1396
+#: src/hypervisor/virdomainjob.c:754
#, c-format
msgid "Unknown job type %s"
msgstr ""
@@ -25838,50 +25838,50 @@ msgid "cannot abort migration in post-copy mode"
msgstr ""
#: src/libxl/libxl_domain.c:152 src/lxc/lxc_domain.c:126
-#: src/qemu/qemu_domainjob.c:1010 src/vz/vz_utils.c:623
+#: src/hypervisor/virdomainjob.c:520 src/vz/vz_utils.c:623
msgid "cannot acquire job mutex"
msgstr ""
#: src/libxl/libxl_domain.c:149 src/lxc/lxc_domain.c:123
-#: src/qemu/qemu_domainjob.c:980 src/vz/vz_utils.c:620
+#: src/hypervisor/virdomainjob.c:490 src/vz/vz_utils.c:620
msgid "cannot acquire state change lock"
msgstr ""
-#: src/qemu/qemu_domainjob.c:975
+#: src/hypervisor/virdomainjob.c:485
#, c-format
msgid "cannot acquire state change lock (held by agent=%s)"
msgstr ""
-#: src/qemu/qemu_domainjob.c:999
+#: src/hypervisor/virdomainjob.c:509
#, c-format
msgid ""
"cannot acquire state change lock (held by agent=%s) due to max_queued limit"
msgstr ""
-#: src/qemu/qemu_domainjob.c:965
+#: src/hypervisor/virdomainjob.c:475
#, c-format
msgid "cannot acquire state change lock (held by monitor=%s agent=%s)"
msgstr ""
-#: src/qemu/qemu_domainjob.c:987
+#: src/hypervisor/virdomainjob.c:497
#, c-format
msgid ""
"cannot acquire state change lock (held by monitor=%s agent=%s) due to "
"max_queued limit"
msgstr ""
-#: src/qemu/qemu_domainjob.c:970
+#: src/hypervisor/virdomainjob.c:480
#, c-format
msgid "cannot acquire state change lock (held by monitor=%s)"
msgstr ""
-#: src/qemu/qemu_domainjob.c:993
+#: src/hypervisor/virdomainjob.c:503
#, c-format
msgid ""
"cannot acquire state change lock (held by monitor=%s) due to max_queued
limit"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1005
+#: src/hypervisor/virdomainjob.c:514
msgid "cannot acquire state change lock due to max_queued limit"
msgstr ""
@@ -34693,7 +34693,7 @@ msgstr ""
msgid "invalid iothreads count '%s'"
msgstr ""
-#: src/qemu/qemu_domainjob.c:684
+#: src/qemu/qemu_domain.c:496
msgid "invalid job statistics type"
msgstr ""
@@ -37902,11 +37902,11 @@ msgstr ""
msgid "missing storage pool target path"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1321
+#: src/qemu/qemu_domain.c:704
msgid "missing storage source format"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1315
+#: src/qemu/qemu_domain.c:698
msgid "missing storage source type"
msgstr ""
@@ -44855,7 +44855,7 @@ msgstr ""
msgid "unexpected address type for usb disk"
msgstr ""
-#: src/qemu/qemu_domainjob.c:1083
+#: src/hypervisor/virdomainjob.c:588
#, c-format
msgid "unexpected async job %d type expected %d"
msgstr ""
diff --git a/src/hypervisor/meson.build b/src/hypervisor/meson.build
index c81bdfa2fc..96afa0c52a 100644
--- a/src/hypervisor/meson.build
+++ b/src/hypervisor/meson.build
@@ -1,6 +1,7 @@
hypervisor_sources = [
'domain_cgroup.c',
'domain_driver.c',
+ 'virdomainjob.c',
'virclosecallbacks.c',
'virhostdev.c',
'virmigration.c',
diff --git a/src/qemu/qemu_domainjob.c b/src/hypervisor/virdomainjob.c
similarity index 60%
rename from src/qemu/qemu_domainjob.c
rename to src/hypervisor/virdomainjob.c
index ecd694958c..7de8d335e5 100644
--- a/src/qemu/qemu_domainjob.c
+++ b/src/hypervisor/virdomainjob.c
@@ -1,5 +1,5 @@
/*
- * qemu_domainjob.c: helper functions for QEMU domain jobs
+ * virdomainjob.c: helper functions for domain jobs
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -18,20 +18,21 @@
#include <config.h>
-#include "qemu_migration.h"
-#include "qemu_domainjob.h"
+#include "domain_conf.h"
+#include "virdomainjob.h"
+#include "virmigration.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "virtime.h"
#include "virthreadjob.h"
-#define VIR_FROM_THIS VIR_FROM_QEMU
+#define VIR_FROM_THIS VIR_FROM_NONE
-VIR_LOG_INIT("qemu.qemu_domainjob");
+VIR_LOG_INIT("util.virdomainjob");
-VIR_ENUM_IMPL(qemuDomainJob,
- QEMU_JOB_LAST,
+VIR_ENUM_IMPL(virDomainJob,
+ VIR_JOB_LAST,
"none",
"query",
"destroy",
@@ -43,15 +44,15 @@ VIR_ENUM_IMPL(qemuDomainJob,
"async nested",
);
-VIR_ENUM_IMPL(qemuDomainAgentJob,
- QEMU_AGENT_JOB_LAST,
+VIR_ENUM_IMPL(virDomainAgentJob,
+ VIR_AGENT_JOB_LAST,
"none",
"query",
"modify",
);
-VIR_ENUM_IMPL(qemuDomainAsyncJob,
- QEMU_ASYNC_JOB_LAST,
+VIR_ENUM_IMPL(virDomainAsyncJob,
+ VIR_ASYNC_JOB_LAST,
"none",
"migration out",
"migration in",
@@ -63,22 +64,22 @@ VIR_ENUM_IMPL(qemuDomainAsyncJob,
);
const char *
-qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
int phase G_GNUC_UNUSED)
{
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return virMigrationJobPhaseTypeToString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -86,25 +87,25 @@ qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
}
int
-qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
const char *phase)
{
if (!phase)
return 0;
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return virMigrationJobPhaseTypeFromString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -115,8 +116,8 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
}
int
-qemuDomainObjInitJob(qemuDomainJobObjPtr job,
- qemuDomainJobPrivateCallbacksPtr cb)
+virDomainObjInitJob(virDomainJobObjPtr job,
+ virDomainJobPrivateCallbacksPtr cb)
{
memset(job, 0, sizeof(*job));
job->cb = cb;
@@ -140,9 +141,9 @@ qemuDomainObjInitJob(qemuDomainJobObjPtr job,
static void
-qemuDomainObjResetJob(qemuDomainJobObjPtr job)
+virDomainObjResetJob(virDomainJobObjPtr job)
{
- job->active = QEMU_JOB_NONE;
+ job->active = VIR_JOB_NONE;
job->owner = 0;
job->ownerAPI = NULL;
job->started = 0;
@@ -150,9 +151,9 @@ qemuDomainObjResetJob(qemuDomainJobObjPtr job)
static void
-qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job)
+virDomainObjResetAgentJob(virDomainJobObjPtr job)
{
- job->agentActive = QEMU_AGENT_JOB_NONE;
+ job->agentActive = VIR_AGENT_JOB_NONE;
job->agentOwner = 0;
job->agentOwnerAPI = NULL;
job->agentStarted = 0;
@@ -160,14 +161,14 @@ qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job)
static void
-qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job)
+virDomainObjResetAsyncJob(virDomainJobObjPtr job)
{
- job->asyncJob = QEMU_ASYNC_JOB_NONE;
+ job->asyncJob = VIR_ASYNC_JOB_NONE;
job->asyncOwner = 0;
job->asyncOwnerAPI = NULL;
job->asyncStarted = 0;
job->phase = 0;
- job->mask = QEMU_JOB_DEFAULT_MASK;
+ job->mask = VIR_JOB_DEFAULT_MASK;
job->abortJob = false;
VIR_FREE(job->error);
job->cb->jobcb->resetJobPrivate(job->privateData);
@@ -175,8 +176,8 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job)
}
int
-qemuDomainObjRestoreJob(qemuDomainJobObjPtr job,
- qemuDomainJobObjPtr oldJob)
+virDomainObjRestoreJob(virDomainJobObjPtr job,
+ virDomainJobObjPtr oldJob)
{
memset(oldJob, 0, sizeof(*oldJob));
oldJob->active = job->active;
@@ -191,32 +192,32 @@ qemuDomainObjRestoreJob(qemuDomainJobObjPtr job,
return -1;
oldJob->cb = job->cb;
- qemuDomainObjResetJob(job);
- qemuDomainObjResetAsyncJob(job);
+ virDomainObjResetJob(job);
+ virDomainObjResetAsyncJob(job);
return 0;
}
void
-qemuDomainObjFreeJob(qemuDomainJobObjPtr job)
+virDomainObjFreeJob(virDomainJobObjPtr job)
{
- qemuDomainObjResetJob(job);
- qemuDomainObjResetAsyncJob(job);
+ virDomainObjResetJob(job);
+ virDomainObjResetAsyncJob(job);
job->cb->jobcb->freeJobPrivate(job->privateData);
virCondDestroy(&job->cond);
virCondDestroy(&job->asyncCond);
}
bool
-qemuDomainTrackJob(qemuDomainJob job)
+virDomainTrackJob(virDomainJob job)
{
- return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
+ return (VIR_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}
void
-qemuDomainObjSetJobPhase(virDomainObjPtr obj,
- qemuDomainJobObjPtr job,
- int phase)
+virDomainObjSetJobPhase(virDomainObjPtr obj,
+ virDomainJobObjPtr job,
+ int phase)
{
unsigned long long me = virThreadSelfID();
@@ -224,12 +225,12 @@ qemuDomainObjSetJobPhase(virDomainObjPtr obj,
return;
VIR_DEBUG("Setting '%s' phase to '%s'",
- qemuDomainAsyncJobTypeToString(job->asyncJob),
- qemuDomainAsyncJobPhaseToString(job->asyncJob, phase));
+ virDomainAsyncJobTypeToString(job->asyncJob),
+ virDomainAsyncJobPhaseToString(job->asyncJob, phase));
if (job->asyncOwner && me != job->asyncOwner) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(job->asyncJob),
+ virDomainAsyncJobTypeToString(job->asyncJob),
job->asyncOwner);
}
@@ -239,77 +240,77 @@ qemuDomainObjSetJobPhase(virDomainObjPtr obj,
}
void
-qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job,
- unsigned long long allowedJobs)
+virDomainObjSetAsyncJobMask(virDomainJobObjPtr job,
+ unsigned long long allowedJobs)
{
if (!job->asyncJob)
return;
- job->mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+ job->mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
}
void
-qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job)
+virDomainObjDiscardAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr job)
{
- if (job->active == QEMU_JOB_ASYNC_NESTED)
- qemuDomainObjResetJob(job);
- qemuDomainObjResetAsyncJob(job);
+ if (job->active == VIR_JOB_ASYNC_NESTED)
+ virDomainObjResetJob(job);
+ virDomainObjResetAsyncJob(job);
job->cb->saveStatus(obj);
}
void
-qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job)
+virDomainObjReleaseAsyncJob(virDomainJobObjPtr job)
{
VIR_DEBUG("Releasing ownership of '%s' async job",
- qemuDomainAsyncJobTypeToString(job->asyncJob));
+ virDomainAsyncJobTypeToString(job->asyncJob));
if (job->asyncOwner != virThreadSelfID()) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(job->asyncJob),
+ virDomainAsyncJobTypeToString(job->asyncJob),
job->asyncOwner);
}
job->asyncOwner = 0;
}
static bool
-qemuDomainNestedJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
+virDomainNestedJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob)
{
return !jobs->asyncJob ||
- newJob == QEMU_JOB_NONE ||
+ newJob == VIR_JOB_NONE ||
(jobs->mask & JOB_MASK(newJob)) != 0;
}
bool
-qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
+virDomainJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob)
{
- return !jobs->active && qemuDomainNestedJobAllowed(jobs, newJob);
+ return !jobs->active && virDomainNestedJobAllowed(jobs, newJob);
}
static bool
-qemuDomainObjCanSetJob(qemuDomainJobObjPtr job,
- qemuDomainJob newJob,
- qemuDomainAgentJob newAgentJob)
+virDomainObjCanSetJob(virDomainJobObjPtr job,
+ virDomainJob newJob,
+ virDomainAgentJob newAgentJob)
{
- return ((newJob == QEMU_JOB_NONE ||
- job->active == QEMU_JOB_NONE) &&
- (newAgentJob == QEMU_AGENT_JOB_NONE ||
- job->agentActive == QEMU_AGENT_JOB_NONE));
+ return ((newJob == VIR_JOB_NONE ||
+ job->active == VIR_JOB_NONE) &&
+ (newAgentJob == VIR_AGENT_JOB_NONE ||
+ job->agentActive == VIR_AGENT_JOB_NONE));
}
/* Give up waiting for mutex after 30 seconds */
-#define QEMU_JOB_WAIT_TIME (1000ull * 30)
+#define VIR_JOB_WAIT_TIME (1000ull * 30)
/**
- * qemuDomainObjBeginJobInternal:
+ * virDomainObjBeginJobInternal:
* @obj: domain object
- * @job: qemuDomainJob to start
- * @asyncJob: qemuDomainAsyncJob to start
+ * @job: virDomainJob to start
+ * @asyncJob: virDomainAsyncJob to start
* @nowait: don't wait trying to acquire @job
*
* Acquires job for a domain object which must be locked before
* calling. If there's already a job running waits up to
- * QEMU_JOB_WAIT_TIME after which the functions fails reporting
+ * VIR_JOB_WAIT_TIME after which the functions fails reporting
* an error unless @nowait is set.
*
* If @nowait is true this function tries to acquire job and if
@@ -322,17 +323,17 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job,
* -1 otherwise.
*/
static int ATTRIBUTE_NONNULL(1)
-qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainJob job,
- qemuDomainAgentJob agentJob,
- qemuDomainAsyncJob asyncJob,
- bool nowait)
+virDomainObjBeginJobInternal(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainJob job,
+ virDomainAgentJob agentJob,
+ virDomainAsyncJob asyncJob,
+ bool nowait)
{
unsigned long long now;
unsigned long long then;
- bool nested = job == QEMU_JOB_ASYNC_NESTED;
- bool async = job == QEMU_JOB_ASYNC;
+ bool nested = job == VIR_JOB_ASYNC_NESTED;
+ bool async = job == VIR_JOB_ASYNC;
const char *blocker = NULL;
const char *agentBlocker = NULL;
int ret = -1;
@@ -342,28 +343,28 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
VIR_DEBUG("Starting job: job=%s agentJob=%s asyncJob=%s "
"(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(jobObj->active),
- qemuDomainAgentJobTypeToString(jobObj->agentActive),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob));
+ virDomainJobTypeToString(jobObj->active),
+ virDomainAgentJobTypeToString(jobObj->agentActive),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob));
if (virTimeMillisNow(&now) < 0)
return -1;
jobObj->cb->jobcb->increaseJobsQueued(obj);
- then = now + QEMU_JOB_WAIT_TIME;
+ then = now + VIR_JOB_WAIT_TIME;
retry:
- if ((!async && job != QEMU_JOB_DESTROY) &&
+ if ((!async && job != VIR_JOB_DESTROY) &&
jobObj->cb->jobcb->getMaxQueuedJobs(obj) &&
jobObj->cb->jobcb->getJobsQueued(obj) >
jobObj->cb->jobcb->getMaxQueuedJobs(obj)) {
goto error;
}
- while (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) {
+ while (!nested && !virDomainNestedJobAllowed(jobObj, job)) {
if (nowait)
goto cleanup;
@@ -372,7 +373,7 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
goto error;
}
- while (!qemuDomainObjCanSetJob(jobObj, job, agentJob)) {
+ while (!virDomainObjCanSetJob(jobObj, job, agentJob)) {
if (nowait)
goto cleanup;
@@ -383,18 +384,18 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
/* No job is active but a new async job could have been started while obj
* was unlocked, so we need to recheck it. */
- if (!nested && !qemuDomainNestedJobAllowed(jobObj, job))
+ if (!nested && !virDomainNestedJobAllowed(jobObj, job))
goto retry;
ignore_value(virTimeMillisNow(&now));
if (job) {
- qemuDomainObjResetJob(jobObj);
+ virDomainObjResetJob(jobObj);
- if (job != QEMU_JOB_ASYNC) {
+ if (job != VIR_JOB_ASYNC) {
VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob),
obj, obj->def->name);
jobObj->active = job;
jobObj->owner = virThreadSelfID();
@@ -402,9 +403,9 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
jobObj->started = now;
} else {
VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name);
- qemuDomainObjResetAsyncJob(jobObj);
+ virDomainObjResetAsyncJob(jobObj);
jobObj->cb->jobcb->currentJobInfoInit(jobObj, now);
jobObj->asyncJob = asyncJob;
jobObj->asyncOwner = virThreadSelfID();
@@ -414,20 +415,20 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
}
if (agentJob) {
- qemuDomainObjResetAgentJob(jobObj);
+ virDomainObjResetAgentJob(jobObj);
VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
+ virDomainAgentJobTypeToString(agentJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(jobObj->active),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob));
+ virDomainJobTypeToString(jobObj->active),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob));
jobObj->agentActive = agentJob;
jobObj->agentOwner = virThreadSelfID();
jobObj->agentOwnerAPI = virThreadJobGet();
jobObj->agentStarted = now;
}
- if (qemuDomainTrackJob(job))
+ if (virDomainTrackJob(job))
jobObj->cb->saveStatus(obj);
return 0;
@@ -445,13 +446,13 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
"current job is (%s, %s, %s) "
"owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
"for (%llus, %llus, %llus)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj->def->name,
- qemuDomainJobTypeToString(jobObj->active),
- qemuDomainAgentJobTypeToString(jobObj->agentActive),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob),
+ virDomainJobTypeToString(jobObj->active),
+ virDomainAgentJobTypeToString(jobObj->agentActive),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob),
jobObj->owner, NULLSTR(jobObj->ownerAPI),
jobObj->agentOwner, NULLSTR(jobObj->agentOwnerAPI),
jobObj->asyncOwner, NULLSTR(jobObj->asyncOwnerAPI),
@@ -459,7 +460,7 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
duration / 1000, agentDuration / 1000, asyncDuration / 1000);
if (job) {
- if (nested || qemuDomainNestedJobAllowed(jobObj, job))
+ if (nested || virDomainNestedJobAllowed(jobObj, job))
blocker = jobObj->ownerAPI;
else
blocker = jobObj->asyncOwnerAPI;
@@ -528,48 +529,48 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj,
* obj must be locked before calling
*
* This must be called by anything that will change the VM state
- * in any way, or anything that will use the QEMU monitor.
+ * in any way, or anything that will use the (QEMU) monitor.
*
* Successful calls must be followed by EndJob eventually
*/
-int qemuDomainObjBeginJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainJob job)
+int virDomainObjBeginJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainJob job)
{
- if (qemuDomainObjBeginJobInternal(obj, jobObj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (virDomainObjBeginJobInternal(obj, jobObj, job,
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
else
return 0;
}
/**
- * qemuDomainObjBeginAgentJob:
+ * virDomainObjBeginAgentJob:
*
* Grabs agent type of job. Use if caller talks to guest agent only.
*
- * To end job call qemuDomainObjEndAgentJob.
+ * To end job call virDomainObjEndAgentJob.
*/
int
-qemuDomainObjBeginAgentJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAgentJob agentJob)
+virDomainObjBeginAgentJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAgentJob agentJob)
{
- return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE,
- agentJob,
- QEMU_ASYNC_JOB_NONE, false);
+ return virDomainObjBeginJobInternal(obj, jobObj, VIR_JOB_NONE,
+ agentJob,
+ VIR_ASYNC_JOB_NONE, false);
}
-int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAsyncJob asyncJob,
- virDomainJobOperation operation,
- unsigned long apiFlags)
+int virDomainObjBeginAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAsyncJob asyncJob,
+ virDomainJobOperation operation,
+ unsigned long apiFlags)
{
- if (qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC,
- QEMU_AGENT_JOB_NONE,
- asyncJob, false) < 0)
+ if (virDomainObjBeginJobInternal(obj, jobObj, VIR_JOB_ASYNC,
+ VIR_AGENT_JOB_NONE,
+ asyncJob, false) < 0)
return -1;
jobObj->cb->jobcb->setJobInfoOperation(jobObj, operation);
@@ -578,9 +579,9 @@ int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
}
int
-qemuDomainObjBeginNestedJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAsyncJob asyncJob)
+virDomainObjBeginNestedJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAsyncJob asyncJob)
{
if (asyncJob != jobObj->asyncJob) {
virReportError(VIR_ERR_INTERNAL_ERROR,
@@ -594,56 +595,56 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj,
jobObj->asyncOwner);
}
- return qemuDomainObjBeginJobInternal(obj, jobObj,
- QEMU_JOB_ASYNC_NESTED,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE,
+ return virDomainObjBeginJobInternal(obj, jobObj,
+ VIR_JOB_ASYNC_NESTED,
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE,
false);
}
/**
- * qemuDomainObjBeginJobNowait:
+ * virDomainObjBeginJobNowait:
*
* @obj: domain object
- * @jobObj: qemuDomainJobObjPtr
- * @job: qemuDomainJob to start
+ * @jobObj: virDomainJobObjPtr
+ * @job: virDomainJob to start
*
* Acquires job for a domain object which must be locked before
* calling. If there's already a job running it returns
* immediately without any error reported.
*
- * Returns: see qemuDomainObjBeginJobInternal
+ * Returns: see virDomainObjBeginJobInternal
*/
int
-qemuDomainObjBeginJobNowait(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainJob job)
+virDomainObjBeginJobNowait(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainJob job)
{
- return qemuDomainObjBeginJobInternal(obj, jobObj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, true);
+ return virDomainObjBeginJobInternal(obj, jobObj, job,
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, true);
}
/*
* obj must be locked and have a reference before calling
*
* To be called after completing the work associated with the
- * earlier qemuDomainBeginJob() call
+ * earlier virDomainBeginJob() call
*/
void
-qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj)
+virDomainObjEndJob(virDomainObjPtr obj, virDomainJobObjPtr jobObj)
{
- qemuDomainJob job = jobObj->active;
+ virDomainJob job = jobObj->active;
jobObj->cb->jobcb->decreaseJobsQueued(obj);
VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob),
obj, obj->def->name);
- qemuDomainObjResetJob(jobObj);
- if (qemuDomainTrackJob(job))
+ virDomainObjResetJob(jobObj);
+ if (virDomainTrackJob(job))
jobObj->cb->saveStatus(obj);
/* We indeed need to wake up ALL threads waiting because
* grabbing a job requires checking more variables. */
@@ -651,45 +652,45 @@ qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj)
}
void
-qemuDomainObjEndAgentJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj)
+virDomainObjEndAgentJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj)
{
- qemuDomainAgentJob agentJob = jobObj->agentActive;
+ virDomainAgentJob agentJob = jobObj->agentActive;
jobObj->cb->jobcb->decreaseJobsQueued(obj);
VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob),
obj, obj->def->name);
- qemuDomainObjResetAgentJob(jobObj);
+ virDomainObjResetAgentJob(jobObj);
/* We indeed need to wake up ALL threads waiting because
* grabbing a job requires checking more variables. */
virCondBroadcast(&jobObj->cond);
}
void
-qemuDomainObjEndAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj)
+virDomainObjEndAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj)
{
jobObj->cb->jobcb->decreaseJobsQueued(obj);
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob),
obj, obj->def->name);
- qemuDomainObjResetAsyncJob(jobObj);
+ virDomainObjResetAsyncJob(jobObj);
jobObj->cb->saveStatus(obj);
virCondBroadcast(&jobObj->asyncCond);
}
void
-qemuDomainObjAbortAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job)
+virDomainObjAbortAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr job)
{
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(job->asyncJob),
+ virDomainAsyncJobTypeToString(job->asyncJob),
obj, obj->def->name);
job->abortJob = true;
@@ -697,32 +698,32 @@ qemuDomainObjAbortAsyncJob(virDomainObjPtr obj,
}
int
-qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf,
- virDomainObjPtr vm,
- qemuDomainJobObjPtr jobObj)
+virDomainObjPrivateXMLFormatJob(virBufferPtr buf,
+ virDomainObjPtr vm,
+ virDomainJobObjPtr jobObj)
{
g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
- qemuDomainJob job = jobObj->active;
+ virDomainJob job = jobObj->active;
- if (!qemuDomainTrackJob(job))
- job = QEMU_JOB_NONE;
+ if (!virDomainTrackJob(job))
+ job = VIR_JOB_NONE;
- if (job == QEMU_JOB_NONE &&
- jobObj->asyncJob == QEMU_ASYNC_JOB_NONE)
+ if (job == VIR_JOB_NONE &&
+ jobObj->asyncJob == VIR_ASYNC_JOB_NONE)
return 0;
virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(jobObj->asyncJob));
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(jobObj->asyncJob));
if (jobObj->phase) {
virBufferAsprintf(&attrBuf, " phase='%s'",
- qemuDomainAsyncJobPhaseToString(jobObj->asyncJob,
+ virDomainAsyncJobPhaseToString(jobObj->asyncJob,
jobObj->phase));
}
- if (jobObj->asyncJob != QEMU_ASYNC_JOB_NONE)
+ if (jobObj->asyncJob != VIR_ASYNC_JOB_NONE)
virBufferAsprintf(&attrBuf, " flags='0x%lx'",
jobObj->apiFlags);
if (jobObj->cb->jobcb->formatJob(&childBuf, jobObj, vm) < 0)
@@ -735,9 +736,9 @@ qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf,
int
-qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
- xmlXPathContextPtr ctxt,
- qemuDomainJobObjPtr job)
+virDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
+ xmlXPathContextPtr ctxt,
+ virDomainJobObjPtr job)
{
VIR_XPATH_NODE_AUTORESTORE(ctxt)
g_autofree char *tmp = NULL;
@@ -748,7 +749,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
if ((tmp = virXPathString("string(@type)", ctxt))) {
int type;
- if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
+ if ((type = virDomainJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
return -1;
@@ -760,7 +761,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
if ((tmp = virXPathString("string(@async)", ctxt))) {
int async;
- if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
+ if ((async = virDomainAsyncJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
return -1;
@@ -769,7 +770,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
job->asyncJob = async;
if ((tmp = virXPathString("string(@phase)", ctxt))) {
- job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+ job->phase = virDomainAsyncJobPhaseFromString(async, tmp);
if (job->phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
diff --git a/src/hypervisor/virdomainjob.h b/src/hypervisor/virdomainjob.h
new file mode 100644
index 0000000000..0c3265aeb1
--- /dev/null
+++ b/src/hypervisor/virdomainjob.h
@@ -0,0 +1,243 @@
+/*
+ * virdomainjob.h: helper functions for domain jobs
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <glib-object.h>
+
+#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
+#define VIR_JOB_DEFAULT_MASK \
+ (JOB_MASK(VIR_JOB_QUERY) | \
+ JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ABORT))
+
+/* Jobs which have to be tracked in domain state XML. */
+#define VIR_DOMAIN_TRACK_JOBS \
+ (JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ASYNC))
+
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+typedef enum {
+ VIR_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
+ VIR_JOB_QUERY, /* Doesn't change any state */
+ VIR_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
+ VIR_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
+ VIR_JOB_MODIFY, /* May change state */
+ VIR_JOB_ABORT, /* Abort current async job */
+ VIR_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
+
+ /* The following two items must always be the last items before JOB_LAST */
+ VIR_JOB_ASYNC, /* Asynchronous job */
+ VIR_JOB_ASYNC_NESTED, /* Normal job within an async job */
+
+ VIR_JOB_LAST
+} virDomainJob;
+VIR_ENUM_DECL(virDomainJob);
+
+typedef enum {
+ VIR_AGENT_JOB_NONE = 0, /* No agent job. */
+ VIR_AGENT_JOB_QUERY, /* Does not change state of domain */
+ VIR_AGENT_JOB_MODIFY, /* May change state of domain */
+
+ VIR_AGENT_JOB_LAST
+} virDomainAgentJob;
+VIR_ENUM_DECL(virDomainAgentJob);
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ */
+typedef enum {
+ VIR_ASYNC_JOB_NONE = 0,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_SAVE,
+ VIR_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_SNAPSHOT,
+ VIR_ASYNC_JOB_START,
+ VIR_ASYNC_JOB_BACKUP,
+
+ VIR_ASYNC_JOB_LAST
+} virDomainAsyncJob;
+VIR_ENUM_DECL(virDomainAsyncJob);
+
+typedef enum {
+ VIR_DOMAIN_JOB_STATUS_NONE = 0,
+ VIR_DOMAIN_JOB_STATUS_ACTIVE,
+ VIR_DOMAIN_JOB_STATUS_MIGRATING,
+ VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
+ VIR_DOMAIN_JOB_STATUS_PAUSED,
+ VIR_DOMAIN_JOB_STATUS_POSTCOPY,
+ VIR_DOMAIN_JOB_STATUS_COMPLETED,
+ VIR_DOMAIN_JOB_STATUS_FAILED,
+ VIR_DOMAIN_JOB_STATUS_CANCELED,
+} virDomainJobStatus;
+
+typedef enum {
+ VIR_DOMAIN_JOB_STATS_TYPE_NONE = 0,
+ VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION,
+ VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP,
+ VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP,
+ VIR_DOMAIN_JOB_STATS_TYPE_BACKUP,
+} virDomainJobStatsType;
+
+typedef struct _virDomainJobObj virDomainJobObj;
+typedef virDomainJobObj *virDomainJobObjPtr;
+
+typedef void *(*virDomainObjPrivateJobAlloc)(void);
+typedef void (*virDomainObjPrivateJobFree)(void *);
+typedef void (*virDomainObjPrivateJobReset)(void *);
+typedef void (*virDomainObjPrivateSaveStatus)(virDomainObjPtr);
+typedef int (*virDomainObjPrivateJobFormat)(virBufferPtr,
+ virDomainJobObjPtr,
+ virDomainObjPtr);
+typedef int (*virDomainObjPrivateJobParse)(xmlXPathContextPtr,
+ virDomainJobObjPtr,
+ virDomainObjPtr);
+typedef void (*virDomainObjJobInfoSetOperation)(virDomainJobObjPtr,
+ virDomainJobOperation);
+typedef void (*virDomainObjCurrentJobInfoInit)(virDomainJobObjPtr,
+ unsigned long long);
+typedef int (*virDomainObjGetJobsQueued)(virDomainObjPtr);
+typedef void (*virDomainObjIncreaseJobsQueued)(virDomainObjPtr);
+typedef void (*virDomainObjDecreaseJobsQueued)(virDomainObjPtr);
+typedef int (*virDomainObjGetMaxQueuedJobs)(virDomainObjPtr);
+
+typedef struct _virDomainJobPrivateJobCallbacks virDomainJobPrivateJobCallbacks;
+typedef virDomainJobPrivateJobCallbacks *virDomainJobPrivateJobCallbacksPtr;
+struct _virDomainJobPrivateJobCallbacks {
+ virDomainObjPrivateJobAlloc allocJobPrivate;
+ virDomainObjPrivateJobFree freeJobPrivate;
+ virDomainObjPrivateJobReset resetJobPrivate;
+ virDomainObjPrivateJobFormat formatJob;
+ virDomainObjPrivateJobParse parseJob;
+ virDomainObjJobInfoSetOperation setJobInfoOperation;
+ virDomainObjCurrentJobInfoInit currentJobInfoInit;
+ virDomainObjGetJobsQueued getJobsQueued;
+ virDomainObjIncreaseJobsQueued increaseJobsQueued;
+ virDomainObjDecreaseJobsQueued decreaseJobsQueued;
+ virDomainObjGetMaxQueuedJobs getMaxQueuedJobs;
+};
+
+typedef struct _virDomainJobPrivateCallbacks virDomainJobPrivateCallbacks;
+typedef virDomainJobPrivateCallbacks *virDomainJobPrivateCallbacksPtr;
+struct _virDomainJobPrivateCallbacks {
+ /* generic callbacks that we can't really categorize */
+ virDomainObjPrivateSaveStatus saveStatus;
+
+ /* Job related callbacks */
+ virDomainJobPrivateJobCallbacksPtr jobcb;
+};
+
+struct _virDomainJobObj {
+ virCond cond; /* Use to coordinate jobs */
+
+ /* The following members are for VIR_JOB_* */
+ virDomainJob active; /* Currently running job */
+ unsigned long long owner; /* Thread id which set current job */
+ const char *ownerAPI; /* The API which owns the job */
+ unsigned long long started; /* When the current job started */
+
+ /* The following members are for VIR_AGENT_JOB_* */
+ virDomainAgentJob agentActive; /* Currently running agent job */
+ unsigned long long agentOwner; /* Thread id which set current agent job */
+ const char *agentOwnerAPI; /* The API which owns the agent job */
+ unsigned long long agentStarted; /* When the current agent job started */
+
+ /* The following members are for VIR_ASYNC_JOB_* */
+ virCond asyncCond; /* Use to coordinate with async jobs */
+ virDomainAsyncJob asyncJob; /* Currently active async job */
+ unsigned long long asyncOwner; /* Thread which set current async job */
+ const char *asyncOwnerAPI; /* The API which owns the async job */
+ unsigned long long asyncStarted; /* When the current async job started */
+ int phase; /* Job phase (mainly for migrations) */
+ unsigned long long mask; /* Jobs allowed during async job */
+ bool abortJob; /* abort of the job requested */
+ char *error; /* job event completion error */
+ unsigned long apiFlags; /* flags passed to the API which started the async job */
+
+ void *privateData; /* job specific collection of data */
+ virDomainJobPrivateCallbacksPtr cb;
+};
+
+const char *virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
+ int phase);
+int virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
+ const char *phase);
+
+int virDomainObjBeginJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainJob job)
+ G_GNUC_WARN_UNUSED_RESULT;
+int virDomainObjBeginAgentJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAgentJob agentJob)
+ G_GNUC_WARN_UNUSED_RESULT;
+int virDomainObjBeginAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAsyncJob asyncJob,
+ virDomainJobOperation operation,
+ unsigned long apiFlags)
+ G_GNUC_WARN_UNUSED_RESULT;
+int virDomainObjBeginNestedJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainAsyncJob asyncJob)
+ G_GNUC_WARN_UNUSED_RESULT;
+int virDomainObjBeginJobNowait(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj,
+ virDomainJob job)
+ G_GNUC_WARN_UNUSED_RESULT;
+
+void virDomainObjEndJob(virDomainObjPtr obj, virDomainJobObjPtr jobObj);
+void virDomainObjEndAgentJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj);
+void virDomainObjEndAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr jobObj);
+void virDomainObjAbortAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr job);
+void virDomainObjSetJobPhase(virDomainObjPtr obj,
+ virDomainJobObjPtr job,
+ int phase);
+void virDomainObjSetAsyncJobMask(virDomainJobObjPtr job,
+ unsigned long long allowedJobs);
+int virDomainObjRestoreJob(virDomainJobObjPtr job,
+ virDomainJobObjPtr oldJob);
+void virDomainObjDiscardAsyncJob(virDomainObjPtr obj,
+ virDomainJobObjPtr job);
+void virDomainObjReleaseAsyncJob(virDomainJobObjPtr job);
+
+bool virDomainTrackJob(virDomainJob job);
+
+void virDomainObjFreeJob(virDomainJobObjPtr job);
+
+int
+virDomainObjInitJob(virDomainJobObjPtr job,
+ virDomainJobPrivateCallbacksPtr cb);
+
+bool virDomainJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob);
+
+int
+virDomainObjPrivateXMLFormatJob(virBufferPtr buf,
+ virDomainObjPtr vm,
+ virDomainJobObjPtr jobObj);
+
+int
+virDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
+ xmlXPathContextPtr ctxt,
+ virDomainJobObjPtr job);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index c7adf16aba..d3b8833844 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1447,6 +1447,34 @@ virCloseCallbacksSet;
virCloseCallbacksUnset;
+# hypervisor/virdomainjob.h
+virDomainAsyncJobPhaseFromString;
+virDomainAsyncJobPhaseToString;
+virDomainAsyncJobTypeFromString;
+virDomainAsyncJobTypeToString;
+virDomainJobAllowed;
+virDomainJobTypeFromString;
+virDomainJobTypeToString;
+virDomainObjAbortAsyncJob;
+virDomainObjBeginAgentJob;
+virDomainObjBeginAsyncJob;
+virDomainObjBeginJob;
+virDomainObjBeginJobNowait;
+virDomainObjBeginNestedJob;
+virDomainObjDiscardAsyncJob;
+virDomainObjEndAgentJob;
+virDomainObjEndAsyncJob;
+virDomainObjEndJob;
+virDomainObjFreeJob;
+virDomainObjInitJob;
+virDomainObjPrivateXMLFormatJob;
+virDomainObjPrivateXMLParseJob;
+virDomainObjReleaseAsyncJob;
+virDomainObjRestoreJob;
+virDomainObjSetAsyncJobMask;
+virDomainObjSetJobPhase;
+
+
# hypervisor/virhostdev.h
virHostdevFindUSBDevice;
virHostdevManagerGetDefault;
diff --git a/src/qemu/meson.build b/src/qemu/meson.build
index 4e599d1e69..1be0da010b 100644
--- a/src/qemu/meson.build
+++ b/src/qemu/meson.build
@@ -12,7 +12,6 @@ qemu_driver_sources = [
'qemu_dbus.c',
'qemu_domain.c',
'qemu_domain_address.c',
- 'qemu_domainjob.c',
'qemu_driver.c',
'qemu_extdevice.c',
'qemu_firmware.c',
diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index 4e606c252f..7d951d7786 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -436,10 +436,10 @@ qemuBackupDiskPrepareOneStorage(virDomainObjPtr vm,
if (qemuBlockStorageSourceCreate(vm, dd->store, dd->backingStore, NULL,
dd->crdata->srcdata[0],
- QEMU_ASYNC_JOB_BACKUP) < 0)
+ VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
} else {
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
rc = qemuBlockStorageSourceAttachApply(priv->mon,
dd->crdata->srcdata[0]);
@@ -525,7 +525,7 @@ qemuBackupBeginPullExportDisks(virDomainObjPtr vm,
void
qemuBackupJobTerminate(virDomainObjPtr vm,
- qemuDomainJobStatus jobstatus)
+ virDomainJobStatus jobstatus)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -550,7 +550,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm,
if (!(priv->job.apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) &&
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL ||
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH &&
- jobstatus != QEMU_DOMAIN_JOB_STATUS_COMPLETED))) {
+ jobstatus != VIR_DOMAIN_JOB_STATUS_COMPLETED))) {
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
@@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm,
virDomainBackupDefFree(priv->backup);
priv->backup = NULL;
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
}
@@ -640,7 +640,7 @@ qemuBackupJobCancelBlockjobs(virDomainObjPtr vm,
}
if (terminatebackup && !has_active)
- qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED);
+ qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
}
@@ -740,15 +740,15 @@ qemuBackupBegin(virDomainObjPtr vm,
* infrastructure for async jobs. We'll allow standard modify-type jobs
* as the interlocking of conflicting operations is handled on the block
* job level */
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_BACKUP,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_BACKUP,
VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0)
return -1;
- qemuDomainObjSetAsyncJobMask(&priv->job,
- (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
+ virDomainObjSetAsyncJobMask(&priv->job,
+ (VIR_JOB_DEFAULT_MASK |
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_BACKUP;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
@@ -787,7 +787,7 @@ qemuBackupBegin(virDomainObjPtr vm,
goto endjob;
}
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_BACKUP)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_BACKUP)))
goto endjob;
if ((ndd = qemuBackupDiskPrepareData(vm, def, blockNamedNodeData, actions,
@@ -805,7 +805,7 @@ qemuBackupBegin(virDomainObjPtr vm,
priv->backup = g_steal_pointer(&def);
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
/* TODO: TLS is a must-have for the modern age */
@@ -838,7 +838,7 @@ qemuBackupBegin(virDomainObjPtr vm,
}
if (pull) {
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
/* note that if the export fails we've already created the checkpoint
* and we will not delete it */
@@ -847,7 +847,7 @@ qemuBackupBegin(virDomainObjPtr vm,
goto endjob;
if (rc < 0) {
- qemuBackupJobCancelBlockjobs(vm, priv->backup, false, QEMU_ASYNC_JOB_BACKUP);
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, false, VIR_ASYNC_JOB_BACKUP);
goto endjob;
}
}
@@ -864,7 +864,7 @@ qemuBackupBegin(virDomainObjPtr vm,
qemuCheckpointRollbackMetadata(vm, chk);
if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) &&
- qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) == 0) {
+ qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) == 0) {
if (nbd_running)
ignore_value(qemuMonitorNBDServerStop(priv->mon));
if (tlsAlias)
@@ -878,9 +878,9 @@ qemuBackupBegin(virDomainObjPtr vm,
def = g_steal_pointer(&priv->backup);
if (ret == 0)
- qemuDomainObjReleaseAsyncJob(&priv->job);
+ virDomainObjReleaseAsyncJob(&priv->job);
else
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
return ret;
}
@@ -919,7 +919,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm,
bool has_cancelling = false;
bool has_cancelled = false;
bool has_failed = false;
- qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ virDomainJobStatus jobstatus = VIR_DOMAIN_JOB_STATUS_COMPLETED;
virDomainBackupDefPtr backup = priv->backup;
size_t i;
@@ -1017,9 +1017,9 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm,
/* all sub-jobs have stopped */
if (has_failed)
- jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ jobstatus = VIR_DOMAIN_JOB_STATUS_FAILED;
else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH)
- jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED;
+ jobstatus = VIR_DOMAIN_JOB_STATUS_CANCELED;
qemuBackupJobTerminate(vm, jobstatus);
}
@@ -1088,7 +1088,7 @@ qemuBackupGetJobInfoStats(virDomainObjPtr vm,
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
return -1;
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
qemuDomainObjEnterMonitor(vm);
diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h
index 9925fddbf9..6cd1797cae 100644
--- a/src/qemu/qemu_backup.h
+++ b/src/qemu/qemu_backup.h
@@ -45,7 +45,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm,
void
qemuBackupJobTerminate(virDomainObjPtr vm,
- qemuDomainJobStatus jobstatus);
+ virDomainJobStatus jobstatus);
int
qemuBackupGetJobInfoStats(virDomainObjPtr vm,
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index 23b60e73ec..c2f3cacbf2 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -321,7 +321,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk,
int
qemuBlockNodeNamesDetect(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virHashTable) disktable = NULL;
@@ -1985,7 +1985,7 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon,
*/
int
qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSourcePtr src)
{
int ret;
@@ -2545,7 +2545,7 @@ qemuBlockStorageSourceCreateGeneric(virDomainObjPtr vm,
virStorageSourcePtr src,
virStorageSourcePtr chain,
bool storageCreate,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) props = createProps;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -2600,7 +2600,7 @@ static int
qemuBlockStorageSourceCreateStorage(virDomainObjPtr vm,
virStorageSourcePtr src,
virStorageSourcePtr chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int actualType = virStorageSourceGetActualType(src);
g_autoptr(virJSONValue) createstorageprops = NULL;
@@ -2637,7 +2637,7 @@ qemuBlockStorageSourceCreateFormat(virDomainObjPtr vm,
virStorageSourcePtr src,
virStorageSourcePtr backingStore,
virStorageSourcePtr chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) createformatprops = NULL;
int ret;
@@ -2687,7 +2687,7 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm,
virStorageSourcePtr backingStore,
virStorageSourcePtr chain,
qemuBlockStorageSourceAttachDataPtr data,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
@@ -2855,7 +2855,7 @@ qemuBlockNamedNodeDataGetBitmapByName(virHashTablePtr blockNamedNodeData,
virHashTablePtr
qemuBlockGetNamedNodeData(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virHashTable) blockNamedNodeData = NULL;
@@ -3178,7 +3178,7 @@ qemuBlockBitmapsHandleCommitFinish(virStorageSourcePtr topsrc,
static int
qemuBlockReopenFormat(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virJSONValue) reopenprops = NULL;
@@ -3221,7 +3221,7 @@ qemuBlockReopenFormat(virDomainObjPtr vm,
int
qemuBlockReopenReadWrite(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (!src->readonly)
return 0;
@@ -3250,7 +3250,7 @@ qemuBlockReopenReadWrite(virDomainObjPtr vm,
int
qemuBlockReopenReadOnly(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (src->readonly)
return 0;
diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h
index 35148ea2ba..55583faa93 100644
--- a/src/qemu/qemu_block.h
+++ b/src/qemu/qemu_block.h
@@ -47,7 +47,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodesdata,
int
qemuBlockNodeNamesDetect(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
virHashTablePtr
qemuBlockGetNodeData(virJSONValuePtr data);
@@ -140,7 +140,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitorPtr mon,
int
qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSourcePtr src);
struct _qemuBlockStorageSourceChainData {
@@ -205,7 +205,7 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm,
virStorageSourcePtr backingStore,
virStorageSourcePtr chain,
qemuBlockStorageSourceAttachDataPtr data,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockStorageSourceCreateDetectSize(virHashTablePtr blockNamedNodeData,
@@ -225,7 +225,7 @@ qemuBlockNamedNodeDataGetBitmapByName(virHashTablePtr blockNamedNodeData,
virHashTablePtr
qemuBlockGetNamedNodeData(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockGetBitmapMergeActions(virStorageSourcePtr topsrc,
@@ -259,11 +259,11 @@ qemuBlockBitmapsHandleCommitFinish(virStorageSourcePtr topsrc,
int
qemuBlockReopenReadWrite(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockReopenReadOnly(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool
qemuBlockStorageSourceNeedsStorageSliceLayer(const virStorageSource *src);
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 265f449b7a..62b8e014d3 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -567,7 +567,7 @@ qemuBlockJobRefreshJobs(virDomainObjPtr vm)
job->reconnected = true;
if (job->newstate != -1)
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
/* 'job' may be invalid after this update */
}
@@ -834,7 +834,7 @@ qemuBlockJobEventProcessLegacy(virQEMUDriverPtr driver,
static void
qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSourcePtr chain)
{
g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
@@ -938,7 +938,7 @@ qemuBlockJobClearConfigChain(virDomainObjPtr vm,
static int
qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virHashTable) blockNamedNodeData = NULL;
@@ -989,7 +989,7 @@ static void
qemuBlockJobProcessEventCompletedPull(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSourcePtr baseparent = NULL;
virDomainDiskDefPtr cfgdisk = NULL;
@@ -1093,7 +1093,7 @@ qemuBlockJobDeleteImages(virQEMUDriverPtr driver,
static int
qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virHashTable) blockNamedNodeData = NULL;
@@ -1156,7 +1156,7 @@ static void
qemuBlockJobProcessEventCompletedCommit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSourcePtr baseparent = NULL;
virDomainDiskDefPtr cfgdisk = NULL;
@@ -1248,7 +1248,7 @@ static void
qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSourcePtr baseparent = NULL;
virDomainDiskDefPtr cfgdisk = NULL;
@@ -1322,7 +1322,7 @@ qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver,
static int
qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virHashTable) blockNamedNodeData = NULL;
@@ -1360,7 +1360,7 @@ static void
qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name,
vm->def->name);
@@ -1396,7 +1396,7 @@ static void
qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
VIR_DEBUG("copy job '%s' on VM '%s' aborted", job->name,
vm->def->name);
@@ -1416,7 +1416,7 @@ static void
qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virJSONValue) actions = virJSONValueNewArray();
@@ -1452,7 +1452,7 @@ static void
qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
@@ -1495,7 +1495,7 @@ static void
qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuBlockjobState newstate,
unsigned long long progressCurrent,
unsigned long long progressTotal)
@@ -1540,7 +1540,7 @@ static void
qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned long long progressCurrent,
unsigned long long progressTotal)
{
@@ -1600,7 +1600,7 @@ static void
qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuMonitorJobInfoPtr *jobinfo = NULL;
size_t njobinfo = 0;
@@ -1682,7 +1682,7 @@ static void
qemuBlockJobEventProcess(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuBlockJobDataPtr job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
switch ((qemuBlockjobState) job->newstate) {
diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c
index e9547da555..ec811c9c63 100644
--- a/src/qemu/qemu_checkpoint.c
+++ b/src/qemu/qemu_checkpoint.c
@@ -162,7 +162,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm,
actions = virJSONValueNewArray();
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
return -1;
for (i = 0; i < chkdef->ndisks; i++) {
@@ -192,7 +192,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm,
goto relabel;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN) &&
- qemuBlockReopenReadWrite(vm, src, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockReopenReadWrite(vm, src, VIR_ASYNC_JOB_NONE) < 0)
goto relabel;
relabelimages = g_slist_prepend(relabelimages, src);
@@ -208,7 +208,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm,
virStorageSourcePtr src = next->data;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
- ignore_value(qemuBlockReopenReadOnly(vm, src, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuBlockReopenReadOnly(vm, src, VIR_ASYNC_JOB_NONE));
ignore_value(qemuDomainStorageSourceAccessAllow(driver, vm, src,
true, false, false));
@@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain,
/* Unlike snapshots, the RNG schema already ensured a sane filename. */
/* We are going to modify the domain below. */
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return NULL;
if (redefine) {
@@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain,
checkpoint = virGetDomainCheckpoint(domain, chk->def->name);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return checkpoint;
}
@@ -588,13 +588,13 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm,
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
/* enumerate disks relevant for the checkpoint which are also present in the
@@ -671,7 +671,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm,
/* now do a final refresh */
virHashFree(blockNamedNodeData);
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
qemuDomainObjEnterMonitor(vm);
@@ -697,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -781,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm,
VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY |
VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (!metadata_only) {
@@ -849,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
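[Note: the checkpoint code now follows the generic begin/end job pattern used
throughout this series. Condensed, the shape every converted caller takes is the
sketch below; exampleModify is a hypothetical caller, while the vir* job helpers
and VIR_JOB_MODIFY are the renamed API from this patch:

    static int
    exampleModify(virDomainObjPtr vm)
    {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        int ret = -1;

        /* serializes against other jobs on this domain */
        if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
            return -1;

        if (virDomainObjCheckActive(vm) < 0)
            goto endjob;

        /* ... state-changing work ... */
        ret = 0;

     endjob:
        virDomainObjEndJob(vm, &priv->job);
        return ret;
    }
]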
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index cc89dec3b4..420c53b82e 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -77,26 +77,26 @@ VIR_LOG_INIT("qemu.qemu_domain");
static virDomainJobType
-qemuDomainJobStatusToType(qemuDomainJobStatus status)
+virDomainJobStatusToType(virDomainJobStatus status)
{
switch (status) {
- case QEMU_DOMAIN_JOB_STATUS_NONE:
+ case VIR_DOMAIN_JOB_STATUS_NONE:
break;
- case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
- case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
- case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
- case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
- case QEMU_DOMAIN_JOB_STATUS_PAUSED:
+ case VIR_DOMAIN_JOB_STATUS_ACTIVE:
+ case VIR_DOMAIN_JOB_STATUS_MIGRATING:
+ case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
+ case VIR_DOMAIN_JOB_STATUS_PAUSED:
return VIR_DOMAIN_JOB_UNBOUNDED;
- case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_COMPLETED:
return VIR_DOMAIN_JOB_COMPLETED;
- case QEMU_DOMAIN_JOB_STATUS_FAILED:
+ case VIR_DOMAIN_JOB_STATUS_FAILED:
return VIR_DOMAIN_JOB_FAILED;
- case QEMU_DOMAIN_JOB_STATUS_CANCELED:
+ case VIR_DOMAIN_JOB_STATUS_CANCELED:
return VIR_DOMAIN_JOB_CANCELLED;
}
@@ -151,11 +151,11 @@ int
qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
virDomainJobInfoPtr info)
{
- info->type = qemuDomainJobStatusToType(jobInfo->status);
+ info->type = virDomainJobStatusToType(jobInfo->status);
info->timeElapsed = jobInfo->timeElapsed;
switch (jobInfo->statsType) {
- case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION:
info->memTotal = jobInfo->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred;
@@ -168,25 +168,25 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
jobInfo->mirrorStats.transferred;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
info->memTotal = jobInfo->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
info->memTotal = jobInfo->stats.dump.total;
info->memProcessed = jobInfo->stats.dump.completed;
info->memRemaining = info->memTotal - info->memProcessed;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP:
info->fileTotal = jobInfo->stats.backup.total;
info->fileProcessed = jobInfo->stats.backup.transferred;
info->fileRemaining = info->fileTotal - info->fileProcessed;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
+ case VIR_DOMAIN_JOB_STATS_TYPE_NONE:
break;
}
@@ -315,7 +315,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
/* The remaining stats are disk, mirror, or migration specific
* so if this is a SAVEDUMP, we can just skip them */
- if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
+ if (jobInfo->statsType == VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
goto done;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
@@ -364,7 +364,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
goto error;
done:
- *type = qemuDomainJobStatusToType(jobInfo->status);
+ *type = virDomainJobStatusToType(jobInfo->status);
*params = par;
*nparams = npar;
return 0;
@@ -407,7 +407,7 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
stats->total - stats->completed) < 0)
goto error;
- *type = qemuDomainJobStatusToType(jobInfo->status);
+ *type = virDomainJobStatusToType(jobInfo->status);
*params = par;
*nparams = npar;
return 0;
@@ -459,9 +459,9 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
return -1;
}
- if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
+ if (jobInfo->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
virTypedParamListAddBoolean(par,
-                                    jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED,
+                                    jobInfo->status == VIR_DOMAIN_JOB_STATUS_COMPLETED,
VIR_DOMAIN_JOB_SUCCESS) < 0)
return -1;
@@ -470,7 +470,7 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
return -1;
*nparams = virTypedParamListStealParams(par, params);
- *type = qemuDomainJobStatusToType(jobInfo->status);
+ *type = virDomainJobStatusToType(jobInfo->status);
return 0;
}
@@ -482,23 +482,23 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
int *nparams)
{
switch (jobInfo->statsType) {
- case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
- case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION:
+ case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams);
- case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams);
- case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP:
return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams);
- case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
+ case VIR_DOMAIN_JOB_STATS_TYPE_NONE:
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("invalid job statistics type"));
break;
default:
- virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType);
+ virReportEnumRangeError(virDomainJobStatsType, jobInfo->statsType);
break;
}
@@ -618,12 +618,12 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf,
static int
qemuDomainFormatJobPrivate(virBufferPtr buf,
- qemuDomainJobObjPtr job,
+ virDomainJobObjPtr job,
virDomainObjPtr vm)
{
qemuDomainJobPrivatePtr priv = job->privateData;
- if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0)
return -1;
@@ -634,18 +634,18 @@ qemuDomainFormatJobPrivate(virBufferPtr buf,
}
static void
-qemuDomainCurrentJobInfoInit(qemuDomainJobObjPtr job,
+qemuDomainCurrentJobInfoInit(virDomainJobObjPtr job,
unsigned long long now)
{
qemuDomainJobPrivatePtr priv = job->privateData;
priv->current = g_new0(qemuDomainJobInfo, 1);
- priv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
+ priv->current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->current->started = now;
}
static void
-qemuDomainJobInfoSetOperation(qemuDomainJobObjPtr job,
+qemuDomainJobInfoSetOperation(virDomainJobObjPtr job,
virDomainJobOperation operation)
{
qemuDomainJobPrivatePtr priv = job->privateData;
@@ -736,7 +736,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm,
return -1;
if (n > 0) {
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_WARN("Found disks marked for migration but we were not "
"migrating");
n = 0;
@@ -762,7 +762,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm,
static int
qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt,
- qemuDomainJobObjPtr job,
+ virDomainJobObjPtr job,
virDomainObjPtr vm)
{
qemuDomainJobPrivatePtr priv = job->privateData;
@@ -806,7 +806,7 @@ qemuDomainGetMaxQueuedJobs(virDomainObjPtr vm)
return cfg->maxQueuedJobs;
}
-static qemuDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = {
+static virDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = {
.allocJobPrivate = qemuJobAllocPrivate,
.freeJobPrivate = qemuJobFreePrivate,
.resetJobPrivate = qemuJobResetPrivate,
@@ -820,7 +820,7 @@ static qemuDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = {
.getMaxQueuedJobs = qemuDomainGetMaxQueuedJobs,
};
-static qemuDomainJobPrivateCallbacks qemuJobPrivateCallbacks = {
+static virDomainJobPrivateCallbacks qemuJobPrivateCallbacks = {
.saveStatus = qemuDomainSaveStatus,
.jobcb = &qemuJobPrivateJobCallbacks,
};
@@ -2276,7 +2276,7 @@ qemuDomainObjPrivateAlloc(void *opaque)
if (VIR_ALLOC(priv) < 0)
return NULL;
- if (qemuDomainObjInitJob(&priv->job, &qemuJobPrivateCallbacks) < 0) {
+ if (virDomainObjInitJob(&priv->job, &qemuJobPrivateCallbacks) < 0) {
virReportSystemError(errno, "%s",
_("Unable to init qemu driver mutexes"));
goto error;
@@ -2387,7 +2387,7 @@ qemuDomainObjPrivateFree(void *data)
qemuDomainObjPrivateDataClear(priv);
virObjectUnref(priv->monConfig);
- qemuDomainObjFreeJob(&priv->job);
+ virDomainObjFreeJob(&priv->job);
VIR_FREE(priv->lockState);
VIR_FREE(priv->origname);
@@ -2994,7 +2994,7 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
if (priv->lockState)
virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n",
priv->lockState);
- if (qemuDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0)
+ if (virDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0)
return -1;
if (priv->fakeReboot)
@@ -3653,7 +3653,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
priv->lockState = virXPathString("string(./lockstate)", ctxt);
- if (qemuDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0)
+ if (virDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0)
goto error;
priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;
@@ -6083,7 +6083,7 @@ qemuDomainSaveConfig(virDomainObjPtr obj)
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call
- * Must have already called qemuDomainObjBeginJob() and checked
+ * Must have already called virDomainObjBeginJob() and checked
* that the VM is still active; may not be used for nested async
* jobs.
*
@@ -6091,18 +6091,18 @@ qemuDomainSaveConfig(virDomainObjPtr obj)
*/
static int
qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
int ret;
-        if ((ret = qemuDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0)
+ if ((ret = virDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0)
return ret;
if (!virDomainObjIsActive(obj)) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
- qemuDomainObjEndJob(obj, &priv->job);
+ virDomainObjEndJob(obj, &priv->job);
return -1;
}
} else if (priv->job.asyncOwner == virThreadSelfID()) {
@@ -6111,7 +6111,7 @@ qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj,
} else if (priv->job.owner != virThreadSelfID()) {
VIR_WARN("Entering a monitor without owning a job. "
"Job %s owner %s (%llu)",
- qemuDomainJobTypeToString(priv->job.active),
+ virDomainJobTypeToString(priv->job.active),
priv->job.ownerAPI, priv->job.owner);
}
@@ -6146,13 +6146,13 @@ qemuDomainObjExitMonitorInternal(virDomainObjPtr obj)
if (!hasRefs)
priv->mon = NULL;
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
- qemuDomainObjEndJob(obj, &priv->job);
+ if (priv->job.active == VIR_JOB_ASYNC_NESTED)
+ virDomainObjEndJob(obj, &priv->job);
}
void qemuDomainObjEnterMonitor(virDomainObjPtr obj)
{
- ignore_value(qemuDomainObjEnterMonitorInternal(obj, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuDomainObjEnterMonitorInternal(obj, VIR_ASYNC_JOB_NONE));
}
/* obj must NOT be locked before calling
@@ -6181,9 +6181,9 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj)
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call.
- * Must have already either called qemuDomainObjBeginJob()
+ * Must have already either called virDomainObjBeginJob()
* and checked that the VM is still active, with asyncJob of
- * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
+ * VIR_ASYNC_JOB_NONE; or already called virDomainObjBeginAsyncJob,
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
@@ -6193,7 +6193,7 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj)
*/
int
qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(obj, asyncJob);
}
@@ -6203,7 +6203,7 @@ qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj,
* obj must be locked before calling
*
* To be called immediately before any QEMU agent API call.
- * Must have already called qemuDomainObjBeginAgentJob() and
+ * Must have already called virDomainObjBeginAgentJob() and
* checked that the VM is still active.
*
* To be followed with qemuDomainObjExitAgent() once complete
@@ -7282,7 +7282,7 @@ qemuDomainRemoveInactiveLocked(virQEMUDriverPtr driver,
* qemuDomainRemoveInactiveJob:
*
* Just like qemuDomainRemoveInactive but it tries to grab a
- * QEMU_JOB_MODIFY first. Even though it doesn't succeed in
+ * VIR_JOB_MODIFY first. Even though it doesn't succeed in
* grabbing the job the control carries with
* qemuDomainRemoveInactive call.
*/
@@ -7293,12 +7293,12 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver,
bool haveJob;
qemuDomainObjPrivatePtr priv = vm->privateData;
- haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0;
+ haveJob = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactive(driver, vm);
if (haveJob)
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -7315,12 +7315,12 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver,
bool haveJob;
qemuDomainObjPrivatePtr priv = vm->privateData;
- haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0;
+ haveJob = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactiveLocked(driver, vm);
if (haveJob)
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -10210,7 +10210,7 @@ qemuDomainVcpuPersistOrder(virDomainDefPtr def)
int
qemuDomainCheckMonitor(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
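[Note: the two callback tables above are the mechanism that lets qemu keep its
job statistics and XML handling private while the job state machine itself moves
to the hypervisor layer. A sketch of the wiring another hypervisor driver would
need, assuming the same registration points; all example* names are hypothetical:

    static virDomainJobPrivateJobCallbacks exampleJobPrivateJobCb = {
        .allocJobPrivate = exampleJobAllocPrivate,
        .freeJobPrivate = exampleJobFreePrivate,
        .resetJobPrivate = exampleJobResetPrivate,
        /* ... remaining hooks elided ... */
    };

    static virDomainJobPrivateCallbacks exampleJobPrivateCb = {
        .saveStatus = exampleSaveStatus,
        .jobcb = &exampleJobPrivateJobCb,
    };

    /* in the driver's private-data allocator / destructor: */
    if (virDomainObjInitJob(&priv->job, &exampleJobPrivateCb) < 0)
        goto error;
    ...
    virDomainObjFreeJob(&priv->job);
]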
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 43fb37e786..137877e5fd 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -31,7 +31,7 @@
#include "qemu_monitor.h"
#include "qemu_agent.h"
#include "qemu_blockjob.h"
-#include "qemu_domainjob.h"
+#include "virdomainjob.h"
#include "qemu_conf.h"
#include "qemu_capabilities.h"
#include "qemu_migration_params.h"
@@ -133,7 +133,7 @@ typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
struct _qemuDomainObjPrivate {
virQEMUDriverPtr driver;
- qemuDomainJobObj job;
+ virDomainJobObj job;
virBitmapPtr namespaces;
@@ -501,7 +501,7 @@ struct _qemuDomainBackupStats {
typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
struct _qemuDomainJobInfo {
- qemuDomainJobStatus status;
+ virDomainJobStatus status;
virDomainJobOperation operation;
unsigned long long started; /* When the async job started */
unsigned long long stopped; /* When the domain's CPUs were stopped */
@@ -518,7 +518,7 @@ struct _qemuDomainJobInfo {
destination. */
bool timeDeltaSet;
/* Raw values from QEMU */
- qemuDomainJobStatsType statsType;
+ virDomainJobStatsType statsType;
union {
qemuMonitorMigrationStats mig;
qemuMonitorDumpStats dump;
@@ -584,7 +584,7 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
@@ -967,7 +967,7 @@ void qemuDomainVcpuPersistOrder(virDomainDefPtr def)
ATTRIBUTE_NONNULL(1);
int qemuDomainCheckMonitor(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool qemuDomainSupportsVideoVga(virDomainVideoDefPtr video,
virQEMUCapsPtr qemuCaps);
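[Note: qemuDomainObjEnterMonitorAsync keeps its name but now takes the generic
async job type. A sketch of a monitor call made from inside a running async job;
qemuMonitorExampleCall stands in for any real monitor API:

    int rc;

    /* asyncJob must match the job this thread started with
     * virDomainObjBeginAsyncJob(); per the EnterMonitorInternal hunk
     * above, a nested VIR_JOB_ASYNC_NESTED job is acquired internally
     * and ended again on exit. */
    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SAVE) < 0)
        return -1;

    rc = qemuMonitorExampleCall(priv->mon);   /* hypothetical */

    if (qemuDomainObjExitMonitor(vm) < 0)
        rc = -1;
    return rc;
]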
diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h
deleted file mode 100644
index f7e5cfa1fd..0000000000
--- a/src/qemu/qemu_domainjob.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * qemu_domainjob.h: helper functions for QEMU domain jobs
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library. If not, see
- * <http://www.gnu.org/licenses/>.
- */
-
-#pragma once
-
-#include <glib-object.h>
-
-#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
-#define QEMU_JOB_DEFAULT_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | \
- JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ABORT))
-
-/* Jobs which have to be tracked in domain state XML. */
-#define QEMU_DOMAIN_TRACK_JOBS \
- (JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ASYNC))
-
-/* Only 1 job is allowed at any time
- * A job includes *all* monitor commands, even those just querying
- * information, not merely actions */
-typedef enum {
- QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
- QEMU_JOB_QUERY, /* Doesn't change any state */
- QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
- QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
- QEMU_JOB_MODIFY, /* May change state */
- QEMU_JOB_ABORT, /* Abort current async job */
- QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
-
- /* The following two items must always be the last items before JOB_LAST */
- QEMU_JOB_ASYNC, /* Asynchronous job */
- QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
-
- QEMU_JOB_LAST
-} qemuDomainJob;
-VIR_ENUM_DECL(qemuDomainJob);
-
-typedef enum {
- QEMU_AGENT_JOB_NONE = 0, /* No agent job. */
- QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */
- QEMU_AGENT_JOB_MODIFY, /* May change state of domain */
-
- QEMU_AGENT_JOB_LAST
-} qemuDomainAgentJob;
-VIR_ENUM_DECL(qemuDomainAgentJob);
-
-/* Async job consists of a series of jobs that may change state. Independent
- * jobs that do not change state (and possibly others if explicitly allowed by
- * current async job) are allowed to be run even if async job is active.
- */
-typedef enum {
- QEMU_ASYNC_JOB_NONE = 0,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- QEMU_ASYNC_JOB_SAVE,
- QEMU_ASYNC_JOB_DUMP,
- QEMU_ASYNC_JOB_SNAPSHOT,
- QEMU_ASYNC_JOB_START,
- QEMU_ASYNC_JOB_BACKUP,
-
- QEMU_ASYNC_JOB_LAST
-} qemuDomainAsyncJob;
-VIR_ENUM_DECL(qemuDomainAsyncJob);
-
-typedef enum {
- QEMU_DOMAIN_JOB_STATUS_NONE = 0,
- QEMU_DOMAIN_JOB_STATUS_ACTIVE,
- QEMU_DOMAIN_JOB_STATUS_MIGRATING,
- QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
- QEMU_DOMAIN_JOB_STATUS_PAUSED,
- QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
- QEMU_DOMAIN_JOB_STATUS_COMPLETED,
- QEMU_DOMAIN_JOB_STATUS_FAILED,
- QEMU_DOMAIN_JOB_STATUS_CANCELED,
-} qemuDomainJobStatus;
-
-typedef enum {
- QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
- QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
- QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP,
- QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP,
- QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
-} qemuDomainJobStatsType;
-
-typedef struct _qemuDomainJobObj qemuDomainJobObj;
-typedef qemuDomainJobObj *qemuDomainJobObjPtr;
-
-typedef void *(*qemuDomainObjPrivateJobAlloc)(void);
-typedef void (*qemuDomainObjPrivateJobFree)(void *);
-typedef void (*qemuDomainObjPrivateJobReset)(void *);
-typedef void (*qemuDomainObjPrivateSaveStatus)(virDomainObjPtr);
-typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr,
- qemuDomainJobObjPtr,
- virDomainObjPtr);
-typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr,
-                                            qemuDomainJobObjPtr,
- virDomainObjPtr);
-typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObjPtr,
- virDomainJobOperation);
-typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr,
- unsigned long long);
-typedef int (*qemuDomainObjGetJobsQueued)(virDomainObjPtr);
-typedef void (*qemuDomainObjIncreaseJobsQueued)(virDomainObjPtr);
-typedef void (*qemuDomainObjDecreaseJobsQueued)(virDomainObjPtr);
-typedef int (*qemuDomainObjGetMaxQueuedJobs)(virDomainObjPtr);
-
-typedef struct _qemuDomainJobPrivateJobCallbacks qemuDomainJobPrivateJobCallbacks;
-typedef qemuDomainJobPrivateJobCallbacks *qemuDomainJobPrivateJobCallbacksPtr;
-struct _qemuDomainJobPrivateJobCallbacks {
- qemuDomainObjPrivateJobAlloc allocJobPrivate;
- qemuDomainObjPrivateJobFree freeJobPrivate;
- qemuDomainObjPrivateJobReset resetJobPrivate;
- qemuDomainObjPrivateJobFormat formatJob;
- qemuDomainObjPrivateJobParse parseJob;
- qemuDomainObjJobInfoSetOperation setJobInfoOperation;
- qemuDomainObjCurrentJobInfoInit currentJobInfoInit;
- qemuDomainObjGetJobsQueued getJobsQueued;
- qemuDomainObjIncreaseJobsQueued increaseJobsQueued;
- qemuDomainObjDecreaseJobsQueued decreaseJobsQueued;
- qemuDomainObjGetMaxQueuedJobs getMaxQueuedJobs;
-};
-
-typedef struct _qemuDomainJobPrivateCallbacks qemuDomainJobPrivateCallbacks;
-typedef qemuDomainJobPrivateCallbacks *qemuDomainJobPrivateCallbacksPtr;
-struct _qemuDomainJobPrivateCallbacks {
- /* generic callbacks that we can't really categorize */
- qemuDomainObjPrivateSaveStatus saveStatus;
-
- /* Job related callbacks */
- qemuDomainJobPrivateJobCallbacksPtr jobcb;
-};
-
-struct _qemuDomainJobObj {
- virCond cond; /* Use to coordinate jobs */
-
- /* The following members are for QEMU_JOB_* */
- qemuDomainJob active; /* Currently running job */
- unsigned long long owner; /* Thread id which set current job */
- const char *ownerAPI; /* The API which owns the job */
- unsigned long long started; /* When the current job started */
-
- /* The following members are for QEMU_AGENT_JOB_* */
- qemuDomainAgentJob agentActive; /* Currently running agent job */
- unsigned long long agentOwner; /* Thread id which set current agent job */
- const char *agentOwnerAPI; /* The API which owns the agent job */
- unsigned long long agentStarted; /* When the current agent job started */
-
- /* The following members are for QEMU_ASYNC_JOB_* */
- virCond asyncCond; /* Use to coordinate with async jobs */
- qemuDomainAsyncJob asyncJob; /* Currently active async job */
- unsigned long long asyncOwner; /* Thread which set current async job */
- const char *asyncOwnerAPI; /* The API which owns the async job */
- unsigned long long asyncStarted; /* When the current async job started */
- int phase; /* Job phase (mainly for migrations) */
- unsigned long long mask; /* Jobs allowed during async job */
- bool abortJob; /* abort of the job requested */
- char *error; /* job event completion error */
- unsigned long apiFlags; /* flags passed to the API which started the async job */
-
- void *privateData; /* job specific collection of data */
- qemuDomainJobPrivateCallbacksPtr cb;
-};
-
-const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
- int phase);
-int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
- const char *phase);
-
-int qemuDomainObjBeginJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainJob job)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginAgentJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAgentJob agentJob)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAsyncJob asyncJob,
- virDomainJobOperation operation,
- unsigned long apiFlags)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginNestedJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainAsyncJob asyncJob)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginJobNowait(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj,
- qemuDomainJob job)
- G_GNUC_WARN_UNUSED_RESULT;
-
-void qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj);
-void qemuDomainObjEndAgentJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj);
-void qemuDomainObjEndAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr jobObj);
-void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job);
-void qemuDomainObjSetJobPhase(virDomainObjPtr obj,
- qemuDomainJobObjPtr job,
- int phase);
-void qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job,
- unsigned long long allowedJobs);
-int qemuDomainObjRestoreJob(qemuDomainJobObjPtr job,
- qemuDomainJobObjPtr oldJob);
-void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job);
-void qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job);
-
-bool qemuDomainTrackJob(qemuDomainJob job);
-
-void qemuDomainObjFreeJob(qemuDomainJobObjPtr job);
-
-int
-qemuDomainObjInitJob(qemuDomainJobObjPtr job,
- qemuDomainJobPrivateCallbacksPtr cb);
-
-bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob);
-
-int
-qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf,
- virDomainObjPtr vm,
- qemuDomainJobObjPtr jobObj);
-
-int
-qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm,
- xmlXPathContextPtr ctxt,
- qemuDomainJobObjPtr job);
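[Note: everything deleted above reappears in src/hypervisor/virdomainjob.h with
the qemuDomain*/QEMU_* spellings replaced by virDomain*/VIR_*, as the rename
hunks elsewhere in this patch show. For instance, assuming the mask macros follow
the same rename pattern as the enum values (the VIR_JOB_DEFAULT_MASK name is my
guess, not quoted from the new header), the default job mask becomes:

    #define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
    #define VIR_JOB_DEFAULT_MASK \
        (JOB_MASK(VIR_JOB_QUERY) | \
         JOB_MASK(VIR_JOB_DESTROY) | \
         JOB_MASK(VIR_JOB_ABORT))
]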
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c0b986cddf..ca84f9d9f9 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -155,7 +155,7 @@ static int qemuDomainObjStart(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
static int qemuDomainManagedSaveLoad(virDomainObjPtr vm,
void *opaque);
@@ -199,7 +199,7 @@ qemuAutostartDomain(virDomainObjPtr vm,
}
if (qemuDomainObjStart(NULL, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0) {
+ VIR_ASYNC_JOB_START) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to autostart VM '%s': %s"),
vm->def->name, virGetLastErrorMessage());
@@ -1753,7 +1753,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
goto cleanup;
}
- if (qemuProcessStart(conn, driver, vm, NULL, QEMU_ASYNC_JOB_START,
+ if (qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START,
NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags) < 0) {
@@ -1811,15 +1811,15 @@ static int qemuDomainSuspend(virDomainPtr dom)
cfg = virQEMUDriverGetConfig(driver);
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_SUSPEND) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_SUSPEND) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
reason = VIR_DOMAIN_PAUSED_MIGRATION;
- else if (priv->job.asyncJob == QEMU_ASYNC_JOB_SNAPSHOT)
+ else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
reason = VIR_DOMAIN_PAUSED_SNAPSHOT;
else
reason = VIR_DOMAIN_PAUSED_USER;
@@ -1830,7 +1830,7 @@ static int qemuDomainSuspend(virDomainPtr dom)
"%s", _("domain is pmsuspended"));
goto endjob;
} else if (state != VIR_DOMAIN_PAUSED) {
- if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessStopCPUs(driver, vm, reason, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
@@ -1838,7 +1838,7 @@ static int qemuDomainSuspend(virDomainPtr dom)
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -1866,7 +1866,7 @@ static int qemuDomainResume(virDomainPtr dom)
if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -1886,7 +1886,7 @@ static int qemuDomainResume(virDomainPtr dom)
state == VIR_DOMAIN_PAUSED) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resume operation failed"));
@@ -1898,7 +1898,7 @@ static int qemuDomainResume(virDomainPtr dom)
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -1918,7 +1918,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver,
int agentFlag = isReboot ? QEMU_AGENT_SHUTDOWN_REBOOT :
QEMU_AGENT_SHUTDOWN_POWERDOWN;
-    if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
@@ -1936,7 +1936,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
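[Note: agent jobs get the same treatment as monitor jobs. The full agent-side
pattern after the rename, sketched with error handling trimmed; the agent command
itself is a placeholder:

    if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0)
        return -1;

    if (qemuDomainAgentAvailable(vm, true)) {
        qemuAgentPtr agent = qemuDomainObjEnterAgent(vm);
        /* ... issue the agent command here ... */
        qemuDomainObjExitAgent(vm, agent);
    }

    virDomainObjEndAgentJob(vm, &priv->job);
]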
@@ -1951,7 +1951,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
@@ -1967,7 +1967,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -2051,8 +2051,8 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver,
if (!isReboot)
agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (!qemuDomainAgentAvailable(vm, agentForced))
@@ -2067,7 +2067,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
@@ -2080,7 +2080,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -2093,7 +2093,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -2171,7 +2171,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags)
if (virDomainResetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2189,7 +2189,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags)
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2229,7 +2229,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
reason == VIR_DOMAIN_PAUSED_STARTING_UP &&
!priv->beingDestroyed);
- if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY,
+ if (qemuProcessBeginStopJob(vm, VIR_JOB_DESTROY,
!(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0)
goto cleanup;
@@ -2246,11 +2246,11 @@ qemuDomainDestroyFlags(virDomainPtr dom,
qemuDomainSetFakeReboot(driver, vm, false);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
@@ -2260,7 +2260,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
endjob:
if (ret == 0)
qemuDomainRemoveInactive(driver, vm);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2335,7 +2335,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -2421,7 +2421,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2461,7 +2461,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period,
if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -2507,7 +2507,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2530,7 +2530,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2542,7 +2542,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2589,7 +2589,7 @@ static int qemuDomainSendKey(virDomainPtr domain,
if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2601,7 +2601,7 @@ static int qemuDomainSendKey(virDomainPtr domain,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -2771,7 +2771,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SAVE,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_SAVE,
VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
goto cleanup;
@@ -2781,13 +2781,13 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
goto endjob;
}
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
was_running = true;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SAVE) < 0)
+ VIR_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
@@ -2838,13 +2838,13 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
xml = NULL;
ret = qemuSaveImageCreate(driver, vm, path, data, compressor,
- flags, QEMU_ASYNC_JOB_SAVE);
+ flags, VIR_ASYNC_JOB_SAVE);
if (ret < 0)
goto endjob;
/* Shut it down */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_SAVED,
- QEMU_ASYNC_JOB_SAVE, 0);
+ VIR_ASYNC_JOB_SAVE, 0);
virDomainAuditStop(vm, "saved");
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
@@ -2855,7 +2855,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
virErrorPreserveLast(&save_err);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_SAVE) < 0) {
+ VIR_ASYNC_JOB_SAVE) < 0) {
VIR_WARN("Unable to resume guest CPUs after save failure");
virObjectEventStateQueue(driver->domainEventState,
virDomainEventLifecycleNewFromObj(vm,
@@ -2865,7 +2865,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
virErrorRestore(&save_err);
}
}
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
if (ret == 0)
qemuDomainRemoveInactiveJob(driver, vm);
@@ -3106,7 +3106,7 @@ static int
qemuDumpToFd(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int fd,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *dumpformat)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -3126,7 +3126,7 @@ qemuDumpToFd(virQEMUDriverPtr driver,
return -1;
if (detach)
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
else
g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree);
@@ -3215,7 +3215,7 @@ doCoreDump(virQEMUDriverPtr driver,
if (STREQ(memory_dump_format, "elf"))
memory_dump_format = NULL;
- rc = qemuDumpToFd(driver, vm, fd, QEMU_ASYNC_JOB_DUMP,
+ rc = qemuDumpToFd(driver, vm, fd, VIR_ASYNC_JOB_DUMP,
memory_dump_format);
} else {
if (dumpformat != VIR_DOMAIN_CORE_DUMP_FORMAT_RAW) {
@@ -3229,7 +3229,7 @@ doCoreDump(virQEMUDriverPtr driver,
goto cleanup;
rc = qemuMigrationSrcToFile(driver, vm, fd, compressor,
- QEMU_ASYNC_JOB_DUMP);
+ VIR_ASYNC_JOB_DUMP);
}
if (rc < 0)
@@ -3283,7 +3283,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0)
goto cleanup;
@@ -3293,7 +3293,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
priv = vm->privateData;
jobPriv = priv->job.privateData;
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */
@@ -3303,7 +3303,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
if (!(flags & VIR_DUMP_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
- QEMU_ASYNC_JOB_DUMP) < 0)
+ VIR_ASYNC_JOB_DUMP) < 0)
goto endjob;
paused = true;
@@ -3322,7 +3322,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
endjob:
if ((ret == 0) && (flags & VIR_DUMP_CRASH)) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
virDomainAuditStop(vm, "crashed");
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
@@ -3339,7 +3339,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
if (resume && virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP) < 0) {
+ VIR_ASYNC_JOB_DUMP) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -3350,7 +3350,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
}
}
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
if (ret == 0 && flags & VIR_DUMP_CRASH)
qemuDomainRemoveInactiveJob(driver, vm);
@@ -3400,7 +3400,7 @@ qemuDomainScreenshot(virDomainPtr dom,
if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -3474,7 +3474,7 @@ qemuDomainScreenshot(virDomainPtr dom,
if (unlink_tmp)
unlink(tmp);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -3522,7 +3522,7 @@ processWatchdogEvent(virQEMUDriverPtr driver,
switch (action) {
case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0) {
return;
@@ -3539,7 +3539,7 @@ processWatchdogEvent(virQEMUDriverPtr driver,
ret = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP);
+ VIR_ASYNC_JOB_DUMP);
if (ret < 0)
virReportError(VIR_ERR_OPERATION_FAILED,
@@ -3550,7 +3550,7 @@ processWatchdogEvent(virQEMUDriverPtr driver,
}
endjob:
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
}
static int
@@ -3599,7 +3599,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
bool removeInactive = false;
unsigned long flags = VIR_DUMP_MEMORY_ONLY;
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
return;
@@ -3637,7 +3637,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_CRASHED);
@@ -3665,7 +3665,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
}
endjob:
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
if (removeInactive)
qemuDomainRemoveInactiveJob(driver, vm);
}
@@ -3683,7 +3683,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver,
VIR_DEBUG("Removing device %s from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -3706,7 +3706,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver,
devAlias);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -3921,7 +3921,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm,
"from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -4003,7 +4003,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virNetDevRxFilterFree(hostFilter);
@@ -4049,7 +4049,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver,
memset(&dev, 0, sizeof(dev));
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4090,7 +4090,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -4104,7 +4104,7 @@ processBlockJobEvent(virDomainObjPtr vm,
g_autoptr(qemuBlockJobData) job = NULL;
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4126,10 +4126,10 @@ processBlockJobEvent(virDomainObjPtr vm,
job->newstate = status;
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -4139,7 +4139,7 @@ processJobStatusChangeEvent(virDomainObjPtr vm,
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4147,10 +4147,10 @@ processJobStatusChangeEvent(virDomainObjPtr vm,
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -4165,7 +4165,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver,
unsigned int stopFlags = 0;
virObjectEventPtr event = NULL;
- if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(vm, VIR_JOB_DESTROY, true) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4182,7 +4182,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver,
auditReason = "failed";
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationDstErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
@@ -4190,13 +4190,13 @@ processMonitorEOFEvent(virQEMUDriverPtr driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
eventReason);
- qemuProcessStop(driver, vm, stopReason, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, vm, stopReason, VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(vm, auditReason);
virObjectEventStateQueue(driver->domainEventState, event);
endjob:
qemuDomainRemoveInactive(driver, vm);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
@@ -4453,11 +4453,11 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
if (useAgent) {
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
} else {
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
}
@@ -4474,9 +4474,9 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
endjob:
if (useAgent)
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
else
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -4602,7 +4602,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -4641,7 +4641,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -4732,7 +4732,7 @@ qemuDomainPinEmulator(virDomainPtr dom,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -4798,7 +4798,7 @@ qemuDomainPinEmulator(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
if (cgroup_emulator)
@@ -4919,8 +4919,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
goto cleanup;
if (flags & VIR_DOMAIN_VCPU_GUEST) {
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -4938,7 +4938,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
if (ncpuinfo < 0)
goto cleanup;
@@ -5003,7 +5003,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm,
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -5052,7 +5052,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm,
ret = niothreads;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
if (info_ret) {
@@ -5190,7 +5190,7 @@ qemuDomainPinIOThread(virDomainPtr dom,
if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -5279,7 +5279,7 @@ qemuDomainPinIOThread(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
if (cgroup_iothread)
@@ -5640,7 +5640,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -5727,7 +5727,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -6076,7 +6076,7 @@ qemuDomainRestoreFlags(virConnectPtr conn,
goto cleanup;
ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path,
- false, QEMU_ASYNC_JOB_START);
+ false, VIR_ASYNC_JOB_START);
qemuProcessEndJob(vm);
@@ -6290,7 +6290,7 @@ qemuDomainObjRestore(virConnectPtr conn,
const char *path,
bool start_paused,
bool bypass_cache,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virDomainDefPtr def = NULL;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -6500,7 +6500,7 @@ qemuDomainObjStart(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
g_autofree char *managed_save = NULL;
@@ -6611,7 +6611,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
}
if (qemuDomainObjStart(dom->conn, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0)
+ VIR_ASYNC_JOB_START) < 0)
goto endjob;
dom->id = vm->def->id;
@@ -6751,7 +6751,7 @@ qemuDomainUndefineFlags(virDomainPtr dom,
if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!vm->persistent) {
@@ -6847,7 +6847,7 @@ qemuDomainUndefineFlags(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -7012,7 +7012,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm,
}
if (ret == 0)
- ret = qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE);
+ ret = qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE);
return ret;
}
@@ -7827,7 +7827,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom,
if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -7839,7 +7839,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -7884,7 +7884,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom,
if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -7953,7 +7953,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainDefFree(vmdef);
@@ -8023,7 +8023,7 @@ qemuDomainDetachDeviceLiveAndConfig(virQEMUDriverPtr driver,
if ((rc = qemuDomainDetachDeviceLive(vm, dev_copy, driver, false)) < 0)
goto cleanup;
-        if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (rc == 0 && qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
/*
@@ -8106,7 +8106,7 @@ qemuDomainDetachDeviceAliasLiveAndConfig(virQEMUDriverPtr driver,
if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0)
goto cleanup;
-        if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (rc == 0 && qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
}
@@ -8142,7 +8142,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom,
if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -8154,7 +8154,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -8180,7 +8180,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom,
if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -8192,7 +8192,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -8255,7 +8255,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom,
autostart = (autostart != 0);
if (vm->autostart != autostart) {
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name)))
@@ -8293,7 +8293,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom,
vm->autostart = autostart;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
ret = 0;
@@ -8401,7 +8401,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -8435,7 +8435,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -8577,7 +8577,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
/* QEMU and LXC implementation are identical */
@@ -8608,7 +8608,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -8831,7 +8831,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom,
}
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -8886,7 +8886,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virBitmapFree(nodeset);
@@ -9040,7 +9040,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom,
if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -9082,7 +9082,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -9116,7 +9116,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom,
if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(def = virDomainObjGetOneDef(vm, flags)))
@@ -9143,7 +9143,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -9317,7 +9317,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -9551,7 +9551,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainDefFree(persistentDefCopy);
@@ -9845,7 +9845,7 @@ qemuDomainBlockResize(virDomainPtr dom,
if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -9890,7 +9890,7 @@ qemuDomainBlockResize(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -10045,7 +10045,7 @@ qemuDomainBlockStats(virDomainPtr dom,
if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10068,7 +10068,7 @@ qemuDomainBlockStats(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -10105,7 +10105,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom,
if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10158,7 +10158,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom,
*nparams = nstats;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
VIR_FREE(blockstats);
@@ -10252,7 +10252,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom,
if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -10426,7 +10426,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virNetDevBandwidthFree(bandwidth);
@@ -10544,7 +10544,7 @@ qemuDomainGetInterfaceParameters(virDomainPtr dom,
return ret;
}
-/* This functions assumes that job QEMU_JOB_QUERY is started by a caller */
+/* This function assumes that job VIR_JOB_QUERY is started by a caller */
static int
qemuDomainMemoryStatsInternal(virDomainObjPtr vm,
virDomainMemoryStatPtr stats,
@@ -10602,12 +10602,12 @@ qemuDomainMemoryStats(virDomainPtr dom,
if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -10708,7 +10708,7 @@ qemuDomainMemoryPeek(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10752,7 +10752,7 @@ qemuDomainMemoryPeek(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
VIR_FORCE_CLOSE(fd);
@@ -10989,7 +10989,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(disk = virDomainDiskByName(vm->def, path, false))) {
@@ -11061,7 +11061,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
VIR_FREE(entry);
virDomainObjEndAPI(&vm);
@@ -12548,19 +12548,19 @@ qemuDomainGetJobInfoMigrationStats(virDomainObjPtr vm,
qemuDomainObjPrivatePtr priv = vm->privateData;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE ||
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING ||
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED ||
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
+ if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_ACTIVE ||
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_MIGRATING ||
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED ||
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
- jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
- qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_NONE,
+ jobInfo->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
+ qemuMigrationAnyFetchStats(vm, VIR_ASYNC_JOB_NONE,
jobInfo, NULL) < 0)
return -1;
- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
- jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
- qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_NONE,
+ if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_ACTIVE &&
+ jobInfo->statsType == VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
+ qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_NONE,
jobInfo) < 0)
return -1;
@@ -12580,7 +12580,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm,
qemuMonitorDumpStats stats = { 0 };
int rc;
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rc = qemuMonitorQueryDump(priv->mon, &stats);
@@ -12604,7 +12604,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm,
break;
case QEMU_MONITOR_DUMP_STATUS_ACTIVE:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
VIR_DEBUG("dump active, bytes written='%llu'
remaining='%llu'",
jobInfo->stats.dump.completed,
jobInfo->stats.dump.total -
@@ -12612,7 +12612,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm,
break;
case QEMU_MONITOR_DUMP_STATUS_COMPLETED:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
VIR_DEBUG("dump completed, bytes written='%llu'",
jobInfo->stats.dump.completed);
break;
@@ -12640,14 +12640,14 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm,
return 0;
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("migration statistics are available only on "
"the source host"));
return -1;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -12660,30 +12660,30 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm,
*jobInfo = qemuDomainJobInfoCopy(jobPriv->current);
switch ((*jobInfo)->statsType) {
- case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
- case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION:
+ case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
if (qemuDomainGetJobInfoMigrationStats(vm, *jobInfo) < 0)
goto cleanup;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
if (qemuDomainGetJobInfoDumpStats(vm, *jobInfo) < 0)
goto cleanup;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
+ case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP:
if (qemuBackupGetJobInfoStats(vm, *jobInfo) < 0)
goto cleanup;
break;
- case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
+ case VIR_DOMAIN_JOB_STATS_TYPE_NONE:
break;
}
ret = 0;
cleanup:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -12708,7 +12708,7 @@ qemuDomainGetJobInfo(virDomainPtr dom,
goto cleanup;
if (!jobInfo ||
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) {
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_NONE) {
ret = 0;
goto cleanup;
}
@@ -12750,7 +12750,7 @@ qemuDomainGetJobStats(virDomainPtr dom,
goto cleanup;
if (!jobInfo ||
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) {
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_NONE) {
*type = VIR_DOMAIN_JOB_NONE;
*params = NULL;
*nparams = 0;
@@ -12777,7 +12777,7 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm)
VIR_DEBUG("Cancelling migration job at client request");
- qemuDomainObjAbortAsyncJob(vm, &priv->job);
+ virDomainObjAbortAsyncJob(vm, &priv->job);
qemuDomainObjEnterMonitor(vm);
ret = qemuMonitorMigrateCancel(priv->mon);
if (qemuDomainObjExitMonitor(vm) < 0)
@@ -12803,7 +12803,7 @@ static int qemuDomainAbortJob(virDomainPtr dom)
if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_ABORT) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_ABORT) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12812,25 +12812,25 @@ static int qemuDomainAbortJob(virDomainPtr dom)
jobPriv = priv->job.privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("no job is active on the domain"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort incoming migration;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort VM start;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- if ((jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY ||
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ if ((jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY ||
(virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY))) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
@@ -12841,11 +12841,11 @@ static int qemuDomainAbortJob(virDomainPtr dom)
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort memory-only dump"));
@@ -12855,23 +12855,23 @@ static int qemuDomainAbortJob(virDomainPtr dom)
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_BACKUP:
- qemuBackupJobCancelBlockjobs(vm, priv->backup, true, QEMU_ASYNC_JOB_NONE);
+ case VIR_ASYNC_JOB_BACKUP:
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, true, VIR_ASYNC_JOB_NONE);
ret = 0;
break;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
- virReportEnumRangeError(qemuDomainAsyncJob, priv->job.asyncJob);
+ virReportEnumRangeError(virDomainAsyncJob, priv->job.asyncJob);
break;
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -12900,7 +12900,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12917,7 +12917,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
downtime) < 0)
goto endjob;
- if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -12930,7 +12930,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -12959,13 +12959,13 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
@@ -12985,7 +12985,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
qemuMigrationParamsFree(migParams);
@@ -13015,7 +13015,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -13029,7 +13029,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
}
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) {
- if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
@@ -13047,7 +13047,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -13075,7 +13075,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -13098,7 +13098,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
cacheSize) < 0)
goto endjob;
- if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -13111,7 +13111,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -13158,7 +13158,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -13183,7 +13183,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
bandwidth * 1024 * 1024) < 0)
goto endjob;
- if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -13201,7 +13201,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -13219,13 +13219,13 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
- if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto cleanup;
@@ -13256,7 +13256,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm,
ret = 0;
cleanup:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -13314,13 +13314,13 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom,
if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("post-copy can only be started while "
"outgoing migration is in progress"));
@@ -13341,7 +13341,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -14040,7 +14040,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd,
if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14056,7 +14056,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -14374,7 +14374,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14470,7 +14470,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm,
qemuBlockJobStarted(job, vm);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
qemuBlockJobStartupFinalize(vm, job);
@@ -14504,7 +14504,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14558,13 +14558,13 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
ignore_value(virDomainObjSave(vm, driver->xmlopt, cfg->stateDir));
if (!async) {
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
while (qemuBlockJobIsRunning(job)) {
if (virDomainObjWait(vm) < 0) {
ret = -1;
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
}
if (pivot &&
@@ -14586,8 +14586,8 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
endjob:
if (job && !async)
- qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE);
- qemuDomainObjEndJob(vm, &priv->job);
+ qemuBlockJobSyncEnd(vm, job, VIR_ASYNC_JOB_NONE);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -14666,7 +14666,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom,
if (virDomainGetBlockJobInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14694,7 +14694,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -14736,7 +14736,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom,
if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14759,7 +14759,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -14938,7 +14938,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
return -1;
}
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -15115,7 +15115,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
goto endjob;
}
} else {
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
@@ -15157,7 +15157,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
if (crdata &&
qemuBlockStorageSourceCreate(vm, mirror, mirrorBacking,
mirror->backingStore,
- crdata->srcdata[0], QEMU_ASYNC_JOB_NONE) < 0)
+ crdata->srcdata[0], VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
@@ -15214,7 +15214,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm,
if (need_unlink && virStorageFileUnlink(mirror) < 0)
VIR_WARN("%s", _("unable to remove just-created copy
target"));
virStorageFileDeinit(mirror);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
qemuBlockJobStartupFinalize(vm, job);
return ret;
@@ -15438,7 +15438,7 @@ qemuDomainBlockCommit(virDomainPtr dom,
if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -15654,7 +15654,7 @@ qemuDomainBlockCommit(virDomainPtr dom,
virErrorRestore(&orig_err);
}
qemuBlockJobStartupFinalize(vm, job);
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -15683,7 +15683,7 @@ qemuDomainOpenGraphics(virDomainPtr dom,
if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -15726,7 +15726,7 @@ qemuDomainOpenGraphics(virDomainPtr dom,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -15794,14 +15794,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom,
if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
qemuDomainObjEnterMonitor(vm);
ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd",
(flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH));
if (qemuDomainObjExitMonitor(vm) < 0)
ret = -1;
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
if (ret < 0)
goto cleanup;
@@ -16041,7 +16041,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom,
cfg = virQEMUDriverGetConfig(driver);
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -16305,7 +16305,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
VIR_FREE(info.group_name);
@@ -16349,7 +16349,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
/* the API check guarantees that only one of the definitions will be set */
@@ -16462,7 +16462,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
VIR_FREE(reply.group_name);
@@ -16495,7 +16495,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom,
if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16536,7 +16536,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom,
ret = n;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -16574,7 +16574,7 @@ qemuDomainSetMetadata(virDomainPtr dom,
if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
ret = virDomainObjSetMetadata(vm, type, metadata, key, uri,
@@ -16587,7 +16587,7 @@ qemuDomainSetMetadata(virDomainPtr dom,
virObjectEventStateQueue(driver->domainEventState, ev);
}
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -16696,7 +16696,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm,
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE))
return -1;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
return -1;
if ((ret = virDomainObjCheckActive(vm)) < 0)
@@ -16705,7 +16705,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm,
ret = qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -16718,8 +16718,8 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if ((ret = virDomainObjCheckActive(vm)) < 0)
@@ -16733,7 +16733,7 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
@@ -16828,7 +16828,7 @@ qemuDomainPMWakeup(virDomainPtr dom,
if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16840,7 +16840,7 @@ qemuDomainPMWakeup(virDomainPtr dom,
ret = -1;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -16886,8 +16886,8 @@ qemuDomainQemuAgentCommand(virDomainPtr domain,
if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16905,7 +16905,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain,
VIR_FREE(result);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -16984,8 +16984,8 @@ qemuDomainFSTrim(virDomainPtr dom,
if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -16999,7 +16999,7 @@ qemuDomainFSTrim(virDomainPtr dom,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -17156,8 +17156,8 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17172,7 +17172,7 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm,
ret = 0;
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
@@ -17189,7 +17189,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm,
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17231,7 +17231,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -17302,8 +17302,8 @@ qemuDomainGetTime(virDomainPtr dom,
if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17322,7 +17322,7 @@ qemuDomainGetTime(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -17340,8 +17340,8 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17355,7 +17355,7 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
@@ -17397,7 +17397,7 @@ qemuDomainSetTime(virDomainPtr dom,
if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17417,7 +17417,7 @@ qemuDomainSetTime(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -17445,8 +17445,8 @@ qemuDomainFSFreeze(virDomainPtr dom,
if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17455,7 +17455,7 @@ qemuDomainFSFreeze(virDomainPtr dom,
ret = qemuSnapshotFSFreeze(vm, mountpoints, nmountpoints);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -17489,8 +17489,8 @@ qemuDomainFSThaw(virDomainPtr dom,
if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17499,7 +17499,7 @@ qemuDomainFSThaw(virDomainPtr dom,
ret = qemuSnapshotFSThaw(vm, true);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -18002,7 +18002,7 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver G_GNUC_UNUSED,
goto cleanup;
if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) &&
- qemuDomainRefreshVcpuHalted(dom, QEMU_ASYNC_JOB_NONE) < 0) {
+ qemuDomainRefreshVcpuHalted(dom, VIR_ASYNC_JOB_NONE) < 0) {
/* it's ok to be silent and go ahead, because halted vcpu info
* wasn't here from the beginning */
virResetLastError();
@@ -18743,9 +18743,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
int rv;
if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT)
- rv = qemuDomainObjBeginJobNowait(vm, &priv->job, QEMU_JOB_QUERY);
+ rv = virDomainObjBeginJobNowait(vm, &priv->job, VIR_JOB_QUERY);
else
- rv = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY);
+ rv = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY);
if (rv == 0)
domflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
@@ -18756,7 +18756,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
domflags |= QEMU_DOMAIN_STATS_BACKING;
if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) {
if (HAVE_JOB(domflags) && vm)
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
virObjectUnlock(vm);
goto cleanup;
@@ -18766,7 +18766,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
tmpstats[nstats++] = tmp;
if (HAVE_JOB(domflags))
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
virObjectUnlock(vm);
}
@@ -18814,8 +18814,8 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm,
qemuAgentPtr agent;
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
return ret;
if (virDomainObjCheckActive(vm) < 0)
@@ -18829,7 +18829,7 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
return ret;
}
@@ -18928,7 +18928,7 @@ qemuDomainGetFSInfo(virDomainPtr dom,
if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -18937,7 +18937,7 @@ qemuDomainGetFSInfo(virDomainPtr dom,
ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
g_free(agentinfo);
@@ -18976,8 +18976,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom,
break;
case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT:
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -18988,7 +18988,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
break;
@@ -19031,8 +19031,8 @@ qemuDomainSetUserPassword(virDomainPtr dom,
if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -19052,7 +19052,7 @@ qemuDomainSetUserPassword(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -19187,7 +19187,7 @@ static int qemuDomainRename(virDomainPtr dom,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
@@ -19234,7 +19234,7 @@ static int qemuDomainRename(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -19327,8 +19327,8 @@ qemuDomainGetGuestVcpus(virDomainPtr dom,
if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job,
- QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job,
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -19347,7 +19347,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
VIR_FREE(info);
@@ -19389,7 +19389,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom,
if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -19435,7 +19435,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom,
qemuDomainObjExitAgent(vm, agent);
endjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
cleanup:
VIR_FREE(info);
@@ -19485,7 +19485,7 @@ qemuDomainSetVcpu(virDomainPtr dom,
if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -19512,7 +19512,7 @@ qemuDomainSetVcpu(virDomainPtr dom,
ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virBitmapFree(map);
@@ -19544,7 +19544,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom,
if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -19561,7 +19561,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom,
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
!src->nodestorage &&
- qemuBlockNodeNamesDetect(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(vm, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
if (!src->nodestorage) {
@@ -19581,7 +19581,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -19639,7 +19639,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom,
if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -19670,7 +19670,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virDomainObjEndAPI(&vm);
@@ -19763,7 +19763,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm,
virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
return -1;
qemuDomainObjEnterMonitor(vm);
@@ -19783,7 +19783,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm,
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
return ret;
}
@@ -19966,7 +19966,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom,
if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_QUERY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -20014,10 +20014,10 @@ qemuDomainGetGuestInfo(virDomainPtr dom,
qemuDomainObjExitAgent(vm, agent);
endagentjob:
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
if (nfs > 0) {
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -20028,7 +20028,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom,
qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams,
&maxparams);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
cleanup:
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 7b626ee383..6927739501 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -323,7 +323,7 @@ qemuDomainChangeMediaLegacy(virDomainObjPtr vm,
int
qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virJSONValue) props = NULL;
@@ -368,7 +368,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver,
*/
int
qemuHotplugRemoveDBusVMState(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
@@ -405,7 +405,7 @@ qemuHotplugRemoveDBusVMState(virDomainObjPtr vm,
static int
qemuHotplugAttachManagedPR(virDomainObjPtr vm,
virStorageSourcePtr src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virJSONValuePtr props = NULL;
@@ -453,7 +453,7 @@ qemuHotplugAttachManagedPR(virDomainObjPtr vm,
*/
static int
qemuHotplugRemoveManagedPR(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virErrorPtr orig_err;
@@ -618,7 +618,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver,
if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(vm, newsrc, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
@@ -654,7 +654,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver,
/* remove PR manager object if unneeded */
if (managedpr)
- ignore_value(qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE));
/* revert old image do the disk definition */
if (oldsrc)
@@ -716,7 +716,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
if (VIR_REALLOC_N(vm->def->disks, vm->def->ndisks + 1) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(vm, disk->src, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
qemuDomainObjEnterMonitor(vm);
@@ -779,7 +779,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver,
ret = -2;
if (virStorageSourceChainHasManagedPR(disk->src) &&
- qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE) < 0)
ret = -2;
virDomainAuditDisk(vm, NULL, disk->src, "attach", false);
@@ -1665,7 +1665,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver,
void
qemuDomainDelTLSObjects(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias)
{
@@ -1695,7 +1695,7 @@ qemuDomainDelTLSObjects(virDomainObjPtr vm,
int
qemuDomainAddTLSObjects(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValuePtr *secProps,
virJSONValuePtr *tlsProps)
{
@@ -1802,7 +1802,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriverPtr driver,
goto cleanup;
dev->data.tcp.tlscreds = true;
- if (qemuDomainAddTLSObjects(vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuDomainAddTLSObjects(vm, VIR_ASYNC_JOB_NONE,
&secProps, &tlsProps) < 0)
goto cleanup;
@@ -1922,7 +1922,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver,
ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias));
ignore_value(qemuDomainObjExitMonitor(vm));
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2202,7 +2202,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver,
ignore_value(qemuDomainObjExitMonitor(vm));
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2317,7 +2317,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver,
releaseaddr = false;
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2415,13 +2415,13 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver,
virObjectEventStateQueue(driver->domainEventState, event);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(vm, VIR_ASYNC_JOB_NONE));
/* mem is consumed by vm->def */
mem = NULL;
/* this step is best effort, removing the device would be so much trouble */
- ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, VIR_ASYNC_JOB_NONE));
ret = 0;
@@ -4099,7 +4099,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver,
if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC,
&dev->data.vnc.auth,
cfg->vncPassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
@@ -4146,7 +4146,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver,
if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
&dev->data.spice.auth,
cfg->spicePassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
@@ -4281,7 +4281,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver,
ignore_value(qemuRemoveSharedDevice(driver, &dev, vm->def->name));
if (virStorageSourceChainHasManagedPR(disk->src) &&
- qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
ret = 0;
@@ -4358,7 +4358,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver,
virDomainMemoryDefFree(mem);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(vm, VIR_ASYNC_JOB_NONE));
/* decrease the mlock limit after memory unplug if necessary */
ignore_value(qemuDomainAdjustMaxMemLock(vm, false));
@@ -5903,7 +5903,7 @@ qemuDomainRemoveVcpu(virDomainObjPtr vm,
virErrorPtr save_error = NULL;
size_t i;
- if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(vm, VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
/* validation requires us to set the expected state prior to calling it */
@@ -6052,7 +6052,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
if (newhotplug)
vm->def->individualvcpus = true;
- if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(vm, VIR_ASYNC_JOB_NONE, false) < 0)
goto cleanup;
/* validation requires us to set the expected state prior to calling it */
diff --git a/src/qemu/qemu_hotplug.h b/src/qemu/qemu_hotplug.h
index 51af92f840..3618af87c7 100644
--- a/src/qemu/qemu_hotplug.h
+++ b/src/qemu/qemu_hotplug.h
@@ -32,12 +32,12 @@ int qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver,
bool force);
void qemuDomainDelTLSObjects(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias);
int qemuDomainAddTLSObjects(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValuePtr *secProps,
virJSONValuePtr *tlsProps);
@@ -146,7 +146,7 @@ unsigned long long qemuDomainGetUnplugTimeout(virDomainObjPtr vm) G_GNUC_NO_INLI
int qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuHotplugRemoveDBusVMState(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 4fa2e4cf62..b29f3130b7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -83,7 +83,7 @@ VIR_ENUM_IMPL(virMigrationJobPhase,
static int
qemuMigrationJobStart(virDomainObjPtr vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
@@ -103,7 +103,7 @@ qemuMigrationJobContinue(virDomainObjPtr obj)
static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
ATTRIBUTE_NONNULL(1);
static void
@@ -148,7 +148,7 @@ qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
/* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best */
@@ -420,7 +420,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
devicename = diskAlias;
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
if (port == 0) {
@@ -463,7 +463,7 @@ qemuMigrationDstStopNBDServer(virDomainObjPtr vm,
if (!mig->nbd)
return 0;
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0)
return -1;
if (qemuMonitorNBDServerStop(priv->mon) < 0)
@@ -505,7 +505,7 @@ qemuMigrationNBDReportMirrorError(qemuBlockJobDataPtr job,
*/
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
size_t i;
size_t notReady = 0;
@@ -559,7 +559,7 @@ qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
*/
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool check)
{
size_t i;
@@ -643,7 +643,7 @@ qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm,
virDomainDiskDefPtr disk,
qemuBlockJobDataPtr job,
bool failNoJob,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int rv;
@@ -688,7 +688,7 @@ qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm,
static int
qemuMigrationSrcNBDCopyCancel(virDomainObjPtr vm,
bool check,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn)
{
virErrorPtr err = NULL;
@@ -836,7 +836,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virDomainObjPtr vm,
false)))
return -1;
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);
@@ -877,7 +877,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virDomainObjPtr vm,
diskAlias);
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
@@ -1032,14 +1032,14 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
}
}
- while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+ while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) != 1) {
if (rv < 0)
return -1;
if (priv->job.abortJob) {
- jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
+ jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
return -1;
}
@@ -1054,7 +1054,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
return -1;
}
- qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->current);
/* Okay, all disks are ready. Modify migrate_flags */
@@ -1406,7 +1406,7 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
if (state == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm,
VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
VIR_WARN("Unable to pause guest CPUs for %s",
vm->def->name);
} else {
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
@@ -1438,31 +1438,31 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
{
switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_POSTCOPY;
break;
case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_NONE;
break;
case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_PAUSED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
break;
case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
@@ -1477,7 +1477,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
int
qemuMigrationAnyFetchStats(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuDomainJobInfoPtr jobInfo,
char **error)
{
@@ -1505,23 +1505,23 @@ qemuMigrationJobName(virDomainObjPtr vm)
qemuDomainObjPrivatePtr priv = vm->privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
return _("migration out job");
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
return _("domain save job");
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
return _("domain core dump job");
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
return _("undefined");
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return _("migration in job");
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
return _("snapshot job");
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
return _("start job");
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
return _("backup job");
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
return _("job");
}
@@ -1530,7 +1530,7 @@ qemuMigrationJobName(virDomainObjPtr vm)
static int
qemuMigrationJobCheckStatus(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
@@ -1548,28 +1548,28 @@ qemuMigrationJobCheckStatus(virDomainObjPtr vm,
qemuMigrationUpdateJobType(jobInfo);
switch (jobInfo->status) {
- case QEMU_DOMAIN_JOB_STATUS_NONE:
+ case VIR_DOMAIN_JOB_STATUS_NONE:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm), _("is not active"));
goto cleanup;
- case QEMU_DOMAIN_JOB_STATUS_FAILED:
+ case VIR_DOMAIN_JOB_STATUS_FAILED:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm),
error ? error : _("unexpectedly failed"));
goto cleanup;
- case QEMU_DOMAIN_JOB_STATUS_CANCELED:
+ case VIR_DOMAIN_JOB_STATUS_CANCELED:
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuMigrationJobName(vm), _("canceled by client"));
goto cleanup;
- case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
- case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
- case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
- case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
- case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
- case QEMU_DOMAIN_JOB_STATUS_PAUSED:
+ case VIR_DOMAIN_JOB_STATUS_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_ACTIVE:
+ case VIR_DOMAIN_JOB_STATUS_MIGRATING:
+ case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
+ case VIR_DOMAIN_JOB_STATUS_PAUSED:
break;
}
@@ -1598,7 +1598,7 @@ enum qemuMigrationCompletedFlags {
*/
static int
qemuMigrationAnyCompleted(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
@@ -1634,7 +1634,7 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm,
* wait again for the real end of the migration.
*/
if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
VIR_DEBUG("Migration paused before switchover");
return 1;
}
@@ -1644,38 +1644,38 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm,
* will continue waiting until the migrate state changes to completed.
*/
if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
VIR_DEBUG("Migration switched to post-copy");
return 1;
}
- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
+ if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
return 1;
else
return 0;
error:
switch (jobInfo->status) {
- case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
- case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
- case QEMU_DOMAIN_JOB_STATUS_PAUSED:
+ case VIR_DOMAIN_JOB_STATUS_MIGRATING:
+ case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
+ case VIR_DOMAIN_JOB_STATUS_PAUSED:
/* The migration was aborted by us rather than QEMU itself. */
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2;
- case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
/* Something failed after QEMU already finished the migration. */
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -1;
- case QEMU_DOMAIN_JOB_STATUS_FAILED:
- case QEMU_DOMAIN_JOB_STATUS_CANCELED:
+ case VIR_DOMAIN_JOB_STATUS_FAILED:
+ case VIR_DOMAIN_JOB_STATUS_CANCELED:
/* QEMU aborted the migration. */
return -1;
- case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
- case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
- case QEMU_DOMAIN_JOB_STATUS_NONE:
+ case VIR_DOMAIN_JOB_STATUS_ACTIVE:
+ case VIR_DOMAIN_JOB_STATUS_COMPLETED:
+ case VIR_DOMAIN_JOB_STATUS_NONE:
/* Impossible. */
break;
}
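
Reviewer note (not part of the diff): the -1/-2 split in the error switch
above is load-bearing for callers. A hedged summary of the return
convention, paraphrased from the doc comment preceding
qemuMigrationSrcWaitForCompletion in this file:

    /*
     *   1 -> requested milestone reached: migration completed, or paused /
     *        switched to post-copy when the matching COMPLETED_* flag is set
     *   0 -> migration still in progress, keep waiting
     *  -1 -> migration failed and QEMU is no longer migrating
     *  -2 -> something failed on our side while QEMU may still be migrating,
     *        so the caller has to cancel the migration itself
     */

The -2 path is what forces callers such as qemuMigrationSrcRun to cancel
the migration explicitly on their error paths.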
@@ -1689,7 +1689,7 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm,
*/
static int
qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
@@ -1699,7 +1699,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm,
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
int rv;
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
while ((rv = qemuMigrationAnyCompleted(vm, asyncJob,
dconn, flags)) != 1) {
@@ -1709,7 +1709,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm,
if (events) {
if (virDomainObjWait(vm) < 0) {
if (virDomainObjIsActive(vm))
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2;
}
} else {
@@ -1729,11 +1729,11 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm,
qemuDomainJobInfoUpdateDowntime(jobInfo);
g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree);
jobPriv->completed = qemuDomainJobInfoCopy(jobInfo);
- jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ jobPriv->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
- if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
+ jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
return 0;
}
@@ -1741,7 +1741,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm,
static int
qemuMigrationDstWaitForCompletion(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool postcopy)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -1849,7 +1849,7 @@ qemuMigrationSrcGraphicsRelocate(virDomainObjPtr vm,
goto cleanup;
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
@@ -1942,7 +1942,7 @@ qemuMigrationDstGetURI(const char *migrateFrom,
int
qemuMigrationDstRun(virDomainObjPtr vm,
const char *uri,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int rv;
@@ -1963,7 +1963,7 @@ qemuMigrationDstRun(virDomainObjPtr vm,
if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
/* qemuMigrationDstWaitForCompletion is called from the Finish phase */
return 0;
}
@@ -1991,11 +1991,11 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm,
VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
vm->def->name, conn,
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob,
priv->job.phase));
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
return;
VIR_DEBUG("The connection which started outgoing migration of domain %s"
@@ -2005,17 +2005,17 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm,
switch ((virMigrationJobPhase) priv->job.phase) {
case VIR_MIGRATION_PHASE_BEGIN3:
/* just forget we were about to migrate */
- qemuDomainObjDiscardAsyncJob(vm, &priv->job);
+ virDomainObjDiscardAsyncJob(vm, &priv->job);
break;
case VIR_MIGRATION_PHASE_PERFORM3_DONE:
VIR_WARN("Migration of domain %s finished but we don't know if
the"
" domain was successfully started on destination or not",
vm->def->name);
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
/* clear the job and let higher levels decide what to do */
- qemuDomainObjDiscardAsyncJob(vm, &priv->job);
+ virDomainObjDiscardAsyncJob(vm, &priv->job);
break;
case VIR_MIGRATION_PHASE_PERFORM3:
@@ -2061,11 +2061,11 @@ qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
cookieout, cookieoutlen, nmigrate_disks,
migrate_disks, flags);
- /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
+ /* Only set the phase if we are inside VIR_ASYNC_JOB_MIGRATION_OUT.
* Otherwise we will start the async job later in the perform phase losing
* change protection.
*/
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_BEGIN3);
if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
@@ -2206,17 +2206,17 @@ qemuMigrationSrcBegin(virConnectPtr conn,
virQEMUDriverPtr driver = conn->privateData;
qemuDomainObjPrivatePtr priv = vm->privateData;
char *xml = NULL;
- qemuDomainAsyncJob asyncJob;
+ virDomainAsyncJob asyncJob;
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
+ asyncJob = VIR_ASYNC_JOB_MIGRATION_OUT;
} else {
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_NONE;
+ asyncJob = VIR_ASYNC_JOB_NONE;
}
qemuMigrationSrcStoreDomainState(vm);
@@ -2259,7 +2259,7 @@ qemuMigrationSrcBegin(virConnectPtr conn,
if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
qemuMigrationJobFinish(vm);
else
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
goto cleanup;
}
@@ -2276,15 +2276,15 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
driver,
vm->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
virPortAllocatorRelease(priv->migrationPort);
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN))
return;
- qemuDomainObjDiscardAsyncJob(vm, &priv->job);
+ virDomainObjDiscardAsyncJob(vm, &priv->job);
}
static qemuProcessIncomingDefPtr
@@ -2523,7 +2523,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                                               !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
goto cleanup;
- if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_IN, flags) < 0)
+ if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_IN, flags) < 0)
goto cleanup;
qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PREPARE);
@@ -2539,7 +2539,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;
- if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN,
true, startFlags) < 0)
goto stopjob;
stopProcess = true;
@@ -2557,7 +2557,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
goto stopjob;
- rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
incoming, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
startFlags);
@@ -2582,7 +2582,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
goto stopjob;
}
- if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsCheck(vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams, mig->caps->automatic) < 0)
goto stopjob;
@@ -2590,7 +2590,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
* set the migration TLS parameters */
if (flags & VIR_MIGRATE_TLS) {
if (qemuMigrationParamsEnableTLS(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
&tlsAlias, NULL,
migParams) < 0)
goto stopjob;
@@ -2599,7 +2599,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
goto stopjob;
}
- if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams) < 0)
goto stopjob;
@@ -2637,10 +2637,10 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
if (incoming->deferredURI &&
qemuMigrationDstRun(vm, incoming->deferredURI,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
goto stopjob;
@@ -2706,7 +2706,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
return ret;
stopjob:
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
if (stopProcess) {
@@ -2715,7 +2715,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
virDomainAuditStart(vm, "migrated", false);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
+ VIR_ASYNC_JOB_MIGRATION_IN, stopFlags);
}
qemuMigrationJobFinish(vm);
@@ -3010,7 +3010,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
*/
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
- qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationAnyFetchStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobInfo, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
@@ -3033,7 +3033,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
qemuMigrationSrcWaitForSpice(vm);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
@@ -3049,7 +3049,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
virErrorPreserveLast(&orig_err);
/* cancel any outstanding NBD jobs */
- qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+ qemuMigrationSrcNBDCopyCancel(vm, false, VIR_ASYNC_JOB_MIGRATION_OUT, NULL);
virErrorRestore(&orig_err);
@@ -3059,7 +3059,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
else
qemuMigrationSrcRestoreDomainState(driver, vm);
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
@@ -3083,7 +3083,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
cfg = virQEMUDriverGetConfig(driver);
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
goto cleanup;
if (cancelled)
@@ -3387,7 +3387,7 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver,
static int
qemuMigrationSrcContinue(virDomainObjPtr vm,
qemuMonitorMigrationStatus status,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
@@ -3413,10 +3413,10 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver,
if (virStringListLength((const char **)priv->dbusVMStateIds) > 0) {
int rv;
- if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rv = qemuMonitorSetDBusVMStateIdList(priv->mon,
@@ -3427,7 +3427,7 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver,
return rv;
} else {
- if (qemuHotplugRemoveDBusVMState(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugRemoveDBusVMState(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
}
@@ -3530,7 +3530,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (qemuMigrationSrcGraphicsRelocate(vm, mig, graphicsuri) < 0)
VIR_WARN("unable to provide data for graphics client relocation");
- if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsCheck(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams, mig->caps->automatic) < 0)
goto error;
@@ -3544,7 +3544,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
hostname = spec->dest.host.name;
if (qemuMigrationParamsEnableTLS(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
&tlsAlias, hostname,
migParams) < 0)
goto error;
@@ -3558,7 +3558,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
migrate_speed * 1024 * 1024) < 0)
goto error;
- if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
@@ -3601,20 +3601,20 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (!(flags & VIR_MIGRATE_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
if (priv->job.abortJob) {
/* explicitly do this *after* we entered the monitor,
* as this is a critical section so we are guaranteed
* priv->job.abortJob will not change */
- jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
+ jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
goto exit_monitor;
}
@@ -3685,7 +3685,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (flags & VIR_MIGRATE_POSTCOPY)
waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
- rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ rc = qemuMigrationSrcWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2) {
goto error;
@@ -3708,7 +3708,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (mig->nbd &&
qemuMigrationSrcNBDCopyCancel(vm, true,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn) < 0)
goto error;
@@ -3716,14 +3716,14 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
* resume it now once we finished all block jobs and wait for the real
* end of the migration.
*/
- if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
+ if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
- rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ rc = qemuMigrationSrcWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2) {
goto error;
@@ -3774,8 +3774,8 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (virDomainObjIsActive(vm)) {
if (cancel &&
-            jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
-            qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+            jobPriv->current->status != VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
+            qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon);
ignore_value(qemuDomainObjExitMonitor(vm));
}
@@ -3783,11 +3783,11 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
/* cancel any outstanding NBD jobs */
if (mig && mig->nbd)
qemuMigrationSrcNBDCopyCancel(vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn);
- if (jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
- jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ if (jobPriv->current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
+ jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
}
if (iothread)
@@ -4634,7 +4634,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
- if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0)
+ if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags) < 0)
goto cleanup;
    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
@@ -4672,7 +4672,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
*/
if (!v3proto) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
event = virDomainEventLifecycleNewFromObj(vm,
@@ -4688,7 +4688,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
* here
*/
if (!v3proto && ret < 0)
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationSrcRestoreDomainState(driver, vm);
@@ -4735,10 +4735,10 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
/* If we didn't start the job in the begin phase, start it now. */
if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
return ret;
- } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
+ } else if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) {
return ret;
}
@@ -4764,7 +4764,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
endjob:
if (ret < 0) {
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
} else {
@@ -4981,7 +4981,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
port = priv->migrationPort;
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) {
qemuMigrationDstErrorReport(driver, vm->def->name);
goto cleanup;
}
@@ -5017,7 +5017,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
/* Check for a possible error on the monitor in case Finish was called
* earlier than monitor EOF handler got a chance to process the error
*/
- qemuDomainCheckMonitor(vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuDomainCheckMonitor(vm, VIR_ASYNC_JOB_MIGRATION_IN);
goto endjob;
}
@@ -5038,7 +5038,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
goto endjob;
if (qemuRefreshVirtioChannelState(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto endjob;
if (qemuConnectAgent(driver, vm) < 0)
@@ -5066,7 +5066,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
/* We need to wait for QEMU to process all data sent by the source
* before starting guest CPUs.
*/
- if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationDstWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_IN,
                                          !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
/* There's not much we can do for v2 protocol since the
* original domain on the source host is already gone.
@@ -5077,14 +5077,14 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
/* Now that the state data was transferred we can refresh the actual state
* of the devices */
- if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ if (qemuProcessRefreshState(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
/* Similarly to the case above v2 protocol will not be able to recover
* from this. Let's ignore this and perhaps stuff will not break. */
if (v3proto)
goto endjob;
}
- if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
+ if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
inPostCopy = true;
if (!(flags & VIR_MIGRATE_PAUSED)) {
@@ -5095,7 +5095,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
if (qemuProcessStartCPUs(driver, vm,
inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
: VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
@@ -5134,7 +5134,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
}
if (inPostCopy) {
- if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationDstWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_IN,
false) < 0) {
goto endjob;
}
@@ -5183,7 +5183,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
virDomainObjIsActive(vm)) {
if (doKill) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "failed");
event = virDomainEventLifecycleNewFromObj(vm,
@@ -5198,8 +5198,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
if (dom) {
if (jobInfo) {
jobPriv->completed = g_steal_pointer(&jobInfo);
- jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
- jobPriv->completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
+ jobPriv->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
+ jobPriv->completed->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION;
}
if (qemuMigrationBakeCookie(mig, driver, vm,
@@ -5215,7 +5215,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree);
}
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
@@ -5245,7 +5245,7 @@ int
qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
int fd,
virCommandPtr compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
    bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
@@ -5426,7 +5426,7 @@ qemuMigrationSrcCancel(virDomainObjPtr vm)
if (storage &&
qemuMigrationSrcNBDCopyCancel(vm, false,
- QEMU_ASYNC_JOB_NONE, NULL) < 0)
+ VIR_ASYNC_JOB_NONE, NULL) < 0)
return -1;
return 0;
@@ -5435,7 +5435,7 @@ qemuMigrationSrcCancel(virDomainObjPtr vm)
static int
qemuMigrationJobStart(virDomainObjPtr vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -5443,22 +5443,22 @@ qemuMigrationJobStart(virDomainObjPtr vm,
virDomainJobOperation op;
unsigned long long mask;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN) {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
- mask = QEMU_JOB_NONE;
+ mask = VIR_JOB_NONE;
} else {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
- mask = QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP);
+ mask = VIR_JOB_DEFAULT_MASK |
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP);
}
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0)
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0)
return -1;
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION;
- qemuDomainObjSetAsyncJobMask(&priv->job, mask);
+ virDomainObjSetAsyncJobMask(&priv->job, mask);
return 0;
}
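
Reviewer note (not part of the diff): the mask arithmetic above relies on
JOB_MASK mapping each job enum value to a single bit, so allowed-job sets
can be OR-ed together. A minimal sketch, assuming the macro and default
mask keep the definitions they had in qemu_domainjob.h before the move:

    /* Illustration only; names follow the renamed virdomainjob.h. */
    #define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))

    /* Incoming migration permits no concurrent sync jobs at all... */
    unsigned long long in_mask = VIR_JOB_NONE;                 /* == 0 */

    /* ...outgoing migration permits the default query/destroy/abort set
     * plus suspend and the migration operation itself. */
    unsigned long long out_mask = VIR_JOB_DEFAULT_MASK |
                                  JOB_MASK(VIR_JOB_SUSPEND) |
                                  JOB_MASK(VIR_JOB_MIGRATION_OP);

virDomainObjSetAsyncJobMask is then expected to OR in the destroy bit
unconditionally, as the qemu implementation did, so a stuck async job can
always be destroyed.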
@@ -5475,7 +5475,7 @@ qemuMigrationJobSetPhase(virDomainObjPtr vm,
return;
}
- qemuDomainObjSetJobPhase(vm, &priv->job, phase);
+ virDomainObjSetJobPhase(vm, &priv->job, phase);
}
static void
@@ -5489,19 +5489,19 @@ static void
qemuMigrationJobContinue(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- qemuDomainObjReleaseAsyncJob(&priv->job);
+ virDomainObjReleaseAsyncJob(&priv->job);
}
static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
if (priv->job.asyncJob != job) {
const char *msg;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN)
msg = _("domain '%s' is not processing incoming
migration");
else
msg = _("domain '%s' is not being migrated");
@@ -5516,7 +5516,7 @@ static void
qemuMigrationJobFinish(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
}
@@ -5574,7 +5574,7 @@ qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
int
qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuDomainJobInfoPtr jobInfo)
{
size_t i;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 8f5e2d0f81..da087671cc 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -191,7 +191,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int fd,
virCommandPtr compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
int
@@ -199,7 +199,7 @@ qemuMigrationSrcCancel(virDomainObjPtr vm);
int
qemuMigrationAnyFetchStats(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuDomainJobInfoPtr jobInfo,
char **error);
@@ -226,7 +226,7 @@ qemuMigrationDstGetURI(const char *migrateFrom,
int
qemuMigrationDstRun(virDomainObjPtr vm,
const char *uri,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
void
qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
@@ -234,5 +234,5 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
int
qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuDomainJobInfoPtr jobInfo);
diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c
index 68f4735bc7..ea43060c4d 100644
--- a/src/qemu/qemu_migration_cookie.c
+++ b/src/qemu/qemu_migration_cookie.c
@@ -1051,7 +1051,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
jobInfo = g_new0(qemuDomainJobInfo, 1);
stats = &jobInfo->stats.mig;
- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started);
virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped);
diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c
index 12f94098c5..c8f835f8d8 100644
--- a/src/qemu/qemu_migration_params.c
+++ b/src/qemu/qemu_migration_params.c
@@ -810,7 +810,7 @@ qemuMigrationParamsApply(virDomainObjPtr vm,
if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob == VIR_ASYNC_JOB_NONE) {
if (!virBitmapIsAllClear(migParams->caps)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Migration capabilities can only be set by "
@@ -1118,7 +1118,7 @@ qemuMigrationParamsCheck(virDomainObjPtr vm,
qemuMigrationParty party;
size_t i;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
party = QEMU_MIGRATION_SOURCE;
else
party = QEMU_MIGRATION_DESTINATION;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index b394bcbd3f..ca03486bc4 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED,
if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY ||
vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) {
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -432,11 +432,11 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED,
}
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, 0);
+ VIR_ASYNC_JOB_NONE, 0);
virDomainAuditStop(vm, "destroyed");
qemuDomainRemoveInactive(driver, vm);
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
}
ret = 0;
@@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque)
VIR_DEBUG("vm=%p", vm);
virObjectLock(vm);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -490,7 +490,7 @@ qemuProcessFakeReboot(void *opaque)
if (qemuProcessStartCPUs(driver, vm,
reason,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
@@ -505,7 +505,7 @@ qemuProcessFakeReboot(void *opaque)
ret = 0;
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
priv->pausedShutdown = false;
@@ -669,8 +669,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED,
* reveal it in domain state nor sent events */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
- if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
+ if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
reason = VIR_DOMAIN_PAUSED_MIGRATION;
@@ -1630,7 +1630,7 @@ qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration
job");
goto cleanup;
}
@@ -1665,7 +1665,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION event without a migration job");
goto cleanup;
}
@@ -1674,7 +1674,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED,
virDomainObjBroadcast(vm);
if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
- priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_MIGRATION) {
VIR_DEBUG("Correcting paused state reason for domain %s to %s",
@@ -1714,7 +1714,7 @@ qemuProcessHandleMigrationPass(qemuMonitorPtr mon G_GNUC_UNUSED,
vm, vm->def->name, pass);
priv = vm->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
goto cleanup;
}
@@ -1746,7 +1746,7 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
@@ -1949,7 +1949,7 @@ qemuProcessMonitorLogFree(void *opaque)
static int
qemuProcessInitMonitor(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret;
@@ -2249,7 +2249,7 @@ qemuProcessRefreshChannelVirtioState(virQEMUDriverPtr driver,
int
qemuRefreshVirtioChannelState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr info = NULL;
@@ -2586,7 +2586,7 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm G_GNUC_UNUSED)
/* set link states to down on interfaces at qemu start */
static int
qemuProcessSetLinkStates(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainDefPtr def = vm->def;
@@ -3207,7 +3207,7 @@ qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
int
qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -3259,7 +3259,7 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
int qemuProcessStopCPUs(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -3419,7 +3419,7 @@ qemuProcessUpdateState(virDomainObjPtr vm)
static int
qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- const qemuDomainJobObj *job,
+ const virDomainJobObj *job,
virDomainState state,
int reason)
{
@@ -3454,7 +3454,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
vm->def->name);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
break;
@@ -3472,7 +3472,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
break;
}
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
@@ -3480,7 +3480,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
static int
qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- const qemuDomainJobObj *job,
+ const virDomainJobObj *job,
virDomainState state,
int reason,
unsigned int *stopFlags)
@@ -3562,13 +3562,13 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
}
}
- qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
@@ -3576,7 +3576,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
static int
qemuProcessRecoverJob(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- const qemuDomainJobObj *job,
+ const virDomainJobObj *job,
unsigned int *stopFlags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -3588,21 +3588,21 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
state = virDomainObjGetState(vm, &reason);
switch (job->asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
if (qemuProcessRecoverMigrationOut(driver, vm, job,
state, reason, stopFlags) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
if (qemuProcessRecoverMigrationIn(driver, vm, job,
state, reason) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
qemuDomainObjEnterMonitor(vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon));
if (qemuDomainObjExitMonitor(vm) < 0)
@@ -3612,53 +3612,53 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
* recovering an async job, this function is run at startup
* and must resume things using sync monitor connections. */
if (state == VIR_DOMAIN_PAUSED &&
- ((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
+ ((job->asyncJob == VIR_ASYNC_JOB_DUMP &&
reason == VIR_DOMAIN_PAUSED_DUMP) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
+ (job->asyncJob == VIR_ASYNC_JOB_SAVE &&
reason == VIR_DOMAIN_PAUSED_SAVE) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
+ (job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT &&
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain '%s' after migration to
file",
vm->def->name);
}
}
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
/* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
break;
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
ignore_value(virTimeMillisNow(&now));
/* Restore the config of the async job which is not persisted */
priv->jobs_queued++;
- priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP;
+ priv->job.asyncJob = VIR_ASYNC_JOB_BACKUP;
priv->job.asyncOwnerAPI = virThreadJobGet();
priv->job.asyncStarted = now;
- qemuDomainObjSetAsyncJobMask(&priv->job,
- (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
+ virDomainObjSetAsyncJobMask(&priv->job,
+ (VIR_JOB_DEFAULT_MASK |
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
/* We reset the job parameters for backup so that the job will look
* active. This is possible because we are able to recover the state
* of blockjobs and also the backup job allows all sub-job types */
jobPriv->current = g_new0(qemuDomainJobInfo, 1);
jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP;
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
- jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_BACKUP;
+ jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
jobPriv->current->started = now;
break;
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -3666,36 +3666,36 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
return -1;
/* In case any special handling is added for job type that has been ignored
- * before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
+ * before, VIR_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
* for the job to be properly tracked in domain state XML.
*/
switch (job->active) {
- case QEMU_JOB_QUERY:
+ case VIR_JOB_QUERY:
/* harmless */
break;
- case QEMU_JOB_DESTROY:
+ case VIR_JOB_DESTROY:
VIR_DEBUG("Domain %s should have already been destroyed",
vm->def->name);
return -1;
- case QEMU_JOB_SUSPEND:
+ case VIR_JOB_SUSPEND:
/* mostly harmless */
break;
- case QEMU_JOB_MODIFY:
+ case VIR_JOB_MODIFY:
/* XXX depending on the command we may be in an inconsistent state and
* we should probably fall back to "monitor error" state and refuse to
*/
break;
- case QEMU_JOB_MIGRATION_OP:
- case QEMU_JOB_ABORT:
- case QEMU_JOB_ASYNC:
- case QEMU_JOB_ASYNC_NESTED:
+ case VIR_JOB_MIGRATION_OP:
+ case VIR_JOB_ABORT:
+ case VIR_JOB_ASYNC:
+ case VIR_JOB_ASYNC_NESTED:
/* async job was already handled above */
- case QEMU_JOB_NONE:
- case QEMU_JOB_LAST:
+ case VIR_JOB_NONE:
+ case VIR_JOB_LAST:
break;
}
@@ -3715,7 +3715,7 @@ qemuProcessUpdateDevices(virQEMUDriverPtr driver,
old = priv->qemuDevices;
priv->qemuDevices = NULL;
- if (qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
qemuDevices = (const char **)priv->qemuDevices;
@@ -4191,7 +4191,7 @@ qemuProcessTranslateCPUFeatures(const char *name,
static int
qemuProcessFetchGuestCPU(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virCPUDataPtr *enabled,
virCPUDataPtr *disabled)
{
@@ -4297,7 +4297,7 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm,
static int
qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virCPUDataPtr cpu = NULL;
virCPUDataPtr disabled = NULL;
@@ -4323,7 +4323,7 @@ qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm,
static int
qemuProcessFetchCPUDefinitions(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainCapsCPUModelsPtr *cpuModels)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -4345,7 +4345,7 @@ qemuProcessFetchCPUDefinitions(virDomainObjPtr vm,
static int
qemuProcessUpdateCPU(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virCPUData) cpu = NULL;
g_autoptr(virCPUData) disabled = NULL;
@@ -4562,9 +4562,9 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
/*
- * This function starts a new QEMU_ASYNC_JOB_START async job. The user is
+ * This function starts a new VIR_ASYNC_JOB_START async job. The user is
* responsible for calling qemuProcessEndJob to stop this job and for passing
- * QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
+ * VIR_ASYNC_JOB_START as @asyncJob argument to any function requiring this
* parameter between qemuProcessBeginJob and qemuProcessEndJob.
*/
int
@@ -4574,11 +4574,11 @@ qemuProcessBeginJob(virDomainObjPtr vm,
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_START,
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_START,
operation, apiFlags) < 0)
return -1;
- qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE);
+ virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_NONE);
return 0;
}
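
Reviewer note (not part of the diff): concretely, the contract described
above pairs one qemuProcessBeginJob with one qemuProcessEndJob and threads
VIR_ASYNC_JOB_START through every helper called in between. A rough caller
sketch with error paths trimmed; the argument list follows the
qemuProcessStart prototype in the qemu_process.h hunk below:

    if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0)
        return -1;

    rc = qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START,
                          NULL, -1, NULL, NULL,
                          VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                          VIR_QEMU_PROCESS_START_COLD);

    qemuProcessEndJob(vm);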
@@ -4587,7 +4587,7 @@ void
qemuProcessEndJob(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
}
@@ -5045,7 +5045,7 @@ qemuProcessSetupRawIO(virQEMUDriverPtr driver,
static int
qemuProcessSetupBalloon(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned long long balloon = vm->def->mem.cur_balloon;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -5517,7 +5517,7 @@ int
qemuProcessInit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virCPUDefPtr updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags)
{
@@ -5910,7 +5910,7 @@ qemuProcessVcpusSortOrder(const void *a,
static int
qemuProcessSetupHotpluggableVcpus(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -6593,7 +6593,7 @@ qemuProcessGenID(virDomainObjPtr vm,
*/
static int
qemuProcessSetupDiskThrottlingBlockdev(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
@@ -6664,7 +6664,7 @@ int
qemuProcessLaunch(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDefPtr incoming,
virDomainMomentObjPtr snapshot,
virNetDevVPortProfileOp vmop,
@@ -7008,7 +7008,7 @@ qemuProcessLaunch(virConnectPtr conn,
int
qemuProcessRefreshState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -7043,7 +7043,7 @@ qemuProcessRefreshState(virQEMUDriverPtr driver,
int
qemuProcessFinishStartup(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason)
{
@@ -7081,7 +7081,7 @@ qemuProcessStart(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
virCPUDefPtr updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int migrateFd,
const char *migratePath,
@@ -7101,7 +7101,7 @@ qemuProcessStart(virConnectPtr conn,
"migrateFrom=%s migrateFd=%d migratePath=%s "
"snapshot=%p vmop=%d flags=0x%x",
conn, driver, vm, vm->def->name, vm->def->id,
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
snapshot, vmop, flags);
@@ -7216,7 +7216,7 @@ qemuProcessCreatePretendCmd(virQEMUDriverPtr driver,
if (jsonPropsValidation)
buildflags = QEMU_BUILD_COMMANDLINE_VALIDATE_KEEP_JSON;
- if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
+ if (qemuProcessInit(driver, vm, NULL, VIR_ASYNC_JOB_NONE,
!!migrateURI, flags) < 0)
return NULL;
@@ -7276,7 +7276,7 @@ qemuProcessKill(virDomainObjPtr vm, unsigned int flags)
*/
int
qemuProcessBeginStopJob(virDomainObjPtr vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -7295,7 +7295,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm,
/* Wake up anything waiting on domain condition */
virDomainObjBroadcast(vm);
- if (qemuDomainObjBeginJob(vm, &priv->job, job) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, job) < 0)
goto cleanup;
ret = 0;
@@ -7309,7 +7309,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm,
void qemuProcessStop(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags)
{
int ret;
@@ -7328,21 +7328,21 @@ void qemuProcessStop(virQEMUDriverPtr driver,
vm, vm->def->name, vm->def->id,
(long long)vm->pid,
virDomainShutoffReasonTypeToString(reason),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
flags);
/* This method is routinely used in clean up paths. Disable error
* reporting so we don't squash a legit error. */
virErrorPreserveLast(&orig_err);
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
- if (qemuDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0)
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
+ if (virDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0)
goto cleanup;
- } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
+ } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
priv->job.asyncOwner == virThreadSelfID() &&
- priv->job.active != QEMU_JOB_ASYNC_NESTED) {
+ priv->job.active != VIR_JOB_ASYNC_NESTED) {
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob));
+ virDomainAsyncJobTypeToString(asyncJob));
}
if (!virDomainObjIsActive(vm)) {
@@ -7558,7 +7558,7 @@ void qemuProcessStop(virQEMUDriverPtr driver,
/* clean up a possible backup job */
if (priv->backup)
- qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED);
+ qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
qemuProcessRemoveDomainStatus(driver, vm);
@@ -7641,8 +7641,8 @@ void qemuProcessStop(virQEMUDriverPtr driver,
virDomainObjRemoveTransientDef(vm);
endjob:
- if (asyncJob != QEMU_ASYNC_JOB_NONE)
- qemuDomainObjEndJob(vm, &priv->job);
+ if (asyncJob != VIR_ASYNC_JOB_NONE)
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
virErrorRestore(&orig_err);
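
Reviewer note (not part of the diff): the nested-job logic in the
qemuProcessStop hunks above is easy to misread, so a hedged reading:

    /* When qemuProcessStop() runs inside an async job (asyncJob !=
     * VIR_ASYNC_JOB_NONE) it opens a nested job (VIR_JOB_ASYNC_NESTED)
     * so its monitor access stays serialized against other threads; the
     * virDomainObjEndJob() under the endjob: label closes exactly that
     * nested job.  The VIR_WARN branch only fires when the calling
     * thread already owns an async job but passed VIR_ASYNC_JOB_NONE,
     * i.e. forgot to request the nested job. */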
@@ -7661,22 +7661,22 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
- qemuDomainObjDiscardAsyncJob(dom, &priv->job);
+ virDomainObjDiscardAsyncJob(dom, &priv->job);
}
VIR_DEBUG("Killing domain");
- if (qemuProcessBeginStopJob(dom, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(dom, VIR_JOB_DESTROY, true) < 0)
return;
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
@@ -7685,7 +7685,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
qemuDomainRemoveInactive(driver, dom);
- qemuDomainObjEndJob(dom, &priv->job);
+ virDomainObjEndJob(dom, &priv->job);
virObjectEventStateQueue(driver->domainEventState, event);
}
@@ -7719,7 +7719,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver,
int
qemuProcessRefreshDisks(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
@@ -7775,7 +7775,7 @@ qemuProcessRefreshDisks(virDomainObjPtr vm,
static int
qemuProcessRefreshCPUMigratability(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainDefPtr def = vm->def;
@@ -7833,7 +7833,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver,
if (!vm->def->cpu)
return 0;
- if (qemuProcessRefreshCPUMigratability(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshCPUMigratability(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
if (!(host = virQEMUDriverGetHostCPU(driver))) {
@@ -7868,7 +7868,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver,
if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0)
return -1;
- if (qemuProcessUpdateCPU(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessUpdateCPU(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
    } else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
/* We only try to fix CPUs when the libvirt/QEMU combo used to start
@@ -8010,7 +8010,7 @@ qemuProcessReconnect(void *opaque)
virQEMUDriverPtr driver = data->driver;
virDomainObjPtr obj = data->obj;
qemuDomainObjPrivatePtr priv;
- qemuDomainJobObj oldjob;
+ virDomainJobObj oldjob;
int state;
int reason;
g_autoptr(virQEMUDriverConfig) cfg = NULL;
@@ -8025,13 +8025,13 @@ qemuProcessReconnect(void *opaque)
VIR_FREE(data);
priv = obj->privateData;
- qemuDomainObjRestoreJob(&priv->job, &oldjob);
- if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ virDomainObjRestoreJob(&priv->job, &oldjob);
+ if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
cfg = virQEMUDriverGetConfig(driver);
- if (qemuDomainObjBeginJob(obj, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(obj, &priv->job, VIR_JOB_MODIFY) < 0)
goto error;
jobStarted = true;
@@ -8064,7 +8064,7 @@ qemuProcessReconnect(void *opaque)
tryMonReconn = true;
/* XXX check PID liveliness & EXE path */
- if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0)
+ if (qemuConnectMonitor(driver, obj, VIR_ASYNC_JOB_NONE, retry, NULL) < 0)
goto error;
priv->machineName = qemuDomainGetMachineName(obj);
@@ -8164,12 +8164,12 @@ qemuProcessReconnect(void *opaque)
if (qemuProcessRefreshCPU(driver, obj) < 0)
goto error;
- if (qemuDomainRefreshVcpuInfo(obj, QEMU_ASYNC_JOB_NONE, true) < 0)
+ if (qemuDomainRefreshVcpuInfo(obj, VIR_ASYNC_JOB_NONE, true) < 0)
goto error;
qemuDomainVcpuPersistOrder(obj->def);
- if (qemuProcessDetectIOThreadPIDs(obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessDetectIOThreadPIDs(obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
    if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
@@ -8179,20 +8179,20 @@ qemuProcessReconnect(void *opaque)
qemuProcessFiltersInstantiate(obj->def);
- if (qemuProcessRefreshDisks(obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshDisks(obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- qemuBlockNodeNamesDetect(obj, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuRefreshVirtioChannelState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
    /* If querying of guest's RTC failed, report error, but do not kill the domain. */
qemuRefreshRTC(obj);
- if (qemuProcessRefreshBalloonState(obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshBalloonState(obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
@@ -8258,7 +8258,7 @@ qemuProcessReconnect(void *opaque)
if (jobStarted) {
if (!virDomainObjIsActive(obj))
qemuDomainRemoveInactive(driver, obj);
- qemuDomainObjEndJob(obj, &priv->job);
+ virDomainObjEndJob(obj, &priv->job);
} else {
if (!virDomainObjIsActive(obj))
qemuDomainRemoveInactiveJob(driver, obj);
@@ -8291,7 +8291,7 @@ qemuProcessReconnect(void *opaque)
* thread didn't have a chance to start playing with the domain yet
* (it's all we can do anyway).
*/
- qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, obj, state, VIR_ASYNC_JOB_NONE, stopFlags);
}
goto cleanup;
}
@@ -8336,7 +8336,7 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
* object.
*/
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_NONE, 0);
+ VIR_ASYNC_JOB_NONE, 0);
qemuDomainRemoveInactiveJobLocked(src->driver, obj);
virDomainObjEndAPI(&obj);
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 448b65537a..fb3cd85bb2 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -32,11 +32,11 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
int qemuProcessStartCPUs(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessBuildDestroyMemoryPaths(virQEMUDriverPtr driver,
virDomainObjPtr vm,
@@ -86,7 +86,7 @@ int qemuProcessStart(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
virCPUDefPtr updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int stdin_fd,
const char *stdin_path,
@@ -105,7 +105,7 @@ virCommandPtr qemuProcessCreatePretendCmd(virQEMUDriverPtr driver,
int qemuProcessInit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virCPUDefPtr updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags);
@@ -122,7 +122,7 @@ int qemuProcessPrepareHost(virQEMUDriverPtr driver,
int qemuProcessLaunch(virConnectPtr conn,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDefPtr incoming,
virDomainMomentObjPtr snapshot,
virNetDevVPortProfileOp vmop,
@@ -130,13 +130,13 @@ int qemuProcessLaunch(virConnectPtr conn,
int qemuProcessFinishStartup(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason);
int qemuProcessRefreshState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
typedef enum {
VIR_QEMU_PROCESS_STOP_MIGRATED = 1 << 0,
@@ -144,12 +144,12 @@ typedef enum {
} qemuProcessStopFlags;
int qemuProcessBeginStopJob(virDomainObjPtr vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill);
void qemuProcessStop(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags);
typedef enum {
@@ -190,13 +190,13 @@ int qemuProcessSetupIOThread(virDomainObjPtr vm,
int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessRefreshBalloonState(virDomainObjPtr vm,
int asyncJob);
int qemuProcessRefreshDisks(virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStartManagedPRDaemon(virDomainObjPtr vm) G_GNUC_NO_INLINE;
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 52468056ad..28d2349869 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -261,7 +261,7 @@ qemuSaveImageCreate(virQEMUDriverPtr driver,
virQEMUSaveDataPtr data,
virCommandPtr compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
bool needUnlink = false;
@@ -578,7 +578,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
virQEMUSaveDataPtr data,
const char *path,
bool start_paused,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index f9fecbcc46..39c4ec128a 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -22,7 +22,7 @@
#include "datatypes.h"
#include "qemu_conf.h"
-#include "qemu_domainjob.h"
+#include "virdomainjob.h"
#include "qemu_domain.h"
/* It would be nice to replace 'Qemud' with 'Qemu' but
@@ -69,7 +69,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
virQEMUSaveDataPtr data,
const char *path,
bool start_paused,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
@@ -98,7 +98,7 @@ qemuSaveImageCreate(virQEMUDriverPtr driver,
virQEMUSaveDataPtr data,
virCommandPtr compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
virQEMUSaveDataWrite(virQEMUSaveDataPtr data,
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 8d216bbdbd..0692a4e7f3 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -284,7 +284,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
* domain. Thus we stop and start CPUs ourselves.
*/
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
resume = true;
@@ -295,7 +295,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
}
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
resume = false;
goto cleanup;
}
@@ -313,7 +313,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
}
@@ -322,7 +322,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -793,7 +793,7 @@ qemuSnapshotDiskCleanup(qemuSnapshotDiskDataPtr data,
size_t ndata,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virErrorPtr orig_err;
size_t i;
@@ -884,7 +884,7 @@ qemuSnapshotDiskPrepareOneBlockdev(virDomainObjPtr vm,
virQEMUDriverConfigPtr cfg,
bool reuse,
virHashTablePtr blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virStorageSource) terminator = NULL;
@@ -939,7 +939,7 @@ qemuSnapshotDiskPrepareOne(virQEMUDriverPtr driver,
virHashTablePtr blockNamedNodeData,
bool reuse,
bool blockdev,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValuePtr actions)
{
virDomainDiskDefPtr persistdisk;
@@ -1051,7 +1051,7 @@ qemuSnapshotDiskPrepare(virQEMUDriverPtr driver,
bool reuse,
bool blockdev,
virHashTablePtr blockNamedNodeData,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuSnapshotDiskDataPtr *rdata,
size_t *rndata,
virJSONValuePtr actions)
@@ -1156,7 +1156,7 @@ qemuSnapshotCreateDiskActive(virQEMUDriverPtr driver,
virHashTablePtr blockNamedNodeData,
unsigned int flags,
virQEMUDriverConfigPtr cfg,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
g_autoptr(virJSONValue) actions = NULL;
@@ -1248,16 +1248,16 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) {
int freeze;
- if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0) {
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
goto cleanup;
}
freeze = qemuSnapshotFSFreeze(vm, NULL, 0);
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
if (freeze < 0) {
/* the helper reported the error */
@@ -1281,7 +1281,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
if (memory && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_LIVE)) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1298,7 +1298,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
* migration step as qemu deactivates bitmaps after migration so the result
* would be wrong */
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_SNAPSHOT)))
+ !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT)))
goto cleanup;
/* do the memory snapshot if necessary */
@@ -1309,12 +1309,12 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
+ jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* allow the migration job to be cancelled or the domain to be paused */
- qemuDomainObjSetAsyncJobMask(&priv->job, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+ virDomainObjSetAsyncJobMask(&priv->job, (VIR_JOB_DEFAULT_MASK |
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP)));
if ((compressed =
qemuSaveImageGetCompressionProgram(cfg->snapshotImageFormat,
&compressor,
@@ -1335,21 +1335,21 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
if ((ret = qemuSaveImageCreate(driver, vm, snapdef->file, data,
compressor, 0,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the memory image was created, remove it on errors */
memory_unlink = true;
/* forbid any further manipulation */
- qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_DEFAULT_MASK);
+ virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_DEFAULT_MASK);
}
/* the domain is now paused if a memory snapshot was requested */
if ((ret = qemuSnapshotCreateDiskActive(driver, vm, snap,
blockNamedNodeData, flags, cfg,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the snapshot is complete now */
@@ -1357,7 +1357,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
thaw = 0;
@@ -1379,7 +1379,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -1393,7 +1393,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
}
if (thaw != 0 &&
- qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) >= 0 &&
+ virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) >= 0 &&
virDomainObjIsActive(vm)) {
if (qemuSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) {
/* helper reported the error, if it was needed */
@@ -1401,7 +1401,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
ret = -1;
}
- qemuDomainObjEndAgentJob(vm, &priv->job);
+ virDomainObjEndAgentJob(vm, &priv->job);
}
virQEMUSaveDataFree(data);
@@ -1544,11 +1544,11 @@ qemuSnapshotCreateXML(virDomainPtr domain,
* a regular job, so we need to set the job mask to disallow query as
* 'savevm' blocks the monitor. External snapshot will then modify the
* job mask appropriately. */
- if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SNAPSHOT,
- VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
+ if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_SNAPSHOT,
+ VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
goto cleanup;
- qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE);
+ virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_NONE);
if (redefine) {
if (virDomainSnapshotRedefinePrep(vm, &def, &snap,
@@ -1679,7 +1679,7 @@ qemuSnapshotCreateXML(virDomainPtr domain,
virDomainSnapshotObjListRemove(vm->snapshots, snap);
}
- qemuDomainObjEndAsyncJob(vm, &priv->job);
+ virDomainObjEndAsyncJob(vm, &priv->job);
cleanup:
return snapshot;
@@ -1719,7 +1719,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
qemuDomainSaveCookiePtr cookie;
virCPUDefPtr origCPU = NULL;
unsigned int start_flags = VIR_QEMU_PROCESS_START_GEN_VMID;
- qemuDomainAsyncJob jobType = QEMU_ASYNC_JOB_START;
+ virDomainAsyncJob jobType = VIR_ASYNC_JOB_START;
bool defined = false;
virCheckFlags(VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING |
@@ -1891,7 +1891,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
virResetError(err);
qemuProcessStop(driver, vm,
VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
@@ -1900,7 +1900,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
virObjectEventStateQueue(driver->domainEventState, event);
/* Start after stop won't be an async start job, so
* reset to none */
- jobType = QEMU_ASYNC_JOB_NONE;
+ jobType = VIR_ASYNC_JOB_NONE;
goto load;
}
}
@@ -1909,7 +1909,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
/* Transitions 5, 6 */
if (qemuProcessStopCPUs(driver, vm,
VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START) < 0)
+ VIR_ASYNC_JOB_START) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -1918,7 +1918,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
}
}
- if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_START) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_START) < 0)
goto endjob;
rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name);
if (qemuDomainObjExitMonitor(vm) < 0)
@@ -2028,7 +2028,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
if (virDomainObjIsActive(vm)) {
/* Transitions 4, 7 */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
@@ -2057,7 +2057,7 @@ qemuSnapshotRevert(virDomainObjPtr vm,
virObjectEventStateQueue(driver->domainEventState, event);
rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
- QEMU_ASYNC_JOB_START, NULL, -1, NULL, NULL,
+ VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags);
virDomainAuditStart(vm, "from-snapshot", rc >= 0);
@@ -2185,7 +2185,7 @@ qemuSnapshotDelete(virDomainObjPtr vm,
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY |
VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0)
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
@@ -2258,7 +2258,7 @@ qemuSnapshotDelete(virDomainObjPtr vm,
}
endjob:
- qemuDomainObjEndJob(vm, &priv->job);
+ virDomainObjEndJob(vm, &priv->job);
cleanup:
return ret;
--
2.25.1